Simplify configs.
k8s.nix | 40
@@ -39,13 +39,6 @@ let
    };
  };

  etcdClient = node:{
    servers = [ "https://etcd0:2379" "https://etcd1:2379" ];
    certFile = ./pki + "/${node}.pem";
    keyFile = ./pki + "/${node}-key.pem";
    caFile = ./pki/ca.pem;
  };

  kubeConfig = node: {
    require = [ (flannelConfig node) ];
    networking.firewall.allowedUDPPorts = [ 8472 ]; # VXLAN
@@ -54,10 +47,15 @@ let
    systemd.services.docker.serviceConfig.EnvironmentFile = "/run/flannel/subnet.env";
    virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false --bip $FLANNEL_SUBNET";
    # services.kubernetes.verbose = true;
    services.kubernetes.etcd = {
      servers = [ "https://etcd0:2379" "https://etcd1:2379" ];
      certFile = ./pki + "/${node}.pem";
      keyFile = ./pki + "/${node}-key.pem";
      caFile = ./pki/ca.pem;
    };
  };

  kubeNode = doConfig: node: {
    require = if doConfig then [ (kubeConfig node) ] else [];
    services.kubernetes = {
      roles = [ "node" ];
      kubeconfig = {
@@ -70,14 +68,12 @@ let
        tlsCertFile = ./pki + "/${node}.pem";
        tlsKeyFile = ./pki + "/${node}-key.pem";
        networkPlugin = null;
        clusterDns = "10.253.18.100";
        clusterDns = "kubernetes";
      };
      etcd = if doConfig then (etcdClient node) else {};
    };
  };

  kubeMaster = node: {
    require = [ (kubeConfig node) (kubeNode false node)];
    services.dockerRegistry = {
      enable = true;
      listenAddress = "0.0.0.0";
@@ -94,7 +90,6 @@ let
        # kubeletClientCertFile = ./pki + "/${node}.pem";
        # kubeletClientKeyFile = ./pki + "/${node}-key.pem";
      };
      etcd = (etcdClient node);
      scheduler.leaderElect = true;
      controllerManager.leaderElect = true;
      controllerManager.serviceAccountKeyFile = ./pki/apiserver-key.pem;
@@ -106,8 +101,9 @@ let
  };

  baseConfig = node: {
    networking.hostName = node;
    imports = [ (./hw + "/${node}.nix") ./base/configuration.nix ];
    require = [ (kubeConfig node) ];
    networking.hostName = node;
    networking.extraHosts = ''
      10.253.18.100 etcd0 kubernetes
      10.253.18.101 etcd1
@@ -117,20 +113,23 @@ in
{
  k8s0-0 = { config, lib, pkgs, ... }:
    let
      host = "k8s0-0";
      etcd = etcdConfig "etcd0";
      base = baseConfig "k8s0-0";
      master = kubeMaster "k8s0-0";
      base = baseConfig host;
      master = kubeMaster host;
      node = kubeNode true host;
    in
    {
      deployment.targetHost = "10.253.18.100";
      require = [ base etcd master ];
      require = [ base etcd master node ];
    };

  k8s0-1 = { config, lib, pkgs, ... }:
    let
      host = "k8s0-1";
      etcd = etcdConfig "etcd1";
      base = baseConfig "k8s0-1";
      node = kubeNode true "k8s0-1";
      base = baseConfig host;
      node = kubeNode true host;
    in
    {
      deployment.targetHost = "10.253.18.101";
@@ -139,8 +138,9 @@ in

  k8s0-2 = { config, lib, pkgs, ... }:
    let
      base = baseConfig "k8s0-2";
      node = kubeNode true "k8s0-2";
      host = "k8s0-2";
      base = baseConfig host;
      node = kubeNode true host;
    in
    {
      deployment.targetHost = "10.253.18.102";
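The diff markers were lost in extraction, but the hunk counts (13 lines shrinking to 6 around etcdClient, 10 growing to 15 around kubeConfig) suggest the per-node etcd client settings move out of the standalone etcdClient helper and are set directly as services.kubernetes.etcd inside kubeConfig. A minimal sketch of kubeConfig under that reading, reusing only option values that appear verbatim in the hunks above; the docker/flannel lines are elided and marked as such:

  kubeConfig = node: {
    require = [ (flannelConfig node) ];
    networking.firewall.allowedUDPPorts = [ 8472 ]; # VXLAN for flannel
    # ... docker/flannel wiring from the hunk above elided here ...
    services.kubernetes.etcd = {
      servers  = [ "https://etcd0:2379" "https://etcd1:2379" ];
      certFile = ./pki + "/${node}.pem";      # per-node client certificate
      keyFile  = ./pki + "/${node}-key.pem";
      caFile   = ./pki/ca.pem;
    };
  };

Under that reading, every machine that imports kubeConfig gets the etcd client certificate matching its node name without a separate helper call.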
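The other recurring change in the node entries is binding the hostname once and passing it to every helper. A short sketch of that pattern for a hypothetical extra worker; k8s0-3 and its address are invented for illustration, and only baseConfig and kubeNode come from the file above:

  k8s0-3 = { config, lib, pkgs, ... }:
    let
      host = "k8s0-3";             # hostname written exactly once
      base = baseConfig host;      # hostname, hw import, /etc/hosts entries, kubeConfig
      node = kubeNode true host;   # node role plus the per-node etcd client settings
    in
    {
      deployment.targetHost = "10.253.18.103";  # assumed address, illustration only
      require = [ base node ];
    };

Adding or renaming a worker then means editing the host string and the target address only, rather than every call site.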