Migration to new setup

Jonas Juselius
2019-10-15 15:33:43 +02:00
parent 7b59038e50
commit e4765df729
19 changed files with 346 additions and 3212 deletions

@@ -1,6 +1,6 @@
-{ pkgs, masterNode, etcdNodes, clusterHosts, certs, ...}:
+{ pkgs, masterAddress, etcdNodes, clusterHosts, certs, ...}:
 let
-kubeApiserver = "https://${masterNode}:8443";
+kubeApiserver = "https://${masterAddress}:8443";
 localApiserver = "http://127.0.0.1:8080";
 etcdEndpoints = builtins.map (x: "https://${x}:2379") etcdNodes;
 etcdCluster = builtins.map (x: "${x}=https://${x}:2380") etcdNodes;
@@ -10,8 +10,8 @@ rec {
 services.etcd = {
 inherit name;
 enable = true;
-listenClientUrls = ["https://0.0.0.0:2379"];
-listenPeerUrls = ["https://0.0.0.0:2380"];
+listenClientUrls = [ "https://0.0.0.0:2379" ];
+listenPeerUrls = [ "https://0.0.0.0:2380" ];
 peerClientCertAuth = true;
 keyFile = certs.etcd.key;
 certFile = certs.etcd.cert;
@@ -30,7 +30,7 @@ rec {
 systemd.services.flannel.after = [ "etcd.service" ];
 };
-clientConf = instance: {
+clientConf = instance: {
 server = kubeApiserver;
 keyFile = certs.${instance}.key;
 certFile = certs.${instance}.cert;
@@ -39,6 +39,7 @@ rec {
 kubeNode = instance: {
 services.kubernetes = rec {
+inherit masterAddress;
 roles = [ "node" ];
 kubeconfig = clientConf instance;
 kubelet = {
@@ -69,8 +70,8 @@ rec {
roles = [ "master" ];
kubelet.unschedulable = false;
apiserver = {
bindAddress = "0.0.0.0"; #masterNode;
advertiseAddress = masterNode;
bindAddress = "0.0.0.0"; #masterAddress;
advertiseAddress = masterAddress;
authorizationMode = [ "Node" "RBAC" ];
securePort = 8443;
tlsKeyFile = certs.apiserver.key;
@@ -80,12 +81,18 @@ rec {
 kubeletClientKeyFile = certs.apiserver.key;
 kubeletClientCertFile = certs.apiserver.cert;
 serviceAccountKeyFile = certs.apiserver.key;
+etcd = {
+servers = etcdEndpoints;
+keyFile = certs.apiserver.key;
+certFile = certs.apiserver.cert;
+caFile = certs.ca.cert;
+};
 };
 scheduler.leaderElect = true;
 controllerManager = {
 leaderElect = true;
 serviceAccountKeyFile = certs.apiserver.key;
-rootCaFile = certs.ca.cert;
+# rootCaFile = certs.ca.cert;
 kubeconfig.server = localApiserver;
 };
 scheduler.kubeconfig.server = localApiserver;
@@ -96,7 +103,7 @@ rec {
 version = "v1.10.0";
 rbac.enable = true;
 rbac.clusterAdmin = true;
-tokenTtl = 0;
+# tokenTtl = 0;
 image = {
 imageName = "k8s.gcr.io/kubernetes-dashboard-amd64";
 imageDigest = "sha256:1d2e1229a918f4bc38b5a3f9f5f11302b3e71f8397b492afac7f273a0008776a";
@@ -114,16 +121,9 @@ rec {
 kubeConfig = instance: {
 services.kubernetes = {
 verbose = false;
-caFile = certs.ca.cert;
+# caFile = certs.ca.cert;
 flannel.enable = true;
 clusterCidr = "10.10.0.0/16";
-etcd = {
-servers = etcdEndpoints;
-keyFile = certs.apiserver.key;
-certFile = certs.apiserver.cert;
-caFile = certs.ca.cert;
-};
 proxy = {
 kubeconfig = clientConf "kube-proxy";
 };
@@ -149,9 +149,9 @@ rec {
 networking = {
 hostName = instance;
 extraHosts = clusterHosts;
-# nameservers = [ masterNode ];
+# nameservers = [ masterAddress ];
 # dhcpcd.extraConfig = ''
-# static domain_name_servers=${masterNode}
+# static domain_name_servers=${masterAddress}
 # '';
 firewall.allowedTCPPortRanges = [ { from = 5000; to = 50000; } ];
 firewall.allowedTCPPorts = [ 80 443 111 ];
@@ -200,15 +200,5 @@ rec {
 (kubeConfig name)
 (kubeNode name)
 ];
-services.dockerRegistry = {
-enable = true;
-listenAddress = "0.0.0.0";
-enableDelete = true;
-enableGarbageCollect = true;
-extraConfig = {
-REGISTRY_HTTP_TLS_CERTIFICATE = "${certs.apiserver.cert}";
-REGISTRY_HTTP_TLS_KEY = "${certs.apiserver.key}";
-};
-};
 };
 }
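
The masterNode -> masterAddress rename lines up the module's argument with the services.kubernetes.masterAddress option of the NixOS Kubernetes module, which kubeNode now forwards via `inherit masterAddress;`, so the API server URL (`https://${masterAddress}:8443`) and the node configuration are derived from a single value. Below is a minimal sketch of how a host configuration might call the updated module; the file names ./cluster.nix and ./certs.nix, the addresses, and the host names are illustrative assumptions, not part of this commit:

    # Hypothetical host module; every concrete value below is a placeholder.
    let
      pkgs = import <nixpkgs> { };
      cluster = import ./cluster.nix {       # assumed path of the file changed above
        inherit pkgs;
        masterAddress = "10.1.1.10";         # host name or IP of the master node
        etcdNodes     = [ "10.1.1.10" "10.1.1.11" "10.1.1.12" ];
        clusterHosts  = ''
          10.1.1.10 master
          10.1.1.11 worker1
        '';                                  # consumed by networking.extraHosts
        certs = import ./certs.nix;          # assumed; must expose <component>.{key,cert} plus ca
      };
    in
    {
      # kubeConfig and kubeNode are attributes of the rec { ... } set returned by the module.
      imports = [ (cluster.kubeConfig "worker1") (cluster.kubeNode "worker1") ];
    }

Keeping the cluster inputs in one attribute set like this is what lets the diff drop the duplicated etcd block from kubeConfig and declare it once under apiserver.etcd on the master.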