From 66d29be22ca2ee86c226b726bce2fd501b910ac0 Mon Sep 17 00:00:00 2001
From: Jonas Juselius
Date: Sat, 23 Feb 2019 15:34:28 +0100
Subject: [PATCH] Secure certificates after generation

---
 .gitignore                            |    1 +
 bin/deploy.sh                         |   15 +-
 initca.nix                            |    5 -
 kube0/build.nix                       |    2 +
 kube0/certs.nix                       |   36 +
 kube0.nix => kube0/deployment.nix     |   68 +-
 kube1.nix                             |   46 -
 kube1/build.nix                       |    2 +
 kube1/certs.nix                       |   29 +
 kube1/deployment.nix                  |   38 +
 lib/k8s.nix                           |   34 +-
 lib/pki.nix                           |   24 +-
 nixos/configuration.nix               |   16 +-
 nixos/hardware-configuration/k1-2.nix |   21 +
 nixos/overlays/dashboard.nix          |  330 +++++++
 nixos/overlays/dns.nix                |  329 +++++++
 nixos/overlays/kubernetes.nix         |   22 +-
 nixos/overlays/kubernetes.nix.bak.2   | 1216 +++++++++++++++++++++++++
 nixos/overlays/overlays.nix           |    8 +-
 19 files changed, 2098 insertions(+), 144 deletions(-)
 delete mode 100644 initca.nix
 create mode 100644 kube0/build.nix
 create mode 100644 kube0/certs.nix
 rename kube0.nix => kube0/deployment.nix (51%)
 delete mode 100644 kube1.nix
 create mode 100644 kube1/build.nix
 create mode 100644 kube1/certs.nix
 create mode 100644 kube1/deployment.nix
 create mode 100644 nixos/hardware-configuration/k1-2.nix
 create mode 100644 nixos/overlays/dashboard.nix
 create mode 100644 nixos/overlays/dns.nix
 create mode 100644 nixos/overlays/kubernetes.nix.bak.2

diff --git a/.gitignore b/.gitignore
index 9135c35..6a2fe53 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,4 @@
 *.csr
 result
 result-*
+gcroots/
diff --git a/bin/deploy.sh b/bin/deploy.sh
index 335a240..362b516 100755
--- a/bin/deploy.sh
+++ b/bin/deploy.sh
@@ -5,5 +5,18 @@ if [ $# = 0 ]; then
   exit 1
 fi
 
-nixops modify -d $1 $1.nix
+if [ ! -f $1/deployment.nix ]; then
+  echo "error: $1 does not contain a deployment"
+  exit 1
+fi
+
+mkdir -p $1/gcroots
+
+echo "--- Securing certificates"
+nix-build -o $1/gcroots/certs $1/build.nix
+
+echo "--- Updating deployment"
+nixops modify -d $1 $1/deployment.nix
+
+echo "--- Deploying $1"
 nixops deploy -d $*
diff --git a/initca.nix b/initca.nix
deleted file mode 100644
index 4784a7e..0000000
--- a/initca.nix
+++ /dev/null
@@ -1,5 +0,0 @@
-with import <nixpkgs> {};
-let
-  pki = pkgs.callPackage ./lib/pki.nix {};
-in
-  pki.initca
diff --git a/kube0/build.nix b/kube0/build.nix
new file mode 100644
index 0000000..0961011
--- /dev/null
+++ b/kube0/build.nix
@@ -0,0 +1,2 @@
+with import <nixpkgs> {};
+pkgs.callPackage ./certs.nix {}
diff --git a/kube0/certs.nix b/kube0/certs.nix
new file mode 100644
index 0000000..7965c9e
--- /dev/null
+++ b/kube0/certs.nix
@@ -0,0 +1,36 @@
+{ pkgs, ...}:
+let
+  pki = pkgs.callPackage ../lib/pki.nix {};
+in
+{
+  initca = pki.initca;
+  ca = pki.ca;
+  apiserver = pki.apiserver ''
+    "10.253.18.100",
+    "10.0.0.1",
+    "127.0.0.1",
+    "kubernetes",
+    "kubernetes.default.svc",
+    "etcd0",
+    "fs0-2",
+    "k0-0"
+  '';
+  kube-proxy = pki.kube-proxy;
+  admin = pki.admin;
+  etcd = pki.etcd ''
+    "etcd0",
+    "etcd1",
+    "etcd2",
+    "10.253.18.100",
+    "10.253.18.101",
+    "10.253.18.102",
+    "127.0.0.1"
+  '';
+  k0-0 = pki.worker { name = "k0-0"; ip = "10.253.18.100"; };
+  k0-1 = pki.worker { name = "k0-1"; ip = "10.253.18.101"; };
+  k0-2 = pki.worker { name = "k0-2"; ip = "10.253.18.102"; };
+  k0-3 = pki.worker { name = "k0-3"; ip = "10.253.18.103"; };
+  k0-4 = pki.worker { name = "k0-4"; ip = "10.253.18.107"; };
+  k0-5 = pki.worker { name = "k0-5"; ip = "10.253.18.108"; };
+}
+
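Aside: the new certs.nix / build.nix split is what bin/deploy.sh realises before every deploy. A minimal sketch of the idea (an illustration only, not part of the patch; it assumes <nixpkgs> on NIX_PATH, exactly as build.nix does, and uses attribute names from kube0/certs.nix above):

  # build.nix evaluates certs.nix to a single attribute set of certificate
  # derivations; `nix-build -o kube0/gcroots/certs kube0/build.nix` realises
  # them and registers the result symlink as a Nix GC root, so
  # nix-collect-garbage cannot delete the generated keys between deploys.
  with import <nixpkgs> {};
  let
    certs = pkgs.callPackage ./kube0/certs.nix { };
  in {
    # `nix-build -A apiserver` would realise just this one certificate
    inherit (certs) apiserver etcd admin;
  }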
diff --git a/kube0.nix b/kube0/deployment.nix
similarity index 51%
rename from kube0.nix
rename to kube0/deployment.nix
index b2009a0..b1ed480 100644
--- a/kube0.nix
+++ b/kube0/deployment.nix
@@ -1,42 +1,12 @@
 with import <nixpkgs> {};
 let
-  pki = pkgs.callPackage ./lib/pki.nix {};
-  certs = {
-    ca = pki.ca;
-    apiserver = pki.apiserver ''
-      "10.253.18.100",
-      "10.0.0.1",
-      "127.0.0.1",
-      "kubernetes",
-      "kubernetes.default.svc",
-      "etcd0",
-      "fs0-2",
-      "k0-0"
-    '';
-    kube-proxy = pki.kube-proxy;
-    admin = pki.admin;
-    etcd = pki.etcd ''
-      "etcd0",
-      "etcd1",
-      "etcd2",
-      "10.253.18.100",
-      "10.253.18.101",
-      "10.253.18.102",
-      "127.0.0.1"
-    '';
-    k0-0 = pki.worker { name = "k0-0"; ip = "10.253.18.100"; };
-    k0-1 = pki.worker { name = "k0-1"; ip = "10.253.18.101"; };
-    k0-2 = pki.worker { name = "k0-2"; ip = "10.253.18.102"; };
-    k0-3 = pki.worker { name = "k0-3"; ip = "10.253.18.103"; };
-    k0-4 = pki.worker { name = "k0-4"; ip = "10.253.18.107"; };
-    k0-5 = pki.worker { name = "k0-5"; ip = "10.253.18.108"; };
-  };
-  cluster = callPackage ./lib/k8s.nix {
+  certs = pkgs.callPackage ./certs.nix {};
+  pki = pkgs.callPackage ../lib/pki.nix {};
+  cluster = callPackage ../lib/k8s.nix {
     masterNode = "10.253.18.100";
     etcdNodes = [ "etcd0" "etcd1" "etcd2" ];
     clusterHosts = ''
       10.253.18.100 k0-0 etcd0 kubernetes
-      10.253.18.100 itp-registry helm-registry.local
       10.253.18.101 k0-1 etcd1
       10.253.18.102 k0-2 etcd2
       10.253.18.103 k0-3
@@ -45,9 +15,23 @@ let
       10.253.18.106 fs0-0 fs0-0.local
       10.1.2.164 fs0-1 fs0-1.local
       10.253.18.100 fs0-2 fs0-2.local
+      10.253.18.100 itp-registry registry.itpartner.no minio.itpartner.no
+      10.253.18.100 nuget.itpartner.no
       10.253.18.109 k1-0
     '';
-    inherit certs;
+    certs = {
+      ca = certs.ca;
+      apiserver = pki.toSet certs.apiserver;
+      kube-proxy = pki.toSet certs.kube-proxy;
+      admin = pki.toSet certs.admin;
+      etcd = pki.toSet certs.etcd;
+      k0-0 = pki.toSet certs.k0-0;
+      k0-1 = pki.toSet certs.k0-1;
+      k0-2 = pki.toSet certs.k0-2;
+      k0-3 = pki.toSet certs.k0-3;
+      k0-4 = pki.toSet certs.k0-4;
+      k0-5 = pki.toSet certs.k0-5;
+    };
   };
 in
 {
@@ -58,7 +42,7 @@
         "dm_mirror"
         "dm_thin_pool"
       ];
-      services.dnsmasq.enable = true;
+      # services.dnsmasq.enable = true;
      fileSystems."/data" = {
        device = "fs0-0:gv0";
        fsType = "glusterfs";
@@ -71,11 +55,11 @@
      networking.extraHosts = ''
        10.253.18.100 itp-registry itp-registry.local
        10.253.18.100 helm-registry helm-registry.local
+       10.253.18.100 gitlab.itpartner.no registry.itpartner.no minio.itpartner.no
        10.253.18.100 nuget.local
-       10.253.18.100 kibana.local
-       10.253.18.100 dashboard.cluster.local
-       10.253.18.100 gitlab.cluster.local
-       10.253.18.100 baywash.cluster.local
+       10.253.18.100 dashboard.k0.local
+       10.253.18.100 gitlab.k0.local
+       10.253.18.100 baywash.k0.local
      '';
      systemd.services.gitlab-upgrade = {
        description = "Upgrade gitlab by zapping pod";
@@ -90,10 +74,4 @@ in
   k0-3 = cluster.worker "10.253.18.103" "k0-3";
   k0-4 = cluster.worker "10.253.18.107" "k0-4";
   k0-5 = cluster.worker "10.253.18.108" "k0-5";
-  # k0-0 = cluster.plain "10.253.18.100" "k0-0";
-  # k0-1 = cluster.plain "10.253.18.101" "k0-1";
-  # k0-2 = cluster.plain "10.253.18.102" "k0-2";
-  # k0-3 = cluster.plain "10.253.18.103" "k0-3";
-  # k0-4 = cluster.plain "10.253.18.107" "k0-4";
-  # k0-5 = cluster.plain "10.253.18.108" "k0-5";
 }
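Aside: pki.toSet, added to lib/pki.nix later in this patch, is the bridge between the two files: certs.nix now returns raw certificate derivations, and the deployments wrap each one into the { key, cert } store-path pair that lib/k8s.nix consumes. A minimal sketch of the mapping (file names follow the cfssl/cfssljson output layout used in lib/pki.nix; the store hash shown is illustrative):

  # toSet as defined in lib/pki.nix: each certificate derivation's output
  # directory contains cert.pem and cert-key.pem.
  toSet = cert: {
    key = "${cert}/cert-key.pem";
    cert = "${cert}/cert.pem";
  };
  # e.g. pki.toSet certs.apiserver evaluates to something like:
  #   { key  = "/nix/store/<hash>-apiserver/cert-key.pem";
  #     cert = "/nix/store/<hash>-apiserver/cert.pem"; }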
"10.253.18.110", - "127.0.0.1" - ''; - k1-0 = pki.worker { name = "k1-0"; ip = "10.253.18.109"; }; - k1-1 = pki.worker { name = "k1-1"; ip = "10.253.18.110"; }; - }; - cluster = callPackage ./lib/k8s.nix { - masterNode = "10.253.18.109"; - etcdNodes = [ "etcd0" "etcd1" ]; - clusterHosts = '' - 10.253.18.109 k1-0 etcd0 kubernetes fs0-2 - 10.253.18.110 k1-1 etcd1 - 10.253.18.106 fs0-0 - 10.1.2.164 fs0-1 - 10.253.18.100 k0-0 - ''; - inherit certs; - }; -in -{ - k1-0 = { ... }: - { - require = [ (cluster.apiserver "10.253.18.109" "k1-0" "etcd0") ]; - services.dnsmasq.enable = true; - }; - k1-1 = cluster.server "10.253.18.110" "k1-1" "etcd1"; -} diff --git a/kube1/build.nix b/kube1/build.nix new file mode 100644 index 0000000..0961011 --- /dev/null +++ b/kube1/build.nix @@ -0,0 +1,2 @@ +with import {}; +pkgs.callPackage ./certs.nix {} diff --git a/kube1/certs.nix b/kube1/certs.nix new file mode 100644 index 0000000..efd469d --- /dev/null +++ b/kube1/certs.nix @@ -0,0 +1,29 @@ +{ pkgs, ...}: +let + pki = pkgs.callPackage ../lib/pki.nix {}; +in +{ + initca = pki.initca; + ca = pki.ca; + apiserver = pki.apiserver '' + "10.253.18.109", + "10.0.0.1", + "127.0.0.1", + "kubernetes", + "etcd0", + "k1-0" + ''; + kube-proxy = pki.kube-proxy; + admin = pki.admin; + etcd = pki.etcd '' + "etcd0", + "etcd1", + "10.253.18.109", + "10.253.18.110", + "127.0.0.1" + ''; + k1-0 = pki.worker { name = "k1-0"; ip = "10.253.18.109"; }; + k1-1 = pki.worker { name = "k1-1"; ip = "10.253.18.110"; }; + k1-2 = pki.worker { name = "k1-2"; ip = "10.253.18.111"; }; +} + diff --git a/kube1/deployment.nix b/kube1/deployment.nix new file mode 100644 index 0000000..0731b3f --- /dev/null +++ b/kube1/deployment.nix @@ -0,0 +1,38 @@ +with import {}; +let + certs = pkgs.callPackage ./certs.nix {}; + pki = pkgs.callPackage ../lib/pki.nix {}; + cluster = callPackage ../lib/k8s.nix { + masterNode = "10.253.18.109"; + etcdNodes = [ "etcd0" "etcd1" ]; + clusterHosts = '' + 10.253.18.109 k1-0 etcd0 kubernetes fs0-2 + 10.253.18.110 k1-1 etcd1 + 10.253.18.111 k1-2 + 10.253.18.106 fs0-0 + 10.1.2.164 fs0-1 + 10.253.18.100 k0-0 + 10.253.18.100 gitlab.itpartner.no registry.itpartner.no minio.itpartner.no + 10.253.18.109 gitlab.k1.local registry.k1.local minio.k1.local + 10.253.18.100 itp-registry itp-registry.local + ''; + certs = { + ca = certs.ca; + apiserver = pki.toSet certs.apiserver; + kube-proxy = pki.toSet certs.kube-proxy; + admin = pki.toSet certs.admin; + etcd = pki.toSet certs.etcd; + k1-0 = pki.toSet certs.k1-0; + k1-1 = pki.toSet certs.k1-1; + k1-2 = pki.toSet certs.k1-2; + }; + }; +in +{ + k1-0 = { ... }: + { + require = [ (cluster.apiserver "10.253.18.109" "k1-0" "etcd0") ]; + }; + k1-1 = cluster.server "10.253.18.110" "k1-1" "etcd1"; + k1-2 = cluster.worker "10.253.18.111" "k1-2"; +} diff --git a/lib/k8s.nix b/lib/k8s.nix index 7e9b4ba..3da0d1f 100644 --- a/lib/k8s.nix +++ b/lib/k8s.nix @@ -55,8 +55,8 @@ rec { networking = { firewall = { enable = true; - allowedTCPPorts = [ 53 4194 10250 ]; - allowedUDPPorts = [ 53 ]; + allowedTCPPorts = [ 4194 10250 ]; + # allowedUDPPorts = [ 53 ]; extraCommands = ''iptables -m comment --comment "pod external access" -t nat -A POSTROUTING ! -d 10.10.0.0/16 -m addrtype ! 
--dst-type LOCAL -j MASQUERADE''; }; }; @@ -69,7 +69,7 @@ rec { roles = [ "master" ]; kubelet.unschedulable = false; apiserver = { - bindAddress = "0.0.0.0"; #masterNode; + bindAddress = "0.0.0.0"; #masterNode; advertiseAddress = masterNode; authorizationMode = [ "Node" "RBAC" ]; securePort = 8443; @@ -89,20 +89,25 @@ rec { kubeconfig.server = localApiserver; }; scheduler.kubeconfig.server = localApiserver; + addons.dns.enable = true; + addons.dns.reconcileMode = "EnsureExists"; addons.dashboard = rec { enable = true; version = "v1.10.0"; + rbac.enable = true; + rbac.clusterAdmin = true; + tokenTtl = 0; image = { - imageName = "k8s.gcr.io/kubernetes-dashboard-amd64"; - imageDigest = "sha256:1d2e1229a918f4bc38b5a3f9f5f11302b3e71f8397b492afac7f273a0008776a"; - finalImageTag = version; - sha256 = "10qkqqhzkr0bcv0dlf8nq069h190pw6zjj1l5s5g438g80v8639j"; + imageName = "k8s.gcr.io/kubernetes-dashboard-amd64"; + imageDigest = "sha256:1d2e1229a918f4bc38b5a3f9f5f11302b3e71f8397b492afac7f273a0008776a"; + finalImageTag = version; + sha256 = "10qkqqhzkr0bcv0dlf8nq069h190pw6zjj1l5s5g438g80v8639j"; }; }; }; networking.firewall = { - allowedTCPPorts = [ 5000 8080 8443 ]; #;4053 ]; - # allowedUDPPorts = [ 4053 ]; + allowedTCPPorts = [ 53 5000 8080 8443 ]; #;4053 ]; + allowedUDPPorts = [ 53 4053 ]; }; environment.systemPackages = [ pkgs.kubernetes-helm ]; }; @@ -130,6 +135,9 @@ rec { (../nixos/hardware-configuration + "/${instance}.nix") ../nixos/configuration.nix ]; + security.pki.certificateFiles = [ + certs.ca.cert + ]; services.glusterfs = { enable = true; tlsSettings = { @@ -141,12 +149,14 @@ rec { networking = { hostName = instance; extraHosts = clusterHosts; + # nameservers = [ masterNode ]; + # dhcpcd.extraConfig = '' + # static domain_name_servers=${masterNode} + # ''; firewall.allowedTCPPortRanges = [ { from = 5000; to = 50000; } ]; firewall.allowedTCPPorts = [ 80 443 111 ]; firewall.allowedUDPPorts = [ 111 24007 24008 ]; }; - environment.systemPackages = [ pkgs.tshark ]; - # services.dnsmasq.enable = true; }; plain = ip: name: { config, lib, pkgs, ... 
}: @@ -193,6 +203,8 @@ rec { services.dockerRegistry = { enable = true; listenAddress = "0.0.0.0"; + enableDelete = true; + enableGarbageCollect = true; extraConfig = { REGISTRY_HTTP_TLS_CERTIFICATE = "${certs.apiserver.cert}"; REGISTRY_HTTP_TLS_KEY = "${certs.apiserver.key}"; diff --git a/lib/pki.nix b/lib/pki.nix index 7c680e6..671ae6e 100644 --- a/lib/pki.nix +++ b/lib/pki.nix @@ -36,7 +36,7 @@ } ''; - initca = + initca' = let ca_csr = gencsr { name = "kubernetes"; @@ -51,9 +51,9 @@ mkdir -p $out; cp *.pem $out''; # make ca derivation sha depend on initca cfssl output - initca' = pkgs.stdenv.mkDerivation { + initca = pkgs.stdenv.mkDerivation { name = "ca"; - src = initca; + src = initca'; buildCommand = '' mkdir -p $out; cp -r $src/* $out @@ -61,8 +61,8 @@ }; ca = { - key = "${initca'}/ca-key.pem"; - cert = "${initca'}/ca.pem"; + key = "${initca}/ca-key.pem"; + cert = "${initca}/ca.pem"; }; cfssl = conf: '' @@ -72,16 +72,16 @@ mkdir -p $out; cp *.pem $out ''; + toSet = cert: + { + key = "${cert}/cert-key.pem"; + cert = "${cert}/cert.pem"; + }; + gencert = conf: - let - drv = pkgs.runCommand "${conf.name}" { + pkgs.runCommand "${conf.name}" { buildInputs = [ pkgs.cfssl ]; } (cfssl conf); - in - { - key = "${drv}/cert-key.pem"; - cert = "${drv}/cert.pem"; - }; admin = gencert rec { name = "admin"; diff --git a/nixos/configuration.nix b/nixos/configuration.nix index 1fe9cf4..fe2d29f 100644 --- a/nixos/configuration.nix +++ b/nixos/configuration.nix @@ -27,17 +27,23 @@ security.rtkit.enable = true; - disabledModules = [ "services/cluster/kubernetes/default.nix" ]; + disabledModules = [ + # "services/cluster/kubernetes/default.nix" + "services/cluster/kubernetes/dns.nix" + "services/cluster/kubernetes/dashboard.nix" + ]; imports = [ ./users.nix ./packages.nix - ./overlays/kubernetes.nix + ./overlays/dns.nix + ./overlays/dashboard.nix + # ./overlays/kubernetes.nix ]; - nixpkgs.overlays = [ - (import ./overlays/overlays.nix) - ]; + # nixpkgs.overlays = [ + # (import ./overlays/overlays.nix) + # ]; users.extraUsers.root.openssh.authorizedKeys.keys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKiAS30ZO+wgfAqDE9Y7VhRunn2QszPHA5voUwo+fGOf jonas" diff --git a/nixos/hardware-configuration/k1-2.nix b/nixos/hardware-configuration/k1-2.nix new file mode 100644 index 0000000..f0575f4 --- /dev/null +++ b/nixos/hardware-configuration/k1-2.nix @@ -0,0 +1,21 @@ +# Do not modify this file! It was generated by ‘nixos-generate-config’ +# and may be overwritten by future invocations. Please make changes +# to /etc/nixos/configuration.nix instead. +{ config, lib, pkgs, ... }: + +{ + imports = [ ]; + + boot.initrd.availableKernelModules = [ "ata_piix" "mptspi" "floppy" "sd_mod" "sr_mod" ]; + boot.kernelModules = [ ]; + boot.extraModulePackages = [ ]; + + fileSystems."/" = + { device = "/dev/disk/by-uuid/83bb471d-1db7-4c0b-b8aa-8111730a1ea9"; + fsType = "ext4"; + }; + + swapDevices = [ ]; + + nix.maxJobs = lib.mkDefault 1; +} diff --git a/nixos/overlays/dashboard.nix b/nixos/overlays/dashboard.nix new file mode 100644 index 0000000..250eb31 --- /dev/null +++ b/nixos/overlays/dashboard.nix @@ -0,0 +1,330 @@ +{ config, pkgs, lib, ... 
}:
+
+with lib;
+
+let
+  cfg = config.services.kubernetes.addons.dashboard;
+in {
+  options.services.kubernetes.addons.dashboard = {
+    enable = mkEnableOption "kubernetes dashboard addon";
+
+    rbac = mkOption {
+      description = "Role-based access control (RBAC) options";
+      default = {};
+      type = types.submodule {
+        options = {
+          enable = mkOption {
+            description = "Whether role-based access control is enabled for the kubernetes dashboard";
+            type = types.bool;
+            default = elem "RBAC" config.services.kubernetes.apiserver.authorizationMode;
+          };
+
+          clusterAdmin = mkOption {
+            description = "Whether to assign cluster admin rights to the kubernetes dashboard";
+            type = types.bool;
+            default = false;
+          };
+        };
+      };
+    };
+
+    version = mkOption {
+      description = "Which version of the kubernetes dashboard to deploy";
+      type = types.str;
+      default = "v1.8.3";
+    };
+
+    image = mkOption {
+      description = "Docker image to seed for the kubernetes dashboard container.";
+      type = types.attrs;
+      default = {
+        imageName = "k8s.gcr.io/kubernetes-dashboard-amd64";
+        imageDigest = "sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0";
+        finalImageTag = cfg.version;
+        sha256 = "18ajcg0q1vignfjk2sm4xj4wzphfz8wah69ps8dklqfvv0164mc8";
+      };
+    };
+
+    tokenTtl = mkOption {
+      description = "Expiration time (in minutes) of JWE tokens generated by dashboard. Default: 15 min. 0 - never expires.";
+      type = types.int;
+      default = 15;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.kubernetes.kubelet.seedDockerImages = [(pkgs.dockerTools.pullImage cfg.image)];
+
+    services.kubernetes.addonManager.addons = {
+      kubernetes-dashboard-deployment = {
+        kind = "Deployment";
+        apiVersion = "apps/v1";
+        metadata = {
+          labels = {
+            k8s-addon = "kubernetes-dashboard.addons.k8s.io";
+            k8s-app = "kubernetes-dashboard";
+            version = cfg.version;
+            "kubernetes.io/cluster-service" = "true";
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+          };
+          name = "kubernetes-dashboard";
+          namespace = "kube-system";
+        };
+        spec = {
+          replicas = 1;
+          revisionHistoryLimit = 10;
+          selector.matchLabels."k8s-app" = "kubernetes-dashboard";
+          template = {
+            metadata = {
+              labels = {
+                k8s-addon = "kubernetes-dashboard.addons.k8s.io";
+                k8s-app = "kubernetes-dashboard";
+                version = cfg.version;
+                "kubernetes.io/cluster-service" = "true";
+              };
+              annotations = {
+                "scheduler.alpha.kubernetes.io/critical-pod" = "";
+              };
+            };
+            spec = {
+              priorityClassName = "system-cluster-critical";
+              containers = [{
+                name = "kubernetes-dashboard";
+                image = with cfg.image; "${imageName}:${finalImageTag}";
+                ports = [{
+                  containerPort = 8443;
+                  protocol = "TCP";
+                }];
+                resources = {
+                  limits = {
+                    cpu = "100m";
+                    memory = "300Mi";
+                  };
+                  requests = {
+                    cpu = "100m";
+                    memory = "100Mi";
+                  };
+                };
+                args = [
+                  "--auto-generate-certificates"
+                  "--token-ttl=${toString cfg.tokenTtl}"
+                ];
+                volumeMounts = [{
+                  name = "tmp-volume";
+                  mountPath = "/tmp";
+                } {
+                  name = "kubernetes-dashboard-certs";
+                  mountPath = "/certs";
+                }];
+                livenessProbe = {
+                  httpGet = {
+                    scheme = "HTTPS";
+                    path = "/";
+                    port = 8443;
+                  };
+                  initialDelaySeconds = 30;
+                  timeoutSeconds = 30;
+                };
+              }];
+              volumes = [{
+                name = "kubernetes-dashboard-certs";
+                secret = {
+                  secretName = "kubernetes-dashboard-certs";
+                };
+              } {
+                name = "tmp-volume";
+                emptyDir = {};
+              }];
+              serviceAccountName = "kubernetes-dashboard";
+              tolerations = [{
+                key = "node-role.kubernetes.io/master";
+                effect = "NoSchedule";
+              } {
+                key = "CriticalAddonsOnly";
+                operator = "Exists";
+              }];
+            };
+          };
+        };
}; + + kubernetes-dashboard-svc = { + apiVersion = "v1"; + kind = "Service"; + metadata = { + labels = { + k8s-addon = "kubernetes-dashboard.addons.k8s.io"; + k8s-app = "kubernetes-dashboard"; + "kubernetes.io/cluster-service" = "true"; + "kubernetes.io/name" = "KubeDashboard"; + "addonmanager.kubernetes.io/mode" = "Reconcile"; + }; + name = "kubernetes-dashboard"; + namespace = "kube-system"; + }; + spec = { + ports = [{ + port = 443; + targetPort = 8443; + }]; + selector.k8s-app = "kubernetes-dashboard"; + }; + }; + + kubernetes-dashboard-sa = { + apiVersion = "v1"; + kind = "ServiceAccount"; + metadata = { + labels = { + k8s-app = "kubernetes-dashboard"; + k8s-addon = "kubernetes-dashboard.addons.k8s.io"; + "addonmanager.kubernetes.io/mode" = "Reconcile"; + }; + name = "kubernetes-dashboard"; + namespace = "kube-system"; + }; + }; + kubernetes-dashboard-sec-certs = { + apiVersion = "v1"; + kind = "Secret"; + metadata = { + labels = { + k8s-app = "kubernetes-dashboard"; + # Allows editing resource and makes sure it is created first. + "addonmanager.kubernetes.io/mode" = "EnsureExists"; + }; + name = "kubernetes-dashboard-certs"; + namespace = "kube-system"; + }; + type = "Opaque"; + }; + kubernetes-dashboard-sec-kholder = { + apiVersion = "v1"; + kind = "Secret"; + metadata = { + labels = { + k8s-app = "kubernetes-dashboard"; + # Allows editing resource and makes sure it is created first. + "addonmanager.kubernetes.io/mode" = "EnsureExists"; + }; + name = "kubernetes-dashboard-key-holder"; + namespace = "kube-system"; + }; + type = "Opaque"; + }; + kubernetes-dashboard-cm = { + apiVersion = "v1"; + kind = "ConfigMap"; + metadata = { + labels = { + k8s-app = "kubernetes-dashboard"; + # Allows editing resource and makes sure it is created first. + "addonmanager.kubernetes.io/mode" = "EnsureExists"; + }; + name = "kubernetes-dashboard-settings"; + namespace = "kube-system"; + }; + }; + } // (optionalAttrs cfg.rbac.enable + (let + subjects = [{ + kind = "ServiceAccount"; + name = "kubernetes-dashboard"; + namespace = "kube-system"; + }]; + labels = { + k8s-app = "kubernetes-dashboard"; + k8s-addon = "kubernetes-dashboard.addons.k8s.io"; + "addonmanager.kubernetes.io/mode" = "Reconcile"; + }; + in + (if cfg.rbac.clusterAdmin then { + kubernetes-dashboard-crb = { + apiVersion = "rbac.authorization.k8s.io/v1"; + kind = "ClusterRoleBinding"; + metadata = { + name = "kubernetes-dashboard"; + inherit labels; + }; + roleRef = { + apiGroup = "rbac.authorization.k8s.io"; + kind = "ClusterRole"; + name = "cluster-admin"; + }; + inherit subjects; + }; + } + else + { + # Upstream role- and rolebinding as per: + # https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/alternative/kubernetes-dashboard.yaml + kubernetes-dashboard-role = { + apiVersion = "rbac.authorization.k8s.io/v1"; + kind = "Role"; + metadata = { + name = "kubernetes-dashboard-minimal"; + namespace = "kube-system"; + inherit labels; + }; + rules = [ + # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret. + { + apiGroups = [""]; + resources = ["secrets"]; + verbs = ["create"]; + } + # Allow Dashboard to create 'kubernetes-dashboard-settings' config map. + { + apiGroups = [""]; + resources = ["configmaps"]; + verbs = ["create"]; + } + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. 
+              {
+                apiGroups = [""];
+                resources = ["secrets"];
+                resourceNames = ["kubernetes-dashboard-key-holder"];
+                verbs = ["get" "update" "delete"];
+              }
+              # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
+              {
+                apiGroups = [""];
+                resources = ["configmaps"];
+                resourceNames = ["kubernetes-dashboard-settings"];
+                verbs = ["get" "update"];
+              }
+              # Allow Dashboard to get metrics from heapster.
+              {
+                apiGroups = [""];
+                resources = ["services"];
+                resourceNames = ["heapster"];
+                verbs = ["proxy"];
+              }
+              {
+                apiGroups = [""];
+                resources = ["services/proxy"];
+                resourceNames = ["heapster" "http:heapster:" "https:heapster:"];
+                verbs = ["get"];
+              }
+            ];
+          };
+
+          kubernetes-dashboard-rb = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "RoleBinding";
+            metadata = {
+              name = "kubernetes-dashboard-minimal";
+              namespace = "kube-system";
+              inherit labels;
+            };
+            roleRef = {
+              apiGroup = "rbac.authorization.k8s.io";
+              kind = "Role";
+              name = "kubernetes-dashboard-minimal";
+            };
+            inherit subjects;
+          };
+        })
+      ));
+  };
+}
diff --git a/nixos/overlays/dns.nix b/nixos/overlays/dns.nix
new file mode 100644
index 0000000..ce08ce2
--- /dev/null
+++ b/nixos/overlays/dns.nix
@@ -0,0 +1,329 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  version = "1.2.5";
+  cfg = config.services.kubernetes.addons.dns;
+  ports = {
+    dns = 10053;
+    health = 10054;
+    metrics = 10055;
+  };
+in {
+  options.services.kubernetes.addons.dns = {
+    enable = mkEnableOption "kubernetes dns addon";
+
+    clusterIp = mkOption {
+      description = "Dns addon clusterIP";
+
+      # this default is also what kubernetes uses
+      default = (
+        concatStringsSep "." (
+          take 3 (splitString "." config.services.kubernetes.apiserver.serviceClusterIpRange
+        ))
+      ) + ".254";
+      type = types.str;
+    };
+
+    clusterDomain = mkOption {
+      description = "Dns cluster domain";
+      default = "cluster.local";
+      type = types.str;
+    };
+
+    replicas = mkOption {
+      description = "Number of DNS pod replicas to deploy in the cluster.";
+      default = 2;
+      type = types.int;
+    };
+
+    reconcileMode = mkOption {
+      description = ''
+        Controls the addon manager reconciliation mode for the DNS addon.
+ See: + ''; + default = "Reconcile"; + type = types.enum [ "Reconcile" "EnsureExists" ]; + }; + + coredns = mkOption { + description = "Docker image to seed for the CoreDNS container."; + type = types.attrs; + default = { + imageName = "coredns/coredns"; + imageDigest = "sha256:33c8da20b887ae12433ec5c40bfddefbbfa233d5ce11fb067122e68af30291d6"; + finalImageTag = version; + sha256 = "13q19rgwapv27xcs664dw502254yw4zw63insf6g2danidv2mg6i"; + }; + }; + }; + + config = mkIf cfg.enable { + services.kubernetes.kubelet.seedDockerImages = + singleton (pkgs.dockerTools.pullImage cfg.coredns); + + services.kubernetes.addonManager.addons = { + coredns-sa = { + apiVersion = "v1"; + kind = "ServiceAccount"; + metadata = { + labels = { + "addonmanager.kubernetes.io/mode" = "Reconcile"; + "k8s-app" = "kube-dns"; + "kubernetes.io/cluster-service" = "true"; + }; + name = "coredns"; + namespace = "kube-system"; + }; + }; + + coredns-cr = { + apiVersion = "rbac.authorization.k8s.io/v1beta1"; + kind = "ClusterRole"; + metadata = { + labels = { + "addonmanager.kubernetes.io/mode" = "Reconcile"; + "k8s-app" = "kube-dns"; + "kubernetes.io/cluster-service" = "true"; + "kubernetes.io/bootstrapping" = "rbac-defaults"; + }; + name = "system:coredns"; + }; + rules = [ + { + apiGroups = [ "" ]; + resources = [ "endpoints" "services" "pods" "namespaces" ]; + verbs = [ "list" "watch" ]; + } + { + apiGroups = [ "" ]; + resources = [ "nodes" ]; + verbs = [ "get" ]; + } + ]; + }; + + coredns-crb = { + apiVersion = "rbac.authorization.k8s.io/v1beta1"; + kind = "ClusterRoleBinding"; + metadata = { + annotations = { + "rbac.authorization.kubernetes.io/autoupdate" = "true"; + }; + labels = { + "addonmanager.kubernetes.io/mode" = "Reconcile"; + "k8s-app" = "kube-dns"; + "kubernetes.io/cluster-service" = "true"; + "kubernetes.io/bootstrapping" = "rbac-defaults"; + }; + name = "system:coredns"; + }; + roleRef = { + apiGroup = "rbac.authorization.k8s.io"; + kind = "ClusterRole"; + name = "system:coredns"; + }; + subjects = [ + { + kind = "ServiceAccount"; + name = "coredns"; + namespace = "kube-system"; + } + ]; + }; + + coredns-cm = { + apiVersion = "v1"; + kind = "ConfigMap"; + metadata = { + labels = { + "addonmanager.kubernetes.io/mode" = cfg.reconcileMode; + "k8s-app" = "kube-dns"; + "kubernetes.io/cluster-service" = "true"; + }; + name = "coredns"; + namespace = "kube-system"; + }; + data = { + Corefile = ".:${toString ports.dns} { + errors + health :${toString ports.health} + kubernetes ${cfg.clusterDomain} in-addr.arpa ip6.arpa { + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa + } + prometheus :${toString ports.metrics} + proxy . 
/etc/resolv.conf + cache 30 + loop + reload + loadbalance + }"; + }; + }; + + coredns-deploy = { + apiVersion = "extensions/v1beta1"; + kind = "Deployment"; + metadata = { + labels = { + "addonmanager.kubernetes.io/mode" = cfg.reconcileMode; + "k8s-app" = "kube-dns"; + "kubernetes.io/cluster-service" = "true"; + "kubernetes.io/name" = "CoreDNS"; + }; + name = "coredns"; + namespace = "kube-system"; + }; + spec = { + replicas = cfg.replicas; + selector = { + matchLabels = { k8s-app = "kube-dns"; }; + }; + strategy = { + rollingUpdate = { maxUnavailable = 1; }; + type = "RollingUpdate"; + }; + template = { + metadata = { + labels = { + k8s-app = "kube-dns"; + }; + }; + spec = { + containers = [ + { + args = [ "-conf" "/etc/coredns/Corefile" ]; + image = with cfg.coredns; "${imageName}:${finalImageTag}"; + imagePullPolicy = "Never"; + livenessProbe = { + failureThreshold = 5; + httpGet = { + path = "/health"; + port = ports.health; + scheme = "HTTP"; + }; + initialDelaySeconds = 60; + successThreshold = 1; + timeoutSeconds = 5; + }; + name = "coredns"; + ports = [ + { + containerPort = ports.dns; + name = "dns"; + protocol = "UDP"; + } + { + containerPort = ports.dns; + name = "dns-tcp"; + protocol = "TCP"; + } + { + containerPort = ports.metrics; + name = "metrics"; + protocol = "TCP"; + } + ]; + resources = { + limits = { + memory = "170Mi"; + }; + requests = { + cpu = "100m"; + memory = "70Mi"; + }; + }; + securityContext = { + allowPrivilegeEscalation = false; + capabilities = { + drop = [ "all" ]; + }; + readOnlyRootFilesystem = true; + }; + volumeMounts = [ + { + mountPath = "/etc/coredns"; + name = "config-volume"; + readOnly = true; + } + ]; + } + ]; + dnsPolicy = "Default"; + nodeSelector = { + "beta.kubernetes.io/os" = "linux"; + }; + serviceAccountName = "coredns"; + tolerations = [ + { + effect = "NoSchedule"; + key = "node-role.kubernetes.io/master"; + } + { + key = "CriticalAddonsOnly"; + operator = "Exists"; + } + ]; + volumes = [ + { + configMap = { + items = [ + { + key = "Corefile"; + path = "Corefile"; + } + ]; + name = "coredns"; + }; + name = "config-volume"; + } + ]; + }; + }; + }; + }; + + coredns-svc = { + apiVersion = "v1"; + kind = "Service"; + metadata = { + annotations = { + "prometheus.io/port" = toString ports.metrics; + "prometheus.io/scrape" = "true"; + }; + labels = { + "addonmanager.kubernetes.io/mode" = "Reconcile"; + "k8s-app" = "kube-dns"; + "kubernetes.io/cluster-service" = "true"; + "kubernetes.io/name" = "CoreDNS"; + }; + name = "kube-dns"; + namespace = "kube-system"; + }; + spec = { + clusterIP = cfg.clusterIp; + ports = [ + { + name = "dns"; + port = 53; + targetPort = ports.dns; + protocol = "UDP"; + } + { + name = "dns-tcp"; + port = 53; + targetPort = ports.dns; + protocol = "TCP"; + } + ]; + selector = { k8s-app = "kube-dns"; }; + }; + }; + }; + + services.kubernetes.kubelet.clusterDns = mkDefault cfg.clusterIp; + }; +} diff --git a/nixos/overlays/kubernetes.nix b/nixos/overlays/kubernetes.nix index e0c1e2c..351e1d4 100644 --- a/nixos/overlays/kubernetes.nix +++ b/nixos/overlays/kubernetes.nix @@ -60,7 +60,7 @@ let cluster.server = cfg.server; }]; users = [{ - name = name'; + name = "kubelet"; user = { client-certificate = cfg.certFile; client-key = cfg.keyFile; @@ -69,9 +69,9 @@ let contexts = [{ context = { cluster = "local"; - user = name'; + user = "kubelet"; }; - current-context = "default"; + current-context = "kubelet-context"; }]; })); @@ -630,13 +630,6 @@ in { type = types.bool; }; - # TODO: remove this deprecated flag - cadvisorPort = 
mkOption { - description = "Kubernetes kubelet local cadvisor port."; - default = 4194; - type = types.int; - }; - clusterDns = mkOption { description = "Use alternative DNS."; default = "10.1.0.1"; @@ -799,7 +792,7 @@ in { clusterCidr = mkOption { description = "Kubernetes controller manager and proxy CIDR Range for Pods in cluster."; default = "10.1.0.0/16"; - type = types.str; + type = types.nullOr types.str; }; flannel.enable = mkOption { @@ -870,7 +863,6 @@ in { --hostname-override=${cfg.kubelet.hostname} \ --allow-privileged=${boolToString cfg.kubelet.allowPrivileged} \ --root-dir=${cfg.dataDir} \ - --cadvisor_port=${toString cfg.kubelet.cadvisorPort} \ ${optionalString (cfg.kubelet.clusterDns != "") "--cluster-dns=${cfg.kubelet.clusterDns}"} \ ${optionalString (cfg.kubelet.clusterDomain != "") @@ -1034,9 +1026,9 @@ in { ${if (cfg.controllerManager.rootCaFile!=null) then "--root-ca-file=${cfg.controllerManager.rootCaFile}" else "--root-ca-file=/var/run/kubernetes/apiserver.crt"} \ - ${optionalString (cfg.clusterCidr!=null) - "--cluster-cidr=${cfg.clusterCidr}"} \ - --allocate-node-cidrs=true \ + ${if (cfg.clusterCidr!=null) + then "--cluster-cidr=${cfg.clusterCidr} --allocate-node-cidrs=true" + else "--allocate-node-cidrs=false"} \ ${optionalString (cfg.controllerManager.featureGates != []) "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.controllerManager.featureGates}"} \ ${optionalString cfg.verbose "--v=6"} \ diff --git a/nixos/overlays/kubernetes.nix.bak.2 b/nixos/overlays/kubernetes.nix.bak.2 new file mode 100644 index 0000000..2e0677b --- /dev/null +++ b/nixos/overlays/kubernetes.nix.bak.2 @@ -0,0 +1,1216 @@ +{ config, lib, pkgs, ... }: + +with lib; + +let + cfg = config.services.kubernetes; + + # YAML config; see: + # https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ + # https://github.com/kubernetes/kubernetes/blob/release-1.10/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go + # + # TODO: migrate the following flags to this config file + # + # --pod-manifest-path + # --address + # --port + # --tls-cert-file + # --tls-private-key-file + # --client-ca-file + # --authentication-token-webhook + # --authentication-token-webhook-cache-ttl + # --authorization-mode + # --healthz-bind-address + # --healthz-port + # --allow-privileged + # --cluster-dns + # --cluster-domain + # --hairpin-mode + # --feature-gates + kubeletConfig = pkgs.runCommand "kubelet-config.yaml" { } '' + echo > $out ${pkgs.lib.escapeShellArg (builtins.toJSON { + kind = "KubeletConfiguration"; + apiVersion = "kubelet.config.k8s.io/v1beta1"; + ${if cfg.kubelet.applyManifests then "staticPodPath" else null} = + manifests; + })} + ''; + + infraContainer = pkgs.dockerTools.buildImage { + name = "pause"; + tag = "latest"; + contents = cfg.package.pause; + config.Cmd = "/bin/pause"; + }; + + mkKubeConfig = name: cfg: pkgs.writeText "${name}-kubeconfig" (builtins.toJSON ( + let name' = + if name == "kubelet" + then "system:node:${config.services.kubernetes.kubelet.hostname}" + else if name == "kube-proxy" + then "system:kube-proxy" + else name; + in + { + apiVersion = "v1"; + kind = "Config"; + clusters = [{ + name = "local"; + cluster.certificate-authority = cfg.caFile; + cluster.server = cfg.server; + }]; + users = [{ + name = name'; + user = { + client-certificate = cfg.certFile; + client-key = cfg.keyFile; + }; + }]; + contexts = [{ + context = { + cluster = "local"; + user = name'; + }; + current-context = "default"; + }]; + })); + + mkKubeConfigOptions = prefix: { + 
server = mkOption { + description = "${prefix} kube-apiserver server address."; + default = "http://${if cfg.apiserver.advertiseAddress != null + then cfg.apiserver.advertiseAddress + else "127.0.0.1"}:${toString cfg.apiserver.port}"; + type = types.str; + }; + + caFile = mkOption { + description = "${prefix} certificate authority file used to connect to kube-apiserver."; + type = types.nullOr types.path; + default = cfg.caFile; + }; + + certFile = mkOption { + description = "${prefix} client certificate file used to connect to kube-apiserver."; + type = types.nullOr types.path; + default = null; + }; + + keyFile = mkOption { + description = "${prefix} client key file used to connect to kube-apiserver."; + type = types.nullOr types.path; + default = null; + }; + }; + + kubeConfigDefaults = { + server = mkDefault cfg.kubeconfig.server; + caFile = mkDefault cfg.kubeconfig.caFile; + certFile = mkDefault cfg.kubeconfig.certFile; + keyFile = mkDefault cfg.kubeconfig.keyFile; + }; + + cniConfig = + if cfg.kubelet.cni.config != [] && !(isNull cfg.kubelet.cni.configDir) then + throw "Verbatim CNI-config and CNI configDir cannot both be set." + else if !(isNull cfg.kubelet.cni.configDir) then + cfg.kubelet.cni.configDir + else + (pkgs.buildEnv { + name = "kubernetes-cni-config"; + paths = imap (i: entry: + pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry) + ) cfg.kubelet.cni.config; + }); + + manifests = pkgs.buildEnv { + name = "kubernetes-manifests"; + paths = mapAttrsToList (name: manifest: + pkgs.writeTextDir "${name}.json" (builtins.toJSON manifest) + ) cfg.kubelet.manifests; + }; + + addons = pkgs.runCommand "kubernetes-addons" { } '' + mkdir -p $out + # since we are mounting the addons to the addon manager, they need to be copied + ${concatMapStringsSep ";" (a: "cp -v ${a}/* $out/") (mapAttrsToList (name: addon: + pkgs.writeTextDir "${name}.json" (builtins.toJSON addon) + ) (cfg.addonManager.addons))} + ''; + + taintOptions = { name, ... }: { + options = { + key = mkOption { + description = "Key of taint."; + default = name; + type = types.str; + }; + value = mkOption { + description = "Value of taint."; + type = types.str; + }; + effect = mkOption { + description = "Effect of taint."; + example = "NoSchedule"; + type = types.enum ["NoSchedule" "PreferNoSchedule" "NoExecute"]; + }; + }; + }; + + taints = concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (mapAttrsToList (n: v: v) cfg.kubelet.taints); + + # needed for flannel to pass options to docker + mkDockerOpts = pkgs.runCommand "mk-docker-opts" { + buildInputs = [ pkgs.makeWrapper ]; + } '' + mkdir -p $out + cp ${pkgs.kubernetes.src}/cluster/centos/node/bin/mk-docker-opts.sh $out/mk-docker-opts.sh + + # bashInteractive needed for `compgen` + makeWrapper ${pkgs.bashInteractive}/bin/bash $out/mk-docker-opts --add-flags "$out/mk-docker-opts.sh" + ''; +in { + + ###### interface + + options.services.kubernetes = { + roles = mkOption { + description = '' + Kubernetes role that this machine should take. + + Master role will enable etcd, apiserver, scheduler and controller manager + services. Node role will enable etcd, docker, kubelet and proxy services. 
+ ''; + default = []; + type = types.listOf (types.enum ["master" "node"]); + }; + + package = mkOption { + description = "Kubernetes package to use."; + type = types.package; + default = pkgs.kubernetes; + defaultText = "pkgs.kubernetes"; + }; + + verbose = mkOption { + description = "Kubernetes enable verbose mode for debugging."; + default = false; + type = types.bool; + }; + + etcd = { + servers = mkOption { + description = "List of etcd servers. By default etcd is started, except if this option is changed."; + default = ["http://127.0.0.1:2379"]; + type = types.listOf types.str; + }; + + keyFile = mkOption { + description = "Etcd key file."; + default = null; + type = types.nullOr types.path; + }; + + certFile = mkOption { + description = "Etcd cert file."; + default = null; + type = types.nullOr types.path; + }; + + caFile = mkOption { + description = "Etcd ca file."; + default = cfg.caFile; + type = types.nullOr types.path; + }; + }; + + kubeconfig = mkKubeConfigOptions "Default kubeconfig"; + + caFile = mkOption { + description = "Default kubernetes certificate authority"; + type = types.nullOr types.path; + default = null; + }; + + dataDir = mkOption { + description = "Kubernetes root directory for managing kubelet files."; + default = "/var/lib/kubernetes"; + type = types.path; + }; + + featureGates = mkOption { + description = "List set of feature gates"; + default = []; + type = types.listOf types.str; + }; + + apiserver = { + enable = mkOption { + description = "Whether to enable Kubernetes apiserver."; + default = false; + type = types.bool; + }; + + featureGates = mkOption { + description = "List set of feature gates"; + default = cfg.featureGates; + type = types.listOf types.str; + }; + + bindAddress = mkOption { + description = '' + The IP address on which to listen for the --secure-port port. + The associated interface(s) must be reachable by the rest + of the cluster, and by CLI/web clients. + ''; + default = "0.0.0.0"; + type = types.str; + }; + + advertiseAddress = mkOption { + description = '' + Kubernetes apiserver IP address on which to advertise the apiserver + to members of the cluster. This address must be reachable by the rest + of the cluster. + ''; + default = null; + type = types.nullOr types.str; + }; + + storageBackend = mkOption { + description = '' + Kubernetes apiserver storage backend. + ''; + default = "etcd3"; + type = types.enum ["etcd2" "etcd3"]; + }; + + port = mkOption { + description = "Kubernetes apiserver listening port."; + default = 8080; + type = types.int; + }; + + securePort = mkOption { + description = "Kubernetes apiserver secure port."; + default = 443; + type = types.int; + }; + + tlsCertFile = mkOption { + description = "Kubernetes apiserver certificate file."; + default = null; + type = types.nullOr types.path; + }; + + tlsKeyFile = mkOption { + description = "Kubernetes apiserver private key file."; + default = null; + type = types.nullOr types.path; + }; + + clientCaFile = mkOption { + description = "Kubernetes apiserver CA file for client auth."; + default = cfg.caFile; + type = types.nullOr types.path; + }; + + tokenAuthFile = mkOption { + description = '' + Kubernetes apiserver token authentication file. See + + ''; + default = null; + type = types.nullOr types.path; + }; + + basicAuthFile = mkOption { + description = '' + Kubernetes apiserver basic authentication file. 
See + + ''; + default = pkgs.writeText "users" '' + kubernetes,admin,0 + ''; + type = types.nullOr types.path; + }; + + authorizationMode = mkOption { + description = '' + Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See + + ''; + default = ["RBAC" "Node"]; + type = types.listOf (types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]); + }; + + authorizationPolicy = mkOption { + description = '' + Kubernetes apiserver authorization policy file. See + + ''; + default = []; + type = types.listOf types.attrs; + }; + + webhookConfig = mkOption { + description = '' + Kubernetes apiserver Webhook config file. It uses the kubeconfig file format. + See + ''; + default = null; + type = types.nullOr types.path; + }; + + allowPrivileged = mkOption { + description = "Whether to allow privileged containers on Kubernetes."; + default = true; + type = types.bool; + }; + + serviceClusterIpRange = mkOption { + description = '' + A CIDR notation IP range from which to assign service cluster IPs. + This must not overlap with any IP ranges assigned to nodes for pods. + ''; + default = "10.0.0.0/24"; + type = types.str; + }; + + runtimeConfig = mkOption { + description = '' + Api runtime configuration. See + + ''; + default = "authentication.k8s.io/v1beta1=true"; + example = "api/all=false,api/v1=true"; + type = types.str; + }; + + enableAdmissionPlugins = mkOption { + description = '' + Kubernetes admission control plugins to enable. See + + ''; + default = ["NamespaceLifecycle" "LimitRanger" "ServiceAccount" "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds" "NodeRestriction"]; + example = [ + "NamespaceLifecycle" "NamespaceExists" "LimitRanger" + "SecurityContextDeny" "ServiceAccount" "ResourceQuota" + "PodSecurityPolicy" "NodeRestriction" "DefaultStorageClass" + ]; + type = types.listOf types.str; + }; + + disableAdmissionPlugins = mkOption { + description = '' + Kubernetes admission control plugins to disable. See + + ''; + default = []; + type = types.listOf types.str; + }; + + serviceAccountKeyFile = mkOption { + description = '' + Kubernetes apiserver PEM-encoded x509 RSA private or public key file, + used to verify ServiceAccount tokens. By default tls private key file + is used. 
+ ''; + default = null; + type = types.nullOr types.path; + }; + + kubeletClientCaFile = mkOption { + description = "Path to a cert file for connecting to kubelet."; + default = cfg.caFile; + type = types.nullOr types.path; + }; + + kubeletClientCertFile = mkOption { + description = "Client certificate to use for connections to kubelet."; + default = null; + type = types.nullOr types.path; + }; + + kubeletClientKeyFile = mkOption { + description = "Key to use for connections to kubelet."; + default = null; + type = types.nullOr types.path; + }; + + kubeletHttps = mkOption { + description = "Whether to use https for connections to kubelet."; + default = true; + type = types.bool; + }; + + extraOpts = mkOption { + description = "Kubernetes apiserver extra command line options."; + default = ""; + type = types.str; + }; + }; + + scheduler = { + enable = mkOption { + description = "Whether to enable Kubernetes scheduler."; + default = false; + type = types.bool; + }; + + featureGates = mkOption { + description = "List set of feature gates"; + default = cfg.featureGates; + type = types.listOf types.str; + }; + + address = mkOption { + description = "Kubernetes scheduler listening address."; + default = "127.0.0.1"; + type = types.str; + }; + + port = mkOption { + description = "Kubernetes scheduler listening port."; + default = 10251; + type = types.int; + }; + + leaderElect = mkOption { + description = "Whether to start leader election before executing main loop."; + type = types.bool; + default = true; + }; + + kubeconfig = mkKubeConfigOptions "Kubernetes scheduler"; + + extraOpts = mkOption { + description = "Kubernetes scheduler extra command line options."; + default = ""; + type = types.str; + }; + }; + + controllerManager = { + enable = mkOption { + description = "Whether to enable Kubernetes controller manager."; + default = false; + type = types.bool; + }; + + featureGates = mkOption { + description = "List set of feature gates"; + default = cfg.featureGates; + type = types.listOf types.str; + }; + + address = mkOption { + description = "Kubernetes controller manager listening address."; + default = "127.0.0.1"; + type = types.str; + }; + + port = mkOption { + description = "Kubernetes controller manager listening port."; + default = 10252; + type = types.int; + }; + + leaderElect = mkOption { + description = "Whether to start leader election before executing main loop."; + type = types.bool; + default = true; + }; + + serviceAccountKeyFile = mkOption { + description = '' + Kubernetes controller manager PEM-encoded private RSA key file used to + sign service account tokens + ''; + default = null; + type = types.nullOr types.path; + }; + + rootCaFile = mkOption { + description = '' + Kubernetes controller manager certificate authority file included in + service account's token secret. 
+ ''; + default = cfg.caFile; + type = types.nullOr types.path; + }; + + kubeconfig = mkKubeConfigOptions "Kubernetes controller manager"; + + extraOpts = mkOption { + description = "Kubernetes controller manager extra command line options."; + default = ""; + type = types.str; + }; + }; + + kubelet = { + enable = mkOption { + description = "Whether to enable Kubernetes kubelet."; + default = false; + type = types.bool; + }; + + featureGates = mkOption { + description = "List set of feature gates"; + default = cfg.featureGates; + type = types.listOf types.str; + }; + + seedDockerImages = mkOption { + description = "List of docker images to preload on system"; + default = []; + type = types.listOf types.package; + }; + + registerNode = mkOption { + description = "Whether to auto register kubelet with API server."; + default = true; + type = types.bool; + }; + + address = mkOption { + description = "Kubernetes kubelet info server listening address."; + default = "0.0.0.0"; + type = types.str; + }; + + port = mkOption { + description = "Kubernetes kubelet info server listening port."; + default = 10250; + type = types.int; + }; + + tlsCertFile = mkOption { + description = "File containing x509 Certificate for HTTPS."; + default = null; + type = types.nullOr types.path; + }; + + tlsKeyFile = mkOption { + description = "File containing x509 private key matching tlsCertFile."; + default = null; + type = types.nullOr types.path; + }; + + clientCaFile = mkOption { + description = "Kubernetes apiserver CA file for client authentication."; + default = cfg.caFile; + type = types.nullOr types.path; + }; + + healthz = { + bind = mkOption { + description = "Kubernetes kubelet healthz listening address."; + default = "127.0.0.1"; + type = types.str; + }; + + port = mkOption { + description = "Kubernetes kubelet healthz port."; + default = 10248; + type = types.int; + }; + }; + + hostname = mkOption { + description = "Kubernetes kubelet hostname override."; + default = config.networking.hostName; + type = types.str; + }; + + allowPrivileged = mkOption { + description = "Whether to allow Kubernetes containers to request privileged mode."; + default = true; + type = types.bool; + }; + + # TODO: remove this deprecated flag + # cadvisorPort = mkOption { + # description = "Kubernetes kubelet local cadvisor port."; + # default = 4194; + # type = types.int; + # }; + + clusterDns = mkOption { + description = "Use alternative DNS."; + default = "10.1.0.1"; + type = types.str; + }; + + clusterDomain = mkOption { + description = "Use alternative domain."; + default = config.services.kubernetes.addons.dns.clusterDomain; + type = types.str; + }; + + networkPlugin = mkOption { + description = "Network plugin to use by Kubernetes."; + type = types.nullOr (types.enum ["cni" "kubenet"]); + default = "kubenet"; + }; + + cni = { + packages = mkOption { + description = "List of network plugin packages to install."; + type = types.listOf types.package; + default = []; + }; + + config = mkOption { + description = "Kubernetes CNI configuration."; + type = types.listOf types.attrs; + default = []; + example = literalExample '' + [{ + "cniVersion": "0.2.0", + "name": "mynet", + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "subnet": "10.22.0.0/16", + "routes": [ + { "dst": "0.0.0.0/0" } + ] + } + } { + "cniVersion": "0.2.0", + "type": "loopback" + }] + ''; + }; + + configDir = mkOption { + description = "Path to Kubernetes CNI configuration directory."; + type = 
types.nullOr types.path; + default = null; + }; + }; + + manifests = mkOption { + description = "List of manifests to bootstrap with kubelet (only pods can be created as manifest entry)"; + type = types.attrsOf types.attrs; + default = {}; + }; + + applyManifests = mkOption { + description = "Whether to apply manifests (this is true for master node)."; + default = false; + type = types.bool; + }; + + unschedulable = mkOption { + description = "Whether to set node taint to unschedulable=true as it is the case of node that has only master role."; + default = false; + type = types.bool; + }; + + taints = mkOption { + description = "Node taints (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/)."; + default = {}; + type = types.attrsOf (types.submodule [ taintOptions ]); + }; + + nodeIp = mkOption { + description = "IP address of the node. If set, kubelet will use this IP address for the node."; + default = null; + type = types.nullOr types.str; + }; + + kubeconfig = mkKubeConfigOptions "Kubelet"; + + extraOpts = mkOption { + description = "Kubernetes kubelet extra command line options."; + default = ""; + type = types.str; + }; + }; + + proxy = { + enable = mkOption { + description = "Whether to enable Kubernetes proxy."; + default = false; + type = types.bool; + }; + + featureGates = mkOption { + description = "List set of feature gates"; + default = cfg.featureGates; + type = types.listOf types.str; + }; + + address = mkOption { + description = "Kubernetes proxy listening address."; + default = "0.0.0.0"; + type = types.str; + }; + + kubeconfig = mkKubeConfigOptions "Kubernetes proxy"; + + extraOpts = mkOption { + description = "Kubernetes proxy extra command line options."; + default = ""; + type = types.str; + }; + }; + + addonManager = { + enable = mkOption { + description = "Whether to enable Kubernetes addon manager."; + default = false; + type = types.bool; + }; + + addons = mkOption { + description = "Kubernetes addons (any kind of Kubernetes resource can be an addon)."; + default = { }; + type = types.attrsOf (types.either types.attrs (types.listOf types.attrs)); + example = literalExample '' + { + "my-service" = { + "apiVersion" = "v1"; + "kind" = "Service"; + "metadata" = { + "name" = "my-service"; + "namespace" = "default"; + }; + "spec" = { ... }; + }; + } + // import { cfg = config.services.kubernetes; }; + ''; + }; + }; + + path = mkOption { + description = "Packages added to the services' PATH environment variable. 
Both the bin and sbin subdirectories of each package are added."; + type = types.listOf types.package; + default = []; + }; + + clusterCidr = mkOption { + description = "Kubernetes controller manager and proxy CIDR Range for Pods in cluster."; + default = "10.1.0.0/16"; + type = types.str; + }; + + flannel.enable = mkOption { + description = "Whether to enable flannel networking"; + default = false; + type = types.bool; + }; + + }; + + ###### implementation + + config = mkMerge [ + (mkIf cfg.kubelet.enable { + services.kubernetes.kubelet.seedDockerImages = [infraContainer]; + + systemd.services.kubelet-bootstrap = { + description = "Boostrap Kubelet"; + wantedBy = ["kubernetes.target"]; + after = ["docker.service" "network.target"]; + path = with pkgs; [ docker ]; + script = '' + ${concatMapStrings (img: '' + echo "Seeding docker image: ${img}" + docker load <${img} + '') cfg.kubelet.seedDockerImages} + + rm /opt/cni/bin/* || true + ${concatMapStrings (package: '' + echo "Linking cni package: ${package}" + ln -fs ${package}/bin/* /opt/cni/bin + '') cfg.kubelet.cni.packages} + ''; + serviceConfig = { + Slice = "kubernetes.slice"; + Type = "oneshot"; + }; + }; + + systemd.services.kubelet = { + description = "Kubernetes Kubelet Service"; + wantedBy = [ "kubernetes.target" ]; + after = [ "network.target" "docker.service" "kube-apiserver.service" "kubelet-bootstrap.service" ]; + path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ cfg.path; + serviceConfig = { + Slice = "kubernetes.slice"; + CPUAccounting = true; + MemoryAccounting = true; + ExecStart = ''${cfg.package}/bin/kubelet \ + ${optionalString (taints != "") + "--register-with-taints=${taints}"} \ + --kubeconfig=${mkKubeConfig "kubelet" cfg.kubelet.kubeconfig} \ + --config=${kubeletConfig} \ + --address=${cfg.kubelet.address} \ + --port=${toString cfg.kubelet.port} \ + --register-node=${boolToString cfg.kubelet.registerNode} \ + ${optionalString (cfg.kubelet.tlsCertFile != null) + "--tls-cert-file=${cfg.kubelet.tlsCertFile}"} \ + ${optionalString (cfg.kubelet.tlsKeyFile != null) + "--tls-private-key-file=${cfg.kubelet.tlsKeyFile}"} \ + ${optionalString (cfg.kubelet.clientCaFile != null) + "--client-ca-file=${cfg.kubelet.clientCaFile}"} \ + --authentication-token-webhook \ + --authentication-token-webhook-cache-ttl="10s" \ + --authorization-mode=Webhook \ + --healthz-bind-address=${cfg.kubelet.healthz.bind} \ + --healthz-port=${toString cfg.kubelet.healthz.port} \ + --hostname-override=${cfg.kubelet.hostname} \ + --allow-privileged=${boolToString cfg.kubelet.allowPrivileged} \ + --root-dir=${cfg.dataDir} \ + --cadvisor_port=${toString cfg.kubelet.cadvisorPort} \ + ${optionalString (cfg.kubelet.clusterDns != "") + "--cluster-dns=${cfg.kubelet.clusterDns}"} \ + ${optionalString (cfg.kubelet.clusterDomain != "") + "--cluster-domain=${cfg.kubelet.clusterDomain}"} \ + --pod-infra-container-image=pause \ + ${optionalString (cfg.kubelet.networkPlugin != null) + "--network-plugin=${cfg.kubelet.networkPlugin}"} \ + --cni-conf-dir=${cniConfig} \ + --hairpin-mode=hairpin-veth \ + ${optionalString (cfg.kubelet.nodeIp != null) + "--node-ip=${cfg.kubelet.nodeIp}"} \ + ${optionalString (cfg.kubelet.featureGates != []) + "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.kubelet.featureGates}"} \ + ${optionalString cfg.verbose "--v=6 --log_flush_frequency=1s"} \ + ${cfg.kubelet.extraOpts} + ''; + WorkingDirectory = cfg.dataDir; + }; + }; + + # Allways include cni 
plugins + services.kubernetes.kubelet.cni.packages = [pkgs.cni-plugins]; + + boot.kernelModules = ["br_netfilter"]; + + services.kubernetes.kubelet.kubeconfig = kubeConfigDefaults; + }) + + (mkIf (cfg.kubelet.applyManifests && cfg.kubelet.enable) { + environment.etc = mapAttrs' (name: manifest: + nameValuePair "kubernetes/manifests/${name}.json" { + text = builtins.toJSON manifest; + mode = "0755"; + } + ) cfg.kubelet.manifests; + }) + + (mkIf (cfg.kubelet.unschedulable && cfg.kubelet.enable) { + services.kubernetes.kubelet.taints.unschedulable = { + value = "true"; + effect = "NoSchedule"; + }; + }) + + (mkIf cfg.apiserver.enable { + systemd.services.kube-apiserver = { + description = "Kubernetes APIServer Service"; + wantedBy = [ "kubernetes.target" ]; + after = [ "network.target" "docker.service" ]; + serviceConfig = { + Slice = "kubernetes.slice"; + ExecStart = ''${cfg.package}/bin/kube-apiserver \ + --etcd-servers=${concatStringsSep "," cfg.etcd.servers} \ + ${optionalString (cfg.etcd.caFile != null) + "--etcd-cafile=${cfg.etcd.caFile}"} \ + ${optionalString (cfg.etcd.certFile != null) + "--etcd-certfile=${cfg.etcd.certFile}"} \ + ${optionalString (cfg.etcd.keyFile != null) + "--etcd-keyfile=${cfg.etcd.keyFile}"} \ + --insecure-port=${toString cfg.apiserver.port} \ + --bind-address=${cfg.apiserver.bindAddress} \ + ${optionalString (cfg.apiserver.advertiseAddress != null) + "--advertise-address=${cfg.apiserver.advertiseAddress}"} \ + --allow-privileged=${boolToString cfg.apiserver.allowPrivileged}\ + ${optionalString (cfg.apiserver.tlsCertFile != null) + "--tls-cert-file=${cfg.apiserver.tlsCertFile}"} \ + ${optionalString (cfg.apiserver.tlsKeyFile != null) + "--tls-private-key-file=${cfg.apiserver.tlsKeyFile}"} \ + ${optionalString (cfg.apiserver.tokenAuthFile != null) + "--token-auth-file=${cfg.apiserver.tokenAuthFile}"} \ + ${optionalString (cfg.apiserver.basicAuthFile != null) + "--basic-auth-file=${cfg.apiserver.basicAuthFile}"} \ + --kubelet-https=${if cfg.apiserver.kubeletHttps then "true" else "false"} \ + ${optionalString (cfg.apiserver.kubeletClientCaFile != null) + "--kubelet-certificate-authority=${cfg.apiserver.kubeletClientCaFile}"} \ + ${optionalString (cfg.apiserver.kubeletClientCertFile != null) + "--kubelet-client-certificate=${cfg.apiserver.kubeletClientCertFile}"} \ + ${optionalString (cfg.apiserver.kubeletClientKeyFile != null) + "--kubelet-client-key=${cfg.apiserver.kubeletClientKeyFile}"} \ + ${optionalString (cfg.apiserver.clientCaFile != null) + "--client-ca-file=${cfg.apiserver.clientCaFile}"} \ + --authorization-mode=${concatStringsSep "," cfg.apiserver.authorizationMode} \ + ${optionalString (elem "ABAC" cfg.apiserver.authorizationMode) + "--authorization-policy-file=${ + pkgs.writeText "kube-auth-policy.jsonl" + (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.apiserver.authorizationPolicy) + }" + } \ + ${optionalString (elem "Webhook" cfg.apiserver.authorizationMode) + "--authorization-webhook-config-file=${cfg.apiserver.webhookConfig}" + } \ + --secure-port=${toString cfg.apiserver.securePort} \ + --service-cluster-ip-range=${cfg.apiserver.serviceClusterIpRange} \ + ${optionalString (cfg.apiserver.runtimeConfig != "") + "--runtime-config=${cfg.apiserver.runtimeConfig}"} \ + --enable-admission-plugins=${concatStringsSep "," cfg.apiserver.enableAdmissionPlugins} \ + --disable-admission-plugins=${concatStringsSep "," cfg.apiserver.disableAdmissionPlugins} \ + ${optionalString (cfg.apiserver.serviceAccountKeyFile!=null) + 
"--service-account-key-file=${cfg.apiserver.serviceAccountKeyFile}"} \ + ${optionalString cfg.verbose "--v=6"} \ + ${optionalString cfg.verbose "--log-flush-frequency=1s"} \ + --storage-backend=${cfg.apiserver.storageBackend} \ + ${optionalString (cfg.kubelet.featureGates != []) + "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.kubelet.featureGates}"} \ + ${cfg.apiserver.extraOpts} + ''; + WorkingDirectory = cfg.dataDir; + User = "kubernetes"; + Group = "kubernetes"; + AmbientCapabilities = "cap_net_bind_service"; + Restart = "on-failure"; + RestartSec = 5; + }; + }; + }) + + (mkIf cfg.scheduler.enable { + systemd.services.kube-scheduler = { + description = "Kubernetes Scheduler Service"; + wantedBy = [ "kubernetes.target" ]; + after = [ "kube-apiserver.service" ]; + serviceConfig = { + Slice = "kubernetes.slice"; + ExecStart = ''${cfg.package}/bin/kube-scheduler \ + --address=${cfg.scheduler.address} \ + --port=${toString cfg.scheduler.port} \ + --leader-elect=${boolToString cfg.scheduler.leaderElect} \ + --kubeconfig=${mkKubeConfig "kube-scheduler" cfg.scheduler.kubeconfig} \ + ${optionalString cfg.verbose "--v=6"} \ + ${optionalString cfg.verbose "--log-flush-frequency=1s"} \ + ${optionalString (cfg.scheduler.featureGates != []) + "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.scheduler.featureGates}"} \ + ${cfg.scheduler.extraOpts} + ''; + WorkingDirectory = cfg.dataDir; + User = "kubernetes"; + Group = "kubernetes"; + }; + }; + + services.kubernetes.scheduler.kubeconfig = kubeConfigDefaults; + }) + + (mkIf cfg.controllerManager.enable { + systemd.services.kube-controller-manager = { + description = "Kubernetes Controller Manager Service"; + wantedBy = [ "kubernetes.target" ]; + after = [ "kube-apiserver.service" ]; + serviceConfig = { + RestartSec = "30s"; + Restart = "on-failure"; + Slice = "kubernetes.slice"; + ExecStart = ''${cfg.package}/bin/kube-controller-manager \ + --address=${cfg.controllerManager.address} \ + --port=${toString cfg.controllerManager.port} \ + --kubeconfig=${mkKubeConfig "kube-controller-manager" cfg.controllerManager.kubeconfig} \ + --leader-elect=${boolToString cfg.controllerManager.leaderElect} \ + ${if (cfg.controllerManager.serviceAccountKeyFile!=null) + then "--service-account-private-key-file=${cfg.controllerManager.serviceAccountKeyFile}" + else "--service-account-private-key-file=/var/run/kubernetes/apiserver.key"} \ + ${if (cfg.controllerManager.rootCaFile!=null) + then "--root-ca-file=${cfg.controllerManager.rootCaFile}" + else "--root-ca-file=/var/run/kubernetes/apiserver.crt"} \ + ${optionalString (cfg.clusterCidr!=null) + "--cluster-cidr=${cfg.clusterCidr}"} \ + --allocate-node-cidrs=true \ + ${optionalString (cfg.controllerManager.featureGates != []) + "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.controllerManager.featureGates}"} \ + ${optionalString cfg.verbose "--v=6"} \ + ${optionalString cfg.verbose "--log-flush-frequency=1s"} \ + ${cfg.controllerManager.extraOpts} + ''; + WorkingDirectory = cfg.dataDir; + User = "kubernetes"; + Group = "kubernetes"; + }; + path = cfg.path; + }; + + services.kubernetes.controllerManager.kubeconfig = kubeConfigDefaults; + }) + + (mkIf cfg.proxy.enable { + systemd.services.kube-proxy = { + description = "Kubernetes Proxy Service"; + wantedBy = [ "kubernetes.target" ]; + after = [ "kube-apiserver.service" ]; + path = [pkgs.iptables pkgs.conntrack_tools]; + serviceConfig = { + Slice = "kubernetes.slice"; + ExecStart = 
+    (mkIf cfg.proxy.enable {
+      systemd.services.kube-proxy = {
+        description = "Kubernetes Proxy Service";
+        wantedBy = [ "kubernetes.target" ];
+        after = [ "kube-apiserver.service" ];
+        path = [pkgs.iptables pkgs.conntrack_tools];
+        serviceConfig = {
+          Slice = "kubernetes.slice";
+          ExecStart = ''${cfg.package}/bin/kube-proxy \
+            --kubeconfig=${mkKubeConfig "kube-proxy" cfg.proxy.kubeconfig} \
+            --bind-address=${cfg.proxy.address} \
+            ${optionalString (cfg.proxy.featureGates != [])
+              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.proxy.featureGates}"} \
+            ${optionalString cfg.verbose "--v=6"} \
+            ${optionalString cfg.verbose "--log-flush-frequency=1s"} \
+            ${optionalString (cfg.clusterCidr!=null)
+              "--cluster-cidr=${cfg.clusterCidr}"} \
+            ${cfg.proxy.extraOpts}
+          '';
+          WorkingDirectory = cfg.dataDir;
+        };
+      };
+
+      # kube-proxy needs iptables
+      networking.firewall.enable = mkDefault true;
+
+      services.kubernetes.proxy.kubeconfig = kubeConfigDefaults;
+    })
+
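+    # Role-based defaults follow: a host whose cfg.roles contains only
+    # "master" runs the control plane and is tainted unschedulable further
+    # down, while listing both roles keeps it schedulable for ordinary
+    # pods, e.g.:
+    #
+    #   services.kubernetes.roles = [ "master" "node" ];
+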
+    (mkIf (any (el: el == "master") cfg.roles) {
+      virtualisation.docker.enable = mkDefault true;
+      services.kubernetes.kubelet.enable = mkDefault true;
+      services.kubernetes.kubelet.allowPrivileged = mkDefault true;
+      services.kubernetes.kubelet.applyManifests = mkDefault true;
+      services.kubernetes.apiserver.enable = mkDefault true;
+      services.kubernetes.scheduler.enable = mkDefault true;
+      services.kubernetes.controllerManager.enable = mkDefault true;
+      services.etcd.enable = mkDefault (cfg.etcd.servers == ["http://127.0.0.1:2379"]);
+      services.kubernetes.addonManager.enable = mkDefault true;
+      services.kubernetes.proxy.enable = mkDefault true;
+    })
+
+    # if this node is only a master, make it unschedulable by default
+    (mkIf (all (el: el == "master") cfg.roles) {
+      services.kubernetes.kubelet.unschedulable = mkDefault true;
+    })
+
+    (mkIf (any (el: el == "node") cfg.roles) {
+      virtualisation.docker = {
+        enable = mkDefault true;
+
+        # kubernetes needs access to logs
+        logDriver = mkDefault "json-file";
+
+        # iptables must be disabled for kubernetes
+        extraOptions = "--iptables=false --ip-masq=false";
+      };
+
+      services.kubernetes.kubelet.enable = mkDefault true;
+      services.kubernetes.proxy.enable = mkDefault true;
+    })
+
+    (mkIf cfg.addonManager.enable {
+      environment.etc."kubernetes/addons".source = "${addons}/";
+
+      systemd.services.kube-addon-manager = {
+        description = "Kubernetes addon manager";
+        wantedBy = [ "kubernetes.target" ];
+        after = [ "kube-apiserver.service" ];
+        environment.ADDON_PATH = "/etc/kubernetes/addons/";
+        path = [ pkgs.gawk ];
+        serviceConfig = {
+          Slice = "kubernetes.slice";
+          ExecStart = "${cfg.package}/bin/kube-addons";
+          WorkingDirectory = cfg.dataDir;
+          User = "kubernetes";
+          Group = "kubernetes";
+        };
+      };
+    })
+
+    (mkIf (
+      cfg.apiserver.enable ||
+      cfg.scheduler.enable ||
+      cfg.controllerManager.enable ||
+      cfg.kubelet.enable ||
+      cfg.proxy.enable
+    ) {
+      systemd.targets.kubernetes = {
+        description = "Kubernetes";
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      systemd.tmpfiles.rules = [
+        "d /opt/cni/bin 0755 root root -"
+        "d /var/run/kubernetes 0755 kubernetes kubernetes -"
+        "d /var/lib/kubernetes 0755 kubernetes kubernetes -"
+      ];
+
+      environment.systemPackages = [ cfg.package ];
+      users.users = singleton {
+        name = "kubernetes";
+        uid = config.ids.uids.kubernetes;
+        description = "Kubernetes user";
+        extraGroups = [ "docker" ];
+        group = "kubernetes";
+        home = cfg.dataDir;
+        createHome = true;
+      };
+      users.groups.kubernetes.gid = config.ids.gids.kubernetes;
+
+      # dns addon is enabled by default
+      services.kubernetes.addons.dns.enable = mkDefault true;
+    })
+
+    (mkIf cfg.flannel.enable {
+      services.flannel = {
+        enable = mkDefault true;
+        network = mkDefault cfg.clusterCidr;
+        etcd = mkDefault {
+          endpoints = cfg.etcd.servers;
+          inherit (cfg.etcd) caFile certFile keyFile;
+        };
+      };
+
+      services.kubernetes.kubelet = {
+        networkPlugin = mkDefault "cni";
+        cni.config = mkDefault [{
+          name = "mynet";
+          type = "flannel";
+          delegate = {
+            isDefaultGateway = true;
+            bridge = "docker0";
+          };
+        }];
+      };
+
+      systemd.services."mk-docker-opts" = {
+        description = "Pre-Docker Actions";
+        wantedBy = [ "flannel.service" ];
+        before = [ "docker.service" ];
+        after = [ "flannel.service" ];
+        path = [ pkgs.gawk pkgs.gnugrep ];
+        script = ''
+          mkdir -p /run/flannel
+          ${mkDockerOpts}/mk-docker-opts -d /run/flannel/docker
+        '';
+        serviceConfig.Type = "oneshot";
+      };
+      systemd.services.docker.serviceConfig.EnvironmentFile = "/run/flannel/docker";
+
+      # read environment variables generated by mk-docker-opts
+      virtualisation.docker.extraOptions = "$DOCKER_OPTS";
+
+      networking.firewall.allowedUDPPorts = [
+        8285 # flannel udp
+        8472 # flannel vxlan
+      ];
+    })
+  ];
+}
diff --git a/nixos/overlays/overlays.nix b/nixos/overlays/overlays.nix
index eb92c98..444ec8a 100644
--- a/nixos/overlays/overlays.nix
+++ b/nixos/overlays/overlays.nix
@@ -1,6 +1,6 @@
 self: super: {
-  # glusterfs = super.glusterfs.overrideAttrs (old: {
-  #   buildInputs = old.buildInputs ++ [ self.lvm2 ];
-  # });
-  super.config.services.kubernetes = super.callPackage ./kubernetes.nix {};
+  super.config.services.kubernetes.addons.dns =
+    super.callPackage ./dns.nix {};
+  super.config.services.kubernetes.addons.dashboard =
+    super.callPackage ./dashboard.nix {};
 }