Secure certificates after generation

Jonas Juselius
2019-02-23 15:34:28 +01:00
parent cce9aa825b
commit 66d29be22c
19 changed files with 2098 additions and 144 deletions

1
.gitignore vendored

@@ -2,3 +2,4 @@
 *.csr
 result
 result-*
+gcroots/


@@ -5,5 +5,18 @@ if [ $# = 0 ]; then
   exit 1
 fi
-nixops modify -d $1 $1.nix
+if [ ! -f $1/deployment.nix ]; then
+  echo "error: $1 does not contain a deployment"
+  exit 1
+fi
+mkdir -p $1/gcroots
+echo "--- Securing certificates"
+nix-build -o $1/gcroots/certs $1/build.nix
+echo "--- Updating deployment"
+nixops modify -d $1 $1/deployment.nix
+echo "--- Deploying $1"
 nixops deploy -d $*

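The wrapper above is what actually "secures" the certificates: `nix-build -o $1/gcroots/certs` registers the freshly generated certificate derivations as indirect garbage-collector roots before nixops re-deploys, so `nix-collect-garbage` can no longer delete the keys a running cluster still references. A minimal usage sketch; the wrapper's file name is not shown in this diff, so `deploy.sh` and the repository root as working directory are assumptions:

```sh
# Assumed invocation of the wrapper above for the kube0 deployment
# (name "deploy.sh" is an assumption; kube0/ must contain deployment.nix,
# build.nix and certs.nix as added in this commit).
./deploy.sh kube0

# nix-build -o creates result symlinks and registers them as indirect GC
# roots, so the generated certificate derivations survive garbage collection.
ls -l kube0/gcroots/      # one symlink per certificate derivation
nix-collect-garbage       # no longer deletes the deployed keys and certs
```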

@@ -1,5 +0,0 @@
with import <nixpkgs> {};
let
pki = pkgs.callPackage ./lib/pki.nix {};
in
pki.initca

2
kube0/build.nix Normal file

@@ -0,0 +1,2 @@
with import <nixpkgs> {};
pkgs.callPackage ./certs.nix {}

36
kube0/certs.nix Normal file

@@ -0,0 +1,36 @@
{ pkgs, ...}:
let
pki = pkgs.callPackage ../lib/pki.nix {};
in
{
initca = pki.initca;
ca = pki.ca;
apiserver = pki.apiserver ''
"10.253.18.100",
"10.0.0.1",
"127.0.0.1",
"kubernetes",
"kubernetes.default.svc",
"etcd0",
"fs0-2",
"k0-0"
'';
kube-proxy = pki.kube-proxy;
admin = pki.admin;
etcd = pki.etcd ''
"etcd0",
"etcd1",
"etcd2",
"10.253.18.100",
"10.253.18.101",
"10.253.18.102",
"127.0.0.1"
'';
k0-0 = pki.worker { name = "k0-0"; ip = "10.253.18.100"; };
k0-1 = pki.worker { name = "k0-1"; ip = "10.253.18.101"; };
k0-2 = pki.worker { name = "k0-2"; ip = "10.253.18.102"; };
k0-3 = pki.worker { name = "k0-3"; ip = "10.253.18.103"; };
k0-4 = pki.worker { name = "k0-4"; ip = "10.253.18.107"; };
k0-5 = pki.worker { name = "k0-5"; ip = "10.253.18.108"; };
}

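Because certs.nix just exposes each certificate as an attribute, a single certificate can also be built or inspected on its own through build.nix. A sketch, assuming the repository root as working directory and the attribute names from the set above:

```sh
# Build only the apiserver certificate from kube0/certs.nix via build.nix.
nix-build kube0/build.nix -A apiserver
ls result/    # cert.pem and cert-key.pem, the files lib/pki.nix's toSet expects
```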

@@ -1,42 +1,12 @@
 with import <nixpkgs> {};
 let
-  pki = pkgs.callPackage ./lib/pki.nix {};
-  certs = {
-    ca = pki.ca;
-    apiserver = pki.apiserver ''
-      "10.253.18.100",
-      "10.0.0.1",
-      "127.0.0.1",
-      "kubernetes",
-      "kubernetes.default.svc",
-      "etcd0",
-      "fs0-2",
-      "k0-0"
-    '';
-    kube-proxy = pki.kube-proxy;
-    admin = pki.admin;
-    etcd = pki.etcd ''
-      "etcd0",
-      "etcd1",
-      "etcd2",
-      "10.253.18.100",
-      "10.253.18.101",
-      "10.253.18.102",
-      "127.0.0.1"
-    '';
-    k0-0 = pki.worker { name = "k0-0"; ip = "10.253.18.100"; };
-    k0-1 = pki.worker { name = "k0-1"; ip = "10.253.18.101"; };
-    k0-2 = pki.worker { name = "k0-2"; ip = "10.253.18.102"; };
-    k0-3 = pki.worker { name = "k0-3"; ip = "10.253.18.103"; };
-    k0-4 = pki.worker { name = "k0-4"; ip = "10.253.18.107"; };
-    k0-5 = pki.worker { name = "k0-5"; ip = "10.253.18.108"; };
-  };
-  cluster = callPackage ./lib/k8s.nix {
+  certs = pkgs.callPackage ./certs.nix {};
+  pki = pkgs.callPackage ../lib/pki.nix {};
+  cluster = callPackage ../lib/k8s.nix {
     masterNode = "10.253.18.100";
     etcdNodes = [ "etcd0" "etcd1" "etcd2" ];
     clusterHosts = ''
       10.253.18.100 k0-0 etcd0 kubernetes
-      10.253.18.100 itp-registry helm-registry.local
       10.253.18.101 k0-1 etcd1
       10.253.18.102 k0-2 etcd2
       10.253.18.103 k0-3
@@ -45,9 +15,23 @@ let
       10.253.18.106 fs0-0 fs0-0.local
       10.1.2.164 fs0-1 fs0-1.local
       10.253.18.100 fs0-2 fs0-2.local
-      10.253.18.100 itp-registry registry.itpartner.no minio.itpartner.no
-      10.253.18.100 nuget.itpartner.no
       10.253.18.109 k1-0
     '';
-    inherit certs;
+    certs = {
+      ca = certs.ca;
+      apiserver = pki.toSet certs.apiserver;
+      kube-proxy = pki.toSet certs.kube-proxy;
+      admin = pki.toSet certs.admin;
+      etcd = pki.toSet certs.etcd;
+      k0-0 = pki.toSet certs.k0-0;
+      k0-1 = pki.toSet certs.k0-1;
+      k0-2 = pki.toSet certs.k0-2;
+      k0-3 = pki.toSet certs.k0-3;
+      k0-4 = pki.toSet certs.k0-4;
+      k0-5 = pki.toSet certs.k0-5;
+    };
   };
 in
 {
@@ -58,7 +42,7 @@ in
       "dm_mirror"
       "dm_thin_pool"
     ];
-    services.dnsmasq.enable = true;
+    # services.dnsmasq.enable = true;
     fileSystems."/data" = {
       device = "fs0-0:gv0";
       fsType = "glusterfs";
@@ -71,11 +55,11 @@ in
     networking.extraHosts = ''
       10.253.18.100 itp-registry itp-registry.local
       10.253.18.100 helm-registry helm-registry.local
+      10.253.18.100 gitlab.itpartner.no registry.itpartner.no minio.itpartner.no
       10.253.18.100 nuget.local
-      10.253.18.100 kibana.local
-      10.253.18.100 dashboard.cluster.local
-      10.253.18.100 gitlab.cluster.local
-      10.253.18.100 baywash.cluster.local
+      10.253.18.100 dashboard.k0.local
+      10.253.18.100 gitlab.k0.local
+      10.253.18.100 baywash.k0.local
     '';
     systemd.services.gitlab-upgrade = {
       description = "Upgrade gitlab by zapping pod";
@@ -90,10 +74,4 @@ in
   k0-3 = cluster.worker "10.253.18.103" "k0-3";
   k0-4 = cluster.worker "10.253.18.107" "k0-4";
   k0-5 = cluster.worker "10.253.18.108" "k0-5";
-  # k0-0 = cluster.plain "10.253.18.100" "k0-0";
-  # k0-1 = cluster.plain "10.253.18.101" "k0-1";
-  # k0-2 = cluster.plain "10.253.18.102" "k0-2";
-  # k0-3 = cluster.plain "10.253.18.103" "k0-3";
-  # k0-4 = cluster.plain "10.253.18.107" "k0-4";
-  # k0-5 = cluster.plain "10.253.18.108" "k0-5";
 }


@@ -1,46 +0,0 @@
with import <nixpkgs> {};
let
pki = pkgs.callPackage ./lib/pki.nix {};
certs = {
ca = pki.ca;
apiserver = pki.apiserver ''
"10.253.18.109",
"10.0.0.1",
"127.0.0.1",
"kubernetes",
"etcd0",
"k1-0"
'';
kube-proxy = pki.kube-proxy;
admin = pki.admin;
etcd = pki.etcd ''
"etcd0",
"etcd1",
"10.253.18.109",
"10.253.18.110",
"127.0.0.1"
'';
k1-0 = pki.worker { name = "k1-0"; ip = "10.253.18.109"; };
k1-1 = pki.worker { name = "k1-1"; ip = "10.253.18.110"; };
};
cluster = callPackage ./lib/k8s.nix {
masterNode = "10.253.18.109";
etcdNodes = [ "etcd0" "etcd1" ];
clusterHosts = ''
10.253.18.109 k1-0 etcd0 kubernetes fs0-2
10.253.18.110 k1-1 etcd1
10.253.18.106 fs0-0
10.1.2.164 fs0-1
10.253.18.100 k0-0
'';
inherit certs;
};
in
{
k1-0 = { ... }:
{
require = [ (cluster.apiserver "10.253.18.109" "k1-0" "etcd0") ];
services.dnsmasq.enable = true;
};
k1-1 = cluster.server "10.253.18.110" "k1-1" "etcd1";
}

2
kube1/build.nix Normal file

@@ -0,0 +1,2 @@
with import <nixpkgs> {};
pkgs.callPackage ./certs.nix {}

29
kube1/certs.nix Normal file

@@ -0,0 +1,29 @@
{ pkgs, ...}:
let
pki = pkgs.callPackage ../lib/pki.nix {};
in
{
initca = pki.initca;
ca = pki.ca;
apiserver = pki.apiserver ''
"10.253.18.109",
"10.0.0.1",
"127.0.0.1",
"kubernetes",
"etcd0",
"k1-0"
'';
kube-proxy = pki.kube-proxy;
admin = pki.admin;
etcd = pki.etcd ''
"etcd0",
"etcd1",
"10.253.18.109",
"10.253.18.110",
"127.0.0.1"
'';
k1-0 = pki.worker { name = "k1-0"; ip = "10.253.18.109"; };
k1-1 = pki.worker { name = "k1-1"; ip = "10.253.18.110"; };
k1-2 = pki.worker { name = "k1-2"; ip = "10.253.18.111"; };
}

38
kube1/deployment.nix Normal file

@@ -0,0 +1,38 @@
with import <nixpkgs> {};
let
certs = pkgs.callPackage ./certs.nix {};
pki = pkgs.callPackage ../lib/pki.nix {};
cluster = callPackage ../lib/k8s.nix {
masterNode = "10.253.18.109";
etcdNodes = [ "etcd0" "etcd1" ];
clusterHosts = ''
10.253.18.109 k1-0 etcd0 kubernetes fs0-2
10.253.18.110 k1-1 etcd1
10.253.18.111 k1-2
10.253.18.106 fs0-0
10.1.2.164 fs0-1
10.253.18.100 k0-0
10.253.18.100 gitlab.itpartner.no registry.itpartner.no minio.itpartner.no
10.253.18.109 gitlab.k1.local registry.k1.local minio.k1.local
10.253.18.100 itp-registry itp-registry.local
'';
certs = {
ca = certs.ca;
apiserver = pki.toSet certs.apiserver;
kube-proxy = pki.toSet certs.kube-proxy;
admin = pki.toSet certs.admin;
etcd = pki.toSet certs.etcd;
k1-0 = pki.toSet certs.k1-0;
k1-1 = pki.toSet certs.k1-1;
k1-2 = pki.toSet certs.k1-2;
};
};
in
{
k1-0 = { ... }:
{
require = [ (cluster.apiserver "10.253.18.109" "k1-0" "etcd0") ];
};
k1-1 = cluster.server "10.253.18.110" "k1-1" "etcd1";
k1-2 = cluster.worker "10.253.18.111" "k1-2";
}


@@ -55,8 +55,8 @@ rec {
     networking = {
       firewall = {
         enable = true;
-        allowedTCPPorts = [ 53 4194 10250 ];
-        allowedUDPPorts = [ 53 ];
+        allowedTCPPorts = [ 4194 10250 ];
+        # allowedUDPPorts = [ 53 ];
         extraCommands = ''iptables -m comment --comment "pod external access" -t nat -A POSTROUTING ! -d 10.10.0.0/16 -m addrtype ! --dst-type LOCAL -j MASQUERADE'';
       };
     };
@@ -69,7 +69,7 @@ rec {
     roles = [ "master" ];
     kubelet.unschedulable = false;
     apiserver = {
       bindAddress = "0.0.0.0"; #masterNode;
       advertiseAddress = masterNode;
       authorizationMode = [ "Node" "RBAC" ];
       securePort = 8443;
@@ -89,20 +89,25 @@ rec {
       kubeconfig.server = localApiserver;
     };
     scheduler.kubeconfig.server = localApiserver;
+    addons.dns.enable = true;
+    addons.dns.reconcileMode = "EnsureExists";
     addons.dashboard = rec {
       enable = true;
       version = "v1.10.0";
+      rbac.enable = true;
+      rbac.clusterAdmin = true;
+      tokenTtl = 0;
       image = {
         imageName = "k8s.gcr.io/kubernetes-dashboard-amd64";
         imageDigest = "sha256:1d2e1229a918f4bc38b5a3f9f5f11302b3e71f8397b492afac7f273a0008776a";
         finalImageTag = version;
         sha256 = "10qkqqhzkr0bcv0dlf8nq069h190pw6zjj1l5s5g438g80v8639j";
       };
     };
   };
   networking.firewall = {
-    allowedTCPPorts = [ 5000 8080 8443 ]; #;4053 ];
-    # allowedUDPPorts = [ 4053 ];
+    allowedTCPPorts = [ 53 5000 8080 8443 ]; #;4053 ];
+    allowedUDPPorts = [ 53 4053 ];
   };
   environment.systemPackages = [ pkgs.kubernetes-helm ];
 };
@@ -130,6 +135,9 @@ rec {
     (../nixos/hardware-configuration + "/${instance}.nix")
     ../nixos/configuration.nix
   ];
+  security.pki.certificateFiles = [
+    certs.ca.cert
+  ];
   services.glusterfs = {
     enable = true;
     tlsSettings = {
@@ -141,12 +149,14 @@ rec {
   networking = {
     hostName = instance;
     extraHosts = clusterHosts;
+    # nameservers = [ masterNode ];
+    # dhcpcd.extraConfig = ''
+    #   static domain_name_servers=${masterNode}
+    # '';
     firewall.allowedTCPPortRanges = [ { from = 5000; to = 50000; } ];
     firewall.allowedTCPPorts = [ 80 443 111 ];
     firewall.allowedUDPPorts = [ 111 24007 24008 ];
   };
+  environment.systemPackages = [ pkgs.tshark ];
+  # services.dnsmasq.enable = true;
 };
 plain = ip: name: { config, lib, pkgs, ... }:
@@ -193,6 +203,8 @@ rec {
   services.dockerRegistry = {
     enable = true;
     listenAddress = "0.0.0.0";
+    enableDelete = true;
+    enableGarbageCollect = true;
     extraConfig = {
       REGISTRY_HTTP_TLS_CERTIFICATE = "${certs.apiserver.cert}";
       REGISTRY_HTTP_TLS_KEY = "${certs.apiserver.key}";


@@ -36,7 +36,7 @@
   }
   '';
-  initca =
+  initca' =
     let
       ca_csr = gencsr {
         name = "kubernetes";
@@ -51,9 +51,9 @@
       mkdir -p $out; cp *.pem $out'';
   # make ca derivation sha depend on initca cfssl output
-  initca' = pkgs.stdenv.mkDerivation {
+  initca = pkgs.stdenv.mkDerivation {
     name = "ca";
-    src = initca;
+    src = initca';
     buildCommand = ''
       mkdir -p $out;
       cp -r $src/* $out
@@ -61,8 +61,8 @@
   };
   ca = {
-    key = "${initca'}/ca-key.pem";
-    cert = "${initca'}/ca.pem";
+    key = "${initca}/ca-key.pem";
+    cert = "${initca}/ca.pem";
   };
   cfssl = conf: ''
@@ -72,16 +72,16 @@
     mkdir -p $out; cp *.pem $out
   '';
+  toSet = cert:
+    {
+      key = "${cert}/cert-key.pem";
+      cert = "${cert}/cert.pem";
+    };
   gencert = conf:
-    let
-      drv = pkgs.runCommand "${conf.name}" {
+    pkgs.runCommand "${conf.name}" {
       buildInputs = [ pkgs.cfssl ];
     } (cfssl conf);
-    in
-    {
-      key = "${drv}/cert-key.pem";
-      cert = "${drv}/cert.pem";
-    };
   admin = gencert rec {
     name = "admin";

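With this change `gencert` returns the bare certificate derivation (so it can be built and pinned as a GC root on its own), while the new `toSet` re-creates the `{ key, cert }` shape the NixOS modules expect. A small sketch of how the two fit together, following the usage in the kube0 deployment above; the relative paths assume the repository root:

```nix
# Sketch only: wiring a built certificate back into the { key, cert } form.
with import <nixpkgs> {};
let
  pki   = pkgs.callPackage ./lib/pki.nix {};
  certs = pkgs.callPackage ./kube0/certs.nix {};  # path assumed relative to the repo root
in
  # certs.admin is now a plain derivation; toSet exposes the store paths that
  # options such as REGISTRY_HTTP_TLS_CERTIFICATE reference.
  pki.toSet certs.admin   # => { key = ".../cert-key.pem"; cert = ".../cert.pem"; }
```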

@@ -27,17 +27,23 @@
   security.rtkit.enable = true;
-  disabledModules = [ "services/cluster/kubernetes/default.nix" ];
+  disabledModules = [
+    # "services/cluster/kubernetes/default.nix"
+    "services/cluster/kubernetes/dns.nix"
+    "services/cluster/kubernetes/dashboard.nix"
+  ];
   imports = [
     ./users.nix
     ./packages.nix
-    ./overlays/kubernetes.nix
+    ./overlays/dns.nix
+    ./overlays/dashboard.nix
+    # ./overlays/kubernetes.nix
   ];
-  nixpkgs.overlays = [
-    (import ./overlays/overlays.nix)
-  ];
+  # nixpkgs.overlays = [
+  #   (import ./overlays/overlays.nix)
+  # ];
   users.extraUsers.root.openssh.authorizedKeys.keys = [
     "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKiAS30ZO+wgfAqDE9Y7VhRunn2QszPHA5voUwo+fGOf jonas"


@@ -0,0 +1,21 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, ... }:
{
imports = [ ];
boot.initrd.availableKernelModules = [ "ata_piix" "mptspi" "floppy" "sd_mod" "sr_mod" ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-uuid/83bb471d-1db7-4c0b-b8aa-8111730a1ea9";
fsType = "ext4";
};
swapDevices = [ ];
nix.maxJobs = lib.mkDefault 1;
}


@@ -0,0 +1,330 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.kubernetes.addons.dashboard;
in {
options.services.kubernetes.addons.dashboard = {
enable = mkEnableOption "kubernetes dashboard addon";
rbac = mkOption {
description = "Role-based access control (RBAC) options";
default = {};
type = types.submodule {
options = {
enable = mkOption {
description = "Whether to enable role based access control is enabled for kubernetes dashboard";
type = types.bool;
default = elem "RBAC" config.services.kubernetes.apiserver.authorizationMode;
};
clusterAdmin = mkOption {
description = "Whether to assign cluster admin rights to the kubernetes dashboard";
type = types.bool;
default = false;
};
};
};
};
version = mkOption {
description = "Which version of the kubernetes dashboard to deploy";
type = types.str;
default = "v1.8.3";
};
image = mkOption {
description = "Docker image to seed for the kubernetes dashboard container.";
type = types.attrs;
default = {
imageName = "k8s.gcr.io/kubernetes-dashboard-amd64";
imageDigest = "sha256:dc4026c1b595435ef5527ca598e1e9c4343076926d7d62b365c44831395adbd0";
finalImageTag = cfg.version;
sha256 = "18ajcg0q1vignfjk2sm4xj4wzphfz8wah69ps8dklqfvv0164mc8";
};
};
tokenTtl = mkOption {
description = "Expiration time (in seconds) of JWE tokens generated by dashboard. Default: 15 min. 0 - never expires.";
type = types.int;
default = 15;
};
};
config = mkIf cfg.enable {
services.kubernetes.kubelet.seedDockerImages = [(pkgs.dockerTools.pullImage cfg.image)];
services.kubernetes.addonManager.addons = {
kubernetes-dashboard-deployment = {
kind = "Deployment";
apiVersion = "apps/v1";
metadata = {
labels = {
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
k8s-app = "kubernetes-dashboard";
version = cfg.version;
"kubernetes.io/cluster-service" = "true";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
name = "kubernetes-dashboard";
namespace = "kube-system";
};
spec = {
replicas = 1;
revisionHistoryLimit = 10;
selector.matchLabels."k8s-app" = "kubernetes-dashboard";
template = {
metadata = {
labels = {
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
k8s-app = "kubernetes-dashboard";
version = cfg.version;
"kubernetes.io/cluster-service" = "true";
};
annotations = {
"scheduler.alpha.kubernetes.io/critical-pod" = "";
};
};
spec = {
priorityClassName = "system-cluster-critical";
containers = [{
name = "kubernetes-dashboard";
image = with cfg.image; "${imageName}:${finalImageTag}";
ports = [{
containerPort = 8443;
protocol = "TCP";
}];
resources = {
limits = {
cpu = "100m";
memory = "300Mi";
};
requests = {
cpu = "100m";
memory = "100Mi";
};
};
args = [
"--auto-generate-certificates"
"--token-ttl=${toString cfg.tokenTtl}"
];
volumeMounts = [{
name = "tmp-volume";
mountPath = "/tmp";
} {
name = "kubernetes-dashboard-certs";
mountPath = "/certs";
}];
livenessProbe = {
httpGet = {
scheme = "HTTPS";
path = "/";
port = 8443;
};
initialDelaySeconds = 30;
timeoutSeconds = 30;
};
}];
volumes = [{
name = "kubernetes-dashboard-certs";
secret = {
secretName = "kubernetes-dashboard-certs";
};
} {
name = "tmp-volume";
emptyDir = {};
}];
serviceAccountName = "kubernetes-dashboard";
tolerations = [{
key = "node-role.kubernetes.io/master";
effect = "NoSchedule";
} {
key = "CriticalAddonsOnly";
operator = "Exists";
}];
};
};
};
};
kubernetes-dashboard-svc = {
apiVersion = "v1";
kind = "Service";
metadata = {
labels = {
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
k8s-app = "kubernetes-dashboard";
"kubernetes.io/cluster-service" = "true";
"kubernetes.io/name" = "KubeDashboard";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
name = "kubernetes-dashboard";
namespace = "kube-system";
};
spec = {
ports = [{
port = 443;
targetPort = 8443;
}];
selector.k8s-app = "kubernetes-dashboard";
};
};
kubernetes-dashboard-sa = {
apiVersion = "v1";
kind = "ServiceAccount";
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
name = "kubernetes-dashboard";
namespace = "kube-system";
};
};
kubernetes-dashboard-sec-certs = {
apiVersion = "v1";
kind = "Secret";
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
# Allows editing resource and makes sure it is created first.
"addonmanager.kubernetes.io/mode" = "EnsureExists";
};
name = "kubernetes-dashboard-certs";
namespace = "kube-system";
};
type = "Opaque";
};
kubernetes-dashboard-sec-kholder = {
apiVersion = "v1";
kind = "Secret";
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
# Allows editing resource and makes sure it is created first.
"addonmanager.kubernetes.io/mode" = "EnsureExists";
};
name = "kubernetes-dashboard-key-holder";
namespace = "kube-system";
};
type = "Opaque";
};
kubernetes-dashboard-cm = {
apiVersion = "v1";
kind = "ConfigMap";
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
# Allows editing resource and makes sure it is created first.
"addonmanager.kubernetes.io/mode" = "EnsureExists";
};
name = "kubernetes-dashboard-settings";
namespace = "kube-system";
};
};
} // (optionalAttrs cfg.rbac.enable
(let
subjects = [{
kind = "ServiceAccount";
name = "kubernetes-dashboard";
namespace = "kube-system";
}];
labels = {
k8s-app = "kubernetes-dashboard";
k8s-addon = "kubernetes-dashboard.addons.k8s.io";
"addonmanager.kubernetes.io/mode" = "Reconcile";
};
in
(if cfg.rbac.clusterAdmin then {
kubernetes-dashboard-crb = {
apiVersion = "rbac.authorization.k8s.io/v1";
kind = "ClusterRoleBinding";
metadata = {
name = "kubernetes-dashboard";
inherit labels;
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "ClusterRole";
name = "cluster-admin";
};
inherit subjects;
};
}
else
{
# Upstream role- and rolebinding as per:
# https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/alternative/kubernetes-dashboard.yaml
kubernetes-dashboard-role = {
apiVersion = "rbac.authorization.k8s.io/v1";
kind = "Role";
metadata = {
name = "kubernetes-dashboard-minimal";
namespace = "kube-system";
inherit labels;
};
rules = [
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
{
apiGroups = [""];
resources = ["secrets"];
verbs = ["create"];
}
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
{
apiGroups = [""];
resources = ["configmaps"];
verbs = ["create"];
}
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
{
apiGroups = [""];
resources = ["secrets"];
resourceNames = ["kubernetes-dashboard-key-holder"];
verbs = ["get" "update" "delete"];
}
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
{
apiGroups = [""];
resources = ["configmaps"];
resourceNames = ["kubernetes-dashboard-settings"];
verbs = ["get" "update"];
}
# Allow Dashboard to get metrics from heapster.
{
apiGroups = [""];
resources = ["services"];
resourceNames = ["heapster"];
verbs = ["proxy"];
}
{
apiGroups = [""];
resources = ["services/proxy"];
resourceNames = ["heapster" "http:heapster:" "https:heapster:"];
verbs = ["get"];
}
];
};
kubernetes-dashboard-rb = {
apiVersion = "rbac.authorization.k8s.io/v1";
kind = "RoleBinding";
metadata = {
name = "kubernetes-dashboard-minimal";
namespace = "kube-system";
inherit labels;
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "Role";
name = "kubernetes-dashboard-minimal";
};
inherit subjects;
};
})
));
};
}

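The dashboard options above are consumed from the cluster definition in lib/k8s.nix; condensed, the settings shown in that diff amount to the following fragment on the master node, with cluster-admin rights and a non-expiring token being deliberate choices of this commit:

```nix
# How the master role in lib/k8s.nix drives this module (condensed excerpt).
services.kubernetes.addons.dashboard = {
  enable = true;
  version = "v1.10.0";
  rbac.enable = true;        # emit a (Cluster)RoleBinding for the ServiceAccount
  rbac.clusterAdmin = true;  # bind to cluster-admin instead of the minimal Role
  tokenTtl = 0;              # JWE tokens never expire (passed as --token-ttl=0)
};
```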
329
nixos/overlays/dns.nix Normal file

@@ -0,0 +1,329 @@
{ config, pkgs, lib, ... }:
with lib;
let
version = "1.2.5";
cfg = config.services.kubernetes.addons.dns;
ports = {
dns = 10053;
health = 10054;
metrics = 10055;
};
in {
options.services.kubernetes.addons.dns = {
enable = mkEnableOption "kubernetes dns addon";
clusterIp = mkOption {
description = "Dns addon clusterIP";
# this default is also what kubernetes uses
default = (
concatStringsSep "." (
take 3 (splitString "." config.services.kubernetes.apiserver.serviceClusterIpRange
))
) + ".254";
type = types.str;
};
clusterDomain = mkOption {
description = "Dns cluster domain";
default = "cluster.local";
type = types.str;
};
replicas = mkOption {
description = "Number of DNS pod replicas to deploy in the cluster.";
default = 2;
type = types.int;
};
reconcileMode = mkOption {
description = ''
Controls the addon manager reconciliation mode for the DNS addon.
See: <link xlink:href="https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/addon-manager/README.md"/>
'';
default = "Reconcile";
type = types.enum [ "Reconcile" "EnsureExists" ];
};
coredns = mkOption {
description = "Docker image to seed for the CoreDNS container.";
type = types.attrs;
default = {
imageName = "coredns/coredns";
imageDigest = "sha256:33c8da20b887ae12433ec5c40bfddefbbfa233d5ce11fb067122e68af30291d6";
finalImageTag = version;
sha256 = "13q19rgwapv27xcs664dw502254yw4zw63insf6g2danidv2mg6i";
};
};
};
config = mkIf cfg.enable {
services.kubernetes.kubelet.seedDockerImages =
singleton (pkgs.dockerTools.pullImage cfg.coredns);
services.kubernetes.addonManager.addons = {
coredns-sa = {
apiVersion = "v1";
kind = "ServiceAccount";
metadata = {
labels = {
"addonmanager.kubernetes.io/mode" = "Reconcile";
"k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true";
};
name = "coredns";
namespace = "kube-system";
};
};
coredns-cr = {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "ClusterRole";
metadata = {
labels = {
"addonmanager.kubernetes.io/mode" = "Reconcile";
"k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true";
"kubernetes.io/bootstrapping" = "rbac-defaults";
};
name = "system:coredns";
};
rules = [
{
apiGroups = [ "" ];
resources = [ "endpoints" "services" "pods" "namespaces" ];
verbs = [ "list" "watch" ];
}
{
apiGroups = [ "" ];
resources = [ "nodes" ];
verbs = [ "get" ];
}
];
};
coredns-crb = {
apiVersion = "rbac.authorization.k8s.io/v1beta1";
kind = "ClusterRoleBinding";
metadata = {
annotations = {
"rbac.authorization.kubernetes.io/autoupdate" = "true";
};
labels = {
"addonmanager.kubernetes.io/mode" = "Reconcile";
"k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true";
"kubernetes.io/bootstrapping" = "rbac-defaults";
};
name = "system:coredns";
};
roleRef = {
apiGroup = "rbac.authorization.k8s.io";
kind = "ClusterRole";
name = "system:coredns";
};
subjects = [
{
kind = "ServiceAccount";
name = "coredns";
namespace = "kube-system";
}
];
};
coredns-cm = {
apiVersion = "v1";
kind = "ConfigMap";
metadata = {
labels = {
"addonmanager.kubernetes.io/mode" = cfg.reconcileMode;
"k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true";
};
name = "coredns";
namespace = "kube-system";
};
data = {
Corefile = ".:${toString ports.dns} {
errors
health :${toString ports.health}
kubernetes ${cfg.clusterDomain} in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
prometheus :${toString ports.metrics}
proxy . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}";
};
};
coredns-deploy = {
apiVersion = "extensions/v1beta1";
kind = "Deployment";
metadata = {
labels = {
"addonmanager.kubernetes.io/mode" = cfg.reconcileMode;
"k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true";
"kubernetes.io/name" = "CoreDNS";
};
name = "coredns";
namespace = "kube-system";
};
spec = {
replicas = cfg.replicas;
selector = {
matchLabels = { k8s-app = "kube-dns"; };
};
strategy = {
rollingUpdate = { maxUnavailable = 1; };
type = "RollingUpdate";
};
template = {
metadata = {
labels = {
k8s-app = "kube-dns";
};
};
spec = {
containers = [
{
args = [ "-conf" "/etc/coredns/Corefile" ];
image = with cfg.coredns; "${imageName}:${finalImageTag}";
imagePullPolicy = "Never";
livenessProbe = {
failureThreshold = 5;
httpGet = {
path = "/health";
port = ports.health;
scheme = "HTTP";
};
initialDelaySeconds = 60;
successThreshold = 1;
timeoutSeconds = 5;
};
name = "coredns";
ports = [
{
containerPort = ports.dns;
name = "dns";
protocol = "UDP";
}
{
containerPort = ports.dns;
name = "dns-tcp";
protocol = "TCP";
}
{
containerPort = ports.metrics;
name = "metrics";
protocol = "TCP";
}
];
resources = {
limits = {
memory = "170Mi";
};
requests = {
cpu = "100m";
memory = "70Mi";
};
};
securityContext = {
allowPrivilegeEscalation = false;
capabilities = {
drop = [ "all" ];
};
readOnlyRootFilesystem = true;
};
volumeMounts = [
{
mountPath = "/etc/coredns";
name = "config-volume";
readOnly = true;
}
];
}
];
dnsPolicy = "Default";
nodeSelector = {
"beta.kubernetes.io/os" = "linux";
};
serviceAccountName = "coredns";
tolerations = [
{
effect = "NoSchedule";
key = "node-role.kubernetes.io/master";
}
{
key = "CriticalAddonsOnly";
operator = "Exists";
}
];
volumes = [
{
configMap = {
items = [
{
key = "Corefile";
path = "Corefile";
}
];
name = "coredns";
};
name = "config-volume";
}
];
};
};
};
};
coredns-svc = {
apiVersion = "v1";
kind = "Service";
metadata = {
annotations = {
"prometheus.io/port" = toString ports.metrics;
"prometheus.io/scrape" = "true";
};
labels = {
"addonmanager.kubernetes.io/mode" = "Reconcile";
"k8s-app" = "kube-dns";
"kubernetes.io/cluster-service" = "true";
"kubernetes.io/name" = "CoreDNS";
};
name = "kube-dns";
namespace = "kube-system";
};
spec = {
clusterIP = cfg.clusterIp;
ports = [
{
name = "dns";
port = 53;
targetPort = ports.dns;
protocol = "UDP";
}
{
name = "dns-tcp";
port = 53;
targetPort = ports.dns;
protocol = "TCP";
}
];
selector = { k8s-app = "kube-dns"; };
};
};
};
services.kubernetes.kubelet.clusterDns = mkDefault cfg.clusterIp;
};
}

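As with the dashboard, the cluster definition in lib/k8s.nix is what turns this addon on; condensed, the master node ends up with roughly the following sketch:

```nix
# How lib/k8s.nix enables the CoreDNS addon defined above (condensed sketch).
services.kubernetes.addons.dns = {
  enable = true;
  reconcileMode = "EnsureExists";  # addon-manager creates the objects but leaves manual edits alone
  # clusterIp defaults to the serviceClusterIpRange prefix plus ".254" and is
  # also set as kubelet.clusterDns via mkDefault, so pods resolve through the
  # kube-dns Service on port 53 (backed by CoreDNS on port 10053).
};
```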

@@ -60,7 +60,7 @@ let
         cluster.server = cfg.server;
       }];
       users = [{
-        name = name';
+        name = "kubelet";
         user = {
           client-certificate = cfg.certFile;
           client-key = cfg.keyFile;
@@ -69,9 +69,9 @@ let
       contexts = [{
         context = {
           cluster = "local";
-          user = name';
+          user = "kubelet";
         };
-        current-context = "default";
+        current-context = "kubelet-context";
       }];
     }));
@@ -630,13 +630,6 @@ in {
       type = types.bool;
     };
-    # TODO: remove this deprecated flag
-    cadvisorPort = mkOption {
-      description = "Kubernetes kubelet local cadvisor port.";
-      default = 4194;
-      type = types.int;
-    };
     clusterDns = mkOption {
       description = "Use alternative DNS.";
       default = "10.1.0.1";
@@ -799,7 +792,7 @@ in {
     clusterCidr = mkOption {
       description = "Kubernetes controller manager and proxy CIDR Range for Pods in cluster.";
       default = "10.1.0.0/16";
-      type = types.str;
+      type = types.nullOr types.str;
     };
     flannel.enable = mkOption {
@@ -870,7 +863,6 @@ in {
           --hostname-override=${cfg.kubelet.hostname} \
           --allow-privileged=${boolToString cfg.kubelet.allowPrivileged} \
           --root-dir=${cfg.dataDir} \
-          --cadvisor_port=${toString cfg.kubelet.cadvisorPort} \
           ${optionalString (cfg.kubelet.clusterDns != "")
             "--cluster-dns=${cfg.kubelet.clusterDns}"} \
           ${optionalString (cfg.kubelet.clusterDomain != "")
@@ -1034,9 +1026,9 @@ in {
           ${if (cfg.controllerManager.rootCaFile!=null)
             then "--root-ca-file=${cfg.controllerManager.rootCaFile}"
             else "--root-ca-file=/var/run/kubernetes/apiserver.crt"} \
-          ${optionalString (cfg.clusterCidr!=null)
-            "--cluster-cidr=${cfg.clusterCidr}"} \
-          --allocate-node-cidrs=true \
+          ${if (cfg.clusterCidr!=null)
+            then "--cluster-cidr=${cfg.clusterCidr} --allocate-node-cidrs=true"
+            else "--allocate-node-cidrs=false"} \
           ${optionalString (cfg.controllerManager.featureGates != [])
             "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.controllerManager.featureGates}"} \
           ${optionalString cfg.verbose "--v=6"} \

File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
 self: super: {
-  # glusterfs = super.glusterfs.overrideAttrs (old: {
-  #   buildInputs = old.buildInputs ++ [ self.lvm2 ];
-  # });
-  super.config.services.kubernetes = super.callPackage ./kubernetes.nix {};
+  super.config.services.kubernetes.addons.dns =
+    super.callPackage ./dns.nix {};
+  super.config.services.kubernetes.addons.dashboard =
+    super.callPackage ./dashboard.nix {};
 }