Major revamp.
lib/k8s.nix (new file, 173 lines)
@@ -0,0 +1,173 @@
{ pkgs, kubeMaster, etcdNodes, clusterHosts, certs, ... }:

let
  kubeApiserver = "https://${kubeMaster}:443";
  localApiserver = "https://127.0.0.1:8080";
  # Keep a direct binding to the master host name: inside the `rec` set below,
  # the attribute also called `kubeMaster` shadows this function argument.
  masterHost = kubeMaster;
  etcdEndpoints = builtins.map (x: "https://${x}:2379") etcdNodes;
  etcdCluster = builtins.map (x: "${x}=https://${x}:2380") etcdNodes;
in
rec {
  # etcd member configuration for the node called `name`
  etcdConfig = name: {
    services.etcd = {
      inherit name;
      enable = true;
      listenClientUrls = [ "https://0.0.0.0:2379" ];
      listenPeerUrls = [ "https://0.0.0.0:2380" ];
      peerClientCertAuth = true;
      keyFile = certs.etcd.key;
      certFile = certs.etcd.cert;
      trustedCaFile = certs.ca.cert;
      advertiseClientUrls = [ "https://${name}:2379" ];
      initialAdvertisePeerUrls = [ "https://${name}:2380" ];
      initialCluster = etcdCluster;
    };
    environment.variables = {
      ETCDCTL_KEY_FILE = "${certs.admin.key}";
      ETCDCTL_CERT_FILE = "${certs.admin.cert}";
      ETCDCTL_CA_FILE = "${certs.ca.cert}";
      ETCDCTL_PEERS = "https://127.0.0.1:2379";
    };
    networking.firewall.allowedTCPPorts = [ 2379 2380 ];
    systemd.services.flannel.after = [ "etcd.service" ];
  };

  clientConf = instance: {
    server = kubeApiserver;
    keyFile = certs.${instance}.key;
    certFile = certs.${instance}.cert;
    caFile = certs.ca.cert;
  };

  # Worker-node role for the host `instance`; the kubelet authenticates with that host's certs
  kubeNode = instance: {
    services.kubernetes = rec {
      roles = [ "node" ];
      kubeconfig = clientConf instance;
      kubelet = {
        enable = true;
        clientCaFile = certs.ca.cert;
        tlsKeyFile = certs.${instance}.key;
        tlsCertFile = certs.${instance}.cert;
        networkPlugin = null;
        clusterDns = "10.0.0.254";
        extraOpts = "--runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice";
        inherit kubeconfig;
      };
    };
    networking = {
      firewall = {
        enable = true;
        # trustedInterfaces = [ "flannel.1" "docker0" "veth+" ];
        allowedTCPPorts = [ 53 4194 10250 ];
        allowedUDPPorts = [ 53 ];
        extraCommands = ''iptables -m comment --comment "pod external access" -t nat -A POSTROUTING ! -d 10.10.0.0/16 -m addrtype ! --dst-type LOCAL -j MASQUERADE'';
      };
    };
    virtualisation.docker.extraOptions = "--insecure-registry 10.0.0.0/8";
    # systemd.services.kube-proxy.path = [ pkgs.iptables pkgs.conntrack_tools pkgs.kmod ];
  };

  # Control-plane role: apiserver, scheduler, controller manager, dashboard and DNS addons
  kubeMaster = {
    services.kubernetes = {
      roles = [ "master" ];
      kubelet.unschedulable = true;
      apiserver = {
        # `masterHost` comes from the let block; the function argument `kubeMaster`
        # is shadowed here by this very attribute.
        address = masterHost;
        advertiseAddress = masterHost;
        authorizationMode = [ "Node" "RBAC" ];
        securePort = 443;
        tlsKeyFile = certs.apiserver.key;
        tlsCertFile = certs.apiserver.cert;
        clientCaFile = certs.ca.cert;
        kubeletClientCaFile = certs.ca.cert;
        kubeletClientKeyFile = certs.apiserver.key;
        kubeletClientCertFile = certs.apiserver.cert;
        serviceAccountKeyFile = certs.apiserver.key;
      };
      scheduler.leaderElect = true;
      controllerManager = {
        leaderElect = true;
        serviceAccountKeyFile = certs.apiserver.key;
        rootCaFile = certs.ca.cert;
        kubeconfig.server = localApiserver;
      };
      scheduler.kubeconfig.server = localApiserver;
      addons.dashboard.enable = true;
      addons.dns.enable = true;
    };
    networking.firewall = {
      allowedTCPPorts = [ 5000 8080 443 ]; # 4053
      # allowedUDPPorts = [ 4053 ];
    };
    environment.systemPackages = [ pkgs.kubernetes-helm ];
  };

  kubeConfig = instance: {
    services.kubernetes = {
      verbose = false;
      caFile = certs.ca.cert;
      flannel.enable = true;
      clusterCidr = "10.10.0.0/16";
      etcd = {
        servers = etcdEndpoints;
        keyFile = certs.apiserver.key;
        certFile = certs.apiserver.cert;
        caFile = certs.ca.cert;
      };
      proxy = {
        kubeconfig = clientConf "kube-proxy";
      };
    };
  };

  nixosConfig = node: {
    imports = [ (./hardware-configuration + "/${node}.nix") ./nixos/configuration.nix ];
    networking = {
      hostName = node;
      extraHosts = clusterHosts;
      firewall.allowedTCPPortRanges = [ { from = 5000; to = 50000; } ];
      firewall.allowedTCPPorts = [ 80 443 ];
    };
    environment.systemPackages = [ pkgs.tshark ];
  };

  # Plain worker: base NixOS config, shared cluster config, node role
  worker = host: ip: { config, lib, pkgs, ... }:
    let
      instance = host;
      base = nixosConfig host;
    in
    {
      deployment.targetHost = ip;
      require = [ base (kubeConfig instance) (kubeNode instance) ];
      services.kubernetes.addons.dns.enable = false;
    };

  server = host: etc: ip: { config, lib, pkgs, ... }:
    let
      instance = host;
      base = nixosConfig instance;
      etcd = etcdConfig etc;
    in
    {
      deployment.targetHost = ip;
      require = [ base etcd (kubeConfig instance) (kubeNode instance) ];
      services.kubernetes.addons.dns.enable = false;
    };

  # Control-plane node: etcd member, master role, node role, and a TLS-protected Docker registry
  apiserver = host: ip: etc: { config, lib, pkgs, ... }:
    let
      instance = host;
      base = nixosConfig instance;
      etcd = etcdConfig etc;
    in
    {
      deployment.targetHost = ip;
      require = [ base etcd (kubeConfig instance) kubeMaster (kubeNode instance) ];
      services.dockerRegistry = {
        enable = true;
        listenAddress = "0.0.0.0";
        extraConfig = {
          REGISTRY_HTTP_TLS_CERTIFICATE = "${certs.apiserver.cert}";
          REGISTRY_HTTP_TLS_KEY = "${certs.apiserver.key}";
        };
      };
    };
}
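
For reference, a minimal sketch of how these helpers could be consumed from a NixOps-style network expression. The host names, IPs, etcd member names, and the certs import below are hypothetical; they only illustrate the argument order of apiserver (host, ip, etcd name), server (host, etcd name, ip), and worker (host, ip):

let
  pkgs = import <nixpkgs> { };
  # hypothetical cert set: must provide .ca, .etcd, .admin, .apiserver, .kube-proxy and per-host entries
  certs = import ./lib/certs.nix;
  k8s = import ./lib/k8s.nix {
    inherit pkgs certs;
    kubeMaster = "master01";
    etcdNodes = [ "master01" "node01" ];
    clusterHosts = ''
      10.1.0.1 master01
      10.1.0.2 node01
      10.1.0.3 node02
    '';
  };
in
{
  network.description = "kubernetes cluster";
  master01 = k8s.apiserver "master01" "10.1.0.1" "master01";
  node01   = k8s.server "node01" "node01" "10.1.0.2";
  node02   = k8s.worker "node02" "10.1.0.3";
}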