fix: add k8s and hpc modules to main repo
modules/k8s/default.nix (new file, 476 lines)
@@ -0,0 +1,476 @@
{ pkgs, lib, config, ... }:

with lib;

let
  cfg = config.features.k8s;
  host = config.features.host;

  pki = import ./pki.nix { inherit pkgs; ca = cfg.initca; };

  secret = name: "${config.services.kubernetes.secretsPath}/${name}.pem";

  mkEasyCert = { name, CN, hosts ? [], fields ? {}, action ? "",
                 privateKeyOwner ? "kubernetes" }: rec {
    inherit name CN hosts fields action;
    caCert = secret "ca";
    cert = secret name;
    key = secret "${name}-key";
    privateKeyOptions = {
      owner = privateKeyOwner;
      group = "nogroup";
      mode = "0600";
      path = key;
    };
  };
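  # Illustration only (hypothetical arguments, not evaluated by the build):
  #   mkEasyCert { name = "etcd"; CN = "node1"; }
  # evaluates to roughly
  #   { caCert = ".../secrets/ca.pem"; cert = ".../secrets/etcd.pem";
  #     key = ".../secrets/etcd-key.pem";
  #     privateKeyOptions = { owner = "kubernetes"; group = "nogroup"; mode = "0600"; path = key; };
  #     ... }
  # i.e. a cert spec in the shape used for services.kubernetes.pki.certs further down.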

  hostName = config.networking.hostName;
  domain = config.networking.domain;

  apiserverAddress = "https://${cfg.master.address}:4443";

  cfssl-apitoken =
    let
      version = "1.0";
      apitoken = pkgs.stdenv.mkDerivation {
        name = "apitoken-${version}";
        inherit version;
        buildCommand = ''
          head -c ${toString (32 / 2)} /dev/urandom | \
          od -An -t x | tr -d ' ' > $out
          chmod 400 $out
        '';
      };
    in
    # make ca derivation sha depend on initca cfssl output
    pkgs.stdenv.mkDerivation {
      name = "cfssl-apitoken";
      src = apitoken;
      buildCommand = ''
        cp $src $out
      '';
    };

  cluster-scripts =
    with builtins;
    let
      first = head cfg.ingressNodes;
      rest = tail cfg.ingressNodes;
      ingressNodes = foldl' (a: x: a + ",${x}") first rest;
      nodeNames = foldl' (a: x: a + " " + x.name) cfg.master.name cfg.nodes;
      ingressReplicaCount =
        toString (length cfg.ingressNodes);
      etcdNodes =
        let
          etcdaddrs = attrValues cfg.etcdCluster.nodes;
          first = head etcdaddrs;
          rest = tail etcdaddrs;
        in
          if cfg.etcdCluster.enable && length etcdaddrs > 0
          then foldl' (x: a: a + ",${x}") first rest
          else "${cfg.master.address}";
      show-kubernetes-charts-config = ''
        #!/usr/bin/env bash
        cat << EOF
        # Generated by show-kubernetes-charts-config
        # $(date)
        # Charts in git@gitlab.com:serit/k8s/k8s-charts
        top="\$(cd "\$(dirname "\$BASH_SOURCE[0]")" >/dev/null 2>&1 && pwd)"

        vars=(
        initca="${pki.initca}"
        apiserver="${cfg.master.name}"
        apiserverip="${cfg.master.address}"
        etcd_nodes="${etcdNodes}"
        cluster="${cfg.clusterName}"
        ingress_nodes="${ingressNodes}"
        ingress_replica_count="${ingressReplicaCount}"
        fileserver="${cfg.fileserver}"
        acme_email="${cfg.charts.acme_email}"
        grafana_smtp_user="$(echo -n ${cfg.charts.grafana_smtp_user} | base64 -w0)"
        grafana_smtp_password="$(echo -n ${cfg.charts.grafana_smtp_password} | base64 -w0)"
        )

        nodenames=( ${nodeNames} )
        nodes=(${builtins.foldl' (a: x: a + " " + x.address) cfg.master.address cfg.nodes})

        . \$top/functions.sh
        EOF
      '';
    in
    pkgs.stdenv.mkDerivation {
      name = "cluster-scripts";
      src = ./scripts;
      buildCommand = ''
        mkdir -p $out/bin
        cp $src/* $out/bin
        echo '${show-kubernetes-charts-config}' > $out/bin/show-kubernetes-charts-config
        chmod a+x $out/bin/show-kubernetes-charts-config

        rm $out/bin/restart-flannel.sh $out/bin/restart-kubernetes.sh
        sed 's/@master@/${cfg.master.name}/; s/@nodes@/${nodeNames}/' \
          $src/restart-flannel.sh > $out/bin/restart-flannel.sh
        chmod a+x $out/bin/restart-flannel.sh

        sed 's/@master@/${cfg.master.name}/; s/@nodes@/${nodeNames}/' \
          $src/restart-kubernetes.sh > $out/bin/restart-kubernetes.sh
        chmod a+x $out/bin/restart-kubernetes.sh
      '';
    };
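  # Usage note (illustrative, not enforced by the module): these scripts land on the
  # master's PATH via environment.systemPackages below. show-kubernetes-charts-config
  # prints a shell fragment with the cluster variables (initca, apiserver, etcd_nodes, ...)
  # expected by the k8s-charts helper scripts, e.g. redirected into a checkout:
  #   show-kubernetes-charts-config > /path/to/k8s-charts/config   # hypothetical target path
  # restart-flannel.sh and restart-kubernetes.sh get @master@ and @nodes@ substituted at build time.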

  etcd-cluster-scripts =
    let
      etcd-join-cluster = ''
        #!/usr/bin/env bash
        export ETCD_ADVERTISE_CLIENT_URLS=https://${host.address}:2379
        export ETCD_CERT_FILE=/var/lib/kubernetes/secrets/etcd.pem
        export ETCD_CLIENT_CERT_AUTH=1
        export ETCD_DATA_DIR=/var/lib/etcd
        export ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${host.address}:2380
        export ETCD_INITIAL_CLUSTER=${host.name}=https://${host.address}:2380
        export ETCD_INITIAL_CLUSTER_STATE=existing
        export ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
        export ETCD_KEY_FILE=/var/lib/kubernetes/secrets/etcd-key.pem
        export ETCD_LISTEN_CLIENT_URLS=https://${host.address}:2379
        export ETCD_LISTEN_PEER_URLS=https://${host.address}:2380
        export ETCD_NAME=${host.name}
        export ETCD_PEER_CERT_FILE=/var/lib/kubernetes/secrets/etcd.pem
        export ETCD_PEER_KEY_FILE=/var/lib/kubernetes/secrets/etcd-key.pem
        export ETCD_PEER_TRUSTED_CA_FILE=/var/lib/kubernetes/secrets/ca.pem
        export ETCD_TRUSTED_CA_FILE=/var/lib/kubernetes/secrets/ca.pem

        for i in $*; do
          ETCD_INITIAL_CLUSTER=$ETCD_INITIAL_CLUSTER,$i
        done

        if [ "x${builtins.toString cfg.master.enable}" = x1 ]; then
          echo "Refusing to run on master node! Exiting."
          exit 0
        fi

        echo "WARNING! WARNING! WARNING!"
        echo "This script destroys the existing etcd database on the current host!"
        echo "Sleeping 10 seconds before proceeding... ctrl-c is your friend"
        sleep 11 # give one second extra just in case

        systemctl stop etcd.service
        rm -rf /var/lib/etcd/*
        sudo -E -u etcd etcd
      '';
    in
    pkgs.stdenv.mkDerivation {
      name = "etcd-cluster-scripts";
      buildCommand = ''
        mkdir -p $out/bin
        echo '${etcd-join-cluster}' > $out/bin/etcd-join-cluster
        chmod a+x $out/bin/etcd-join-cluster
      '';
    };
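  # Sketch of intended use (a reading of the script above, not documented elsewhere):
  # run etcd-join-cluster on a non-master host that should (re)join the cluster, passing
  # the other members as extra arguments; each argument is appended to ETCD_INITIAL_CLUSTER, e.g.
  #   etcd-join-cluster node2=https://192.0.2.12:2380 node3=https://192.0.2.13:2380   # example addresses
  # Note that it wipes /var/lib/etcd on the current host before starting etcd with
  # initial-cluster-state=existing.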

  install-apitoken = ''
    #!${pkgs.bash}/bin/bash
    set -e
    if [ "x${builtins.toString cfg.master.enable}" = x1 -a -d /var/lib/cfssl ]; then
      cp ${cfssl-apitoken} /var/lib/cfssl/apitoken.secret
      chown cfssl /var/lib/cfssl/apitoken.secret
      chmod 640 /var/lib/cfssl/apitoken.secret
    else
      mkdir -p /var/lib/kubernetes/secrets
      cp ${cfssl-apitoken} /var/lib/kubernetes/secrets/apitoken.secret
      chown root /var/lib/kubernetes/secrets/apitoken.secret
      chmod 600 /var/lib/kubernetes/secrets/apitoken.secret
    fi
  '';
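  # install-apitoken is run by the kube-certmgr-apitoken-bootstrap services defined below:
  # on the master (where /var/lib/cfssl exists) the token is installed for cfssl itself,
  # while on other nodes it goes to /var/lib/kubernetes/secrets for certmgr to authenticate
  # against the master's cfssl. Both copies come from the same cfssl-apitoken derivation,
  # so master and nodes share one token.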

  common = {
    security.pki.certificateFiles = [ "${pki.initca}/ca.pem" ];
    environment.systemPackages = [
      pkgs.nfs-utils
      etcd-cluster-scripts
    ];
    environment.variables = {
      ETCDCTL_API = "3";
    };
    networking = {
      firewall.allowedTCPPortRanges = [ { from = 5000; to = 50000; } ];
      firewall.allowedTCPPorts = [ 80 443 111 ];
      firewall.allowedUDPPorts = [ 111 24007 24008 ];
    };
    boot.kernel.sysctl = {
      "fs.inotify.max_user_instances" = 1024;
      "fs.inotify.max_user_watches" = 65536;
    };
  };
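  # Port notes (a reading of the firewall rules above, not stated elsewhere in this file):
  # 111 is rpcbind/NFS (pkgs.nfs-utils is installed), 24007/24008 are GlusterFS management
  # ports, and 5000-50000 is a broad TCP range presumably covering NodePort-style services.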

  kubeMaster = {
    services.cfssl.ca = pki.ca.cert;
    services.cfssl.caKey = pki.ca.key;
    services.kubernetes = {
      roles = [ "master" ];
      inherit apiserverAddress;
      masterAddress = "${cfg.master.name}.${domain}";
      clusterCidr = cfg.cidr;
      pki.genCfsslCACert = false;
      pki.genCfsslAPIToken = false;
      pki.caCertPathPrefix = "${pki.initca}/ca";

      kubelet = {
        # clusterDomain = "${cfg.clusterName}.local";
      };

      apiserver = {
        advertiseAddress = cfg.master.address;
        authorizationMode = [ "Node" "RBAC" ];
        allowPrivileged = true;
        securePort = 4443;
        serviceClusterIpRange = "10.0.0.0/22";
        extraOpts = "--requestheader-client-ca-file ${pki.ca.cert}";
        extraSANs = cfg.master.extraSANs;
        verbosity = 2;
        etcd.servers =
          with builtins;
          let clusterNodes = attrValues cfg.etcdCluster.nodes; in
          if cfg.etcdCluster.enable && length clusterNodes > 0 then
            mkForce (map (x: "https://${x}:2379") clusterNodes)
          else [];
      };

      controllerManager = {
        bindAddress = cfg.master.address;
        extraOpts = "--authorization-always-allow-paths=/healthz,/metrics";
      };

      scheduler.address = cfg.master.address;
      scheduler.extraOpts = "--authorization-always-allow-paths=/healthz,/metrics";

      addonManager.enable = true;
      addons = {
        dns = {
          enable = true;
          # clusterDomain = "${cfg.clusterName}.local";
          reconcileMode = "EnsureExists";
        };
      };
    };

    networking.firewall = {
      allowedTCPPorts = [ 53 5000 8080 4443 4001 2379 2380 10250 10251 10252 ];
      allowedUDPPorts = [ 53 4053 ];
    };

    environment.systemPackages = [
      pkgs.kubernetes-helm
      pkgs.kubectl
      cluster-scripts
    ];

    systemd.services.kube-certmgr-apitoken-bootstrap = {
      description = "Kubernetes certmgr bootstrapper";
      wantedBy = [ "cfssl.service" ];
      before = [ "cfssl.target" ];
      script = install-apitoken;
      serviceConfig = {
        RestartSec = "10s";
        Restart = "on-failure";
      };
    };

    systemd.services.cfssl-restart = {
      enable = true;
      startAt = "00/4:00";
      description = "Restart cfssl which regularly locks up";
      script = "systemctl restart cfssl.service";
    };

    systemd.services.kube-socat-https-proxy = {
      enable = cfg.master.socat443;
      wantedBy = [ "kubernetes.target" ];
      after = [ "kubelet.target" ];
      description = "Proxy TCP port 443 to ingress NodePort at 30443";
      script = "${pkgs.socat}/bin/socat TCP-LISTEN:443,fork,reuseaddr TCP:127.0.0.1:30443";
      serviceConfig = {
        RestartSec = "10s";
        Restart = "on-failure";
      };
    };
  };

  etcdClusterNode = {
    services.etcd = {
      enable = true;
      clientCertAuth = true;
      peerClientCertAuth = true;
      listenClientUrls = mkForce [ "https://${host.address}:2379" ];
      listenPeerUrls = mkForce [ "https://${host.address}:2380" ];
      advertiseClientUrls = mkForce [ "https://${host.address}:2379" ];
      initialAdvertisePeerUrls = mkForce [ "https://${host.address}:2380" ];
      name = "${host.name}";
      certFile = secret "etcd";
      keyFile = secret "etcd-key";
      trustedCaFile = secret "ca";
      extraConf =
        if cfg.etcdCluster.existing
        then { "INITIAL_CLUSTER_STATE" = "existing"; }
        else {};
      initialCluster = with builtins;
        if length (attrValues cfg.etcdCluster.nodes) == 0
        then mkForce [ "${host.name}=https://${host.address}:2380" ]
        else mkForce (attrValues
          (mapAttrs (k: v: "${k}=https://${v}:2380") cfg.etcdCluster.nodes));
    };

    services.kubernetes.pki.certs = {
      etcd = mkEasyCert {
        name = "etcd";
        CN = host.name;
        hosts = [
          "etcd.local"
          "etcd.cluster.local"
          "etcd.${domain}"
          host.name
          host.address
        ];
        privateKeyOwner = "etcd";
        action = "systemctl restart etcd.service";
      };
    };

    networking.firewall = {
      allowedTCPPorts = [ 2379 2380 ];
    };
  };

  kubeNode = {
    services.kubernetes = rec {
      roles = [ "node" ];
      inherit apiserverAddress;
      # masterAddress = cfg.master.name;
      masterAddress = "${cfg.master.name}.${domain}";
      clusterCidr = cfg.cidr;
      # kubelet.clusterDomain = "${cfg.clusterName}.local";
      kubelet.hostname = "${hostName}";
      proxy.hostname = "${hostName}";
      proxy.extraOpts = "--metrics-bind-address 0.0.0.0:10249";
    };

    networking = {
      firewall = {
        enable = true;
        allowedTCPPorts = [ 4194 10250 ];
        allowedUDPPorts = [ 53 ];
      };
    };
    virtualisation.docker.enable = false; # conflicts with containerd!
    virtualisation.docker.autoPrune.enable = pkgs.lib.mkForce false; # conflicts with linkerd2
    systemd.services.kube-certmgr-apitoken-bootstrap = {
      description = "Kubernetes certmgr bootstrapper";
      wantedBy = [ "certmgr.service" ];
      before = [ "certmgr.service" ];
      script = install-apitoken;
      serviceConfig = {
        RestartSec = "10s";
        Restart = "on-failure";
      };
    };
  };

in {
  options.features.k8s = {
    enable = mkEnableOption "kubernetes";

    clusterName = mkOption {
      type = types.str;
      default = null;
    };

    nodes = mkOption {
      type = types.listOf types.attrs;
      default = [];
    };

    fileserver = mkOption {
      type = types.str;
      default = null;
    };

    cidr = mkOption {
      type = types.str;
      default = "10.0.0.0/16";
    };

    ingressNodes = mkOption {
      type = types.listOf types.str;
      default = null;
    };

    initca = mkOption {
      type = types.path;
      default = null;
    };

    master = {
      enable = mkEnableOption "kubernetes master node";

      address = mkOption {
        type = types.str;
        default = null;
      };

      name = mkOption {
        type = types.str;
        default = null;
      };

      extraSANs = mkOption {
        type = types.listOf types.str;
        default = [];
      };

      socat443 = mkEnableOption "socat proxy from port 443 to NodePort 30443";
    };

    node = {
      enable = mkEnableOption "kubernetes worker node";
    };

    etcdCluster = {
      enable = mkEnableOption "clustered etcd";
      existing = mkEnableOption "joining an existing etcd cluster";
      nodes = mkOption {
        type = types.attrs;
        default = { "${host.name}" = "${host.address}"; };
      };
    };

    charts = {
      acme_email = mkOption {
        type = types.str;
        default = "";
      };

      grafana_smtp_user = mkOption {
        type = types.str;
        default = "";
      };

      grafana_smtp_password = mkOption {
        type = types.str;
        default = "";
      };
    };
  };
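
  # Illustrative example of wiring this module up from a host configuration; the values
  # below (names, addresses, paths) are hypothetical and not taken from this repo:
  #
  #   features.host = { name = "master1"; address = "192.0.2.10"; };
  #   features.k8s = {
  #     enable = true;
  #     clusterName = "demo";
  #     initca = ./secrets/demo-initca;
  #     master = { enable = true; name = "master1"; address = "192.0.2.10"; };
  #     nodes = [ { name = "node1"; address = "192.0.2.11"; } ];
  #     ingressNodes = [ "192.0.2.11" ];
  #     fileserver = "192.0.2.20";
  #   };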

  config = mkIf cfg.enable (
    mkMerge [
      common
      (mkIf cfg.master.enable kubeMaster)
      (mkIf cfg.node.enable kubeNode)
      (mkIf cfg.etcdCluster.enable etcdClusterNode)
    ]
  );

  imports = [
    ../overrides/kubernetes_default.nix
    ../overrides/kubelet.nix
  ];
}