WIP: make new cluster config global
@@ -1,136 +0,0 @@
{ pkgs, lib, settings, ...}:
with lib;
let
  cluster-ca = pkgs.stdenv.mkDerivation {
    name = "cluster-ca";
    src = ./ca;
    buildCommand = ''
      mkdir -p $out
      cp $src/* $out
    '';
  };
  nixos-kubernetes-join-nodes = workers:
    let
      wrk = builtins.foldl' (a: s: a + " " + s) "" workers;
    in
      pkgs.writeScriptBin "nixos-kubernetes-join-nodes" ''
        #!/bin/sh
        set -e
        token=$(cat /var/lib/cfssl/apitoken.secret)
        for i in ${wrk}; do
          ssh root@$i "echo $token | sh nixos-kubernetes-node-join"
        done
      '';
  cidr = "10.10.0.0/16";
in
rec {
  kubeMaster = {
    services.cfssl.ca = "${cluster-ca}/ca.pem";
    services.cfssl.caKey = "${cluster-ca}/ca-key.pem";
    services.kubernetes = {
      roles = [ "master" ];
      masterAddress = settings.master;
      apiserverAddress = settings.apiserverAddress;
      clusterCidr = cidr;
      kubelet.unschedulable = false;
      pki.genCfsslCACert = false;
      pki.caCertPathPrefix = "${cluster-ca}/ca";
      apiserver = {
        advertiseAddress = settings.masterAddress;
        authorizationMode = [ "Node" "RBAC" ];
        securePort = 8443;
        insecurePort = 8080;
        extraOpts = "--requestheader-client-ca-file ${cluster-ca}/ca.pem";
      };
      addons = {
        dns = {
          enable = true;
          # clusterDomain = "local";
          reconcileMode = "EnsureExists";
        };
      };
    };
    networking.firewall = {
      allowedTCPPorts = [ 53 5000 8080 8443 ]; #;4053 ];
      allowedUDPPorts = [ 53 4053 ];
    };
    environment.systemPackages = [
      pkgs.kubernetes-helm
      (nixos-kubernetes-join-nodes settings.workers)
    ];
  };

  kubeWorker = {
    services.kubernetes = rec {
      roles = [ "node" ];
      clusterCidr = cidr;
      masterAddress = settings.master;
      apiserverAddress = settings.apiserverAddress;
    };
    networking = {
      firewall = {
        enable = true;
        allowedTCPPorts = [ 4194 10250 ];
        allowedUDPPorts = [ 53 ];
        extraCommands = ''iptables -m comment --comment "pod external access" -t nat -A POSTROUTING ! -d 10.10.0.0/16 -m addrtype ! --dst-type LOCAL -j MASQUERADE'';
      };
    };
    virtualisation.docker.extraOptions = "--insecure-registry 10.0.0.0/8";
    virtualisation.docker.autoPrune.enable = true;
  };

  baseNixos = name: {
    imports = [
      (../nixos/hardware-configuration + "/${name}.nix")
      ../nixos/configuration.nix
    ];
    security.pki.certificateFiles = [
      "${cluster-ca}/ca.pem"
    ];
    # services.glusterfs = {
    # enable = true;
    # # tlsSettings = {
    # # caCert = certs.ca.caFile;
    # # tlsKeyPath = certs.self.keyFile;
    # # tlsPem = certs.self.certFile;
    # };
    # };
    networking = {
      hostName = name;
      extraHosts = settings.clusterHosts;
      # nameservers = [ masterAddress ];
      # dhcpcd.extraConfig = ''
      # static domain_name_servers=${masterAddress}
      # '';
      firewall.allowedTCPPortRanges = [ { from = 5000; to = 50000; } ];
      firewall.allowedTCPPorts = [ 80 443 111 ];
      firewall.allowedUDPPorts = [ 111 24007 24008 ];
    };
  };

  apiserver = ip: name: self:
    {
      deployment.targetHost = ip;
      require = [
        (baseNixos name)
        kubeMaster
      ];
    };

  worker = ip: name: self:
    {
      deployment.targetHost = ip;
      require = [
        (baseNixos name)
        kubeWorker
      ];
    };

  host = ip: name: self:
    {
      deployment.targetHost = ip;
      require = [
        (baseNixos name)
      ];
    };
}
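The module above is parameterized over an external settings attrset; it reads settings.master, settings.masterAddress, settings.apiserverAddress, settings.workers and settings.clusterHosts. A minimal sketch of what that argument might look like follows; every hostname and address is a placeholder, not a value taken from this commit:

# Hypothetical settings attrset; all names and addresses below are placeholders.
{
  master = "k8s0-0";                          # used as services.kubernetes.masterAddress
  masterAddress = "10.253.18.100";            # used as apiserver.advertiseAddress
  apiserverAddress = "https://k8s0-0:8443";   # where nodes reach the API server
  workers = [ "k8s0-1" "k8s0-2" ];            # hosts targeted by nixos-kubernetes-join-nodes
  clusterHosts = ''
    10.253.18.100 k8s0-0
    10.253.18.101 k8s0-1
    10.253.18.102 k8s0-2
  '';                                         # becomes networking.extraHosts
}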
@@ -1,152 +0,0 @@
let
  pkgs = import <nixpkgs> {};

  runWithOpenSSL = file: cmd: pkgs.runCommand file {
    buildInputs = [ pkgs.openssl_1_1_0 ];
  } ("export RANDFILE=/tmp/rnd;" + cmd);

  etcd_cnf = pkgs.writeText "etcd-openssl.cnf" ''
    [req]
    req_extensions = v3_req
    distinguished_name = req_distinguished_name
    [req_distinguished_name]
    [ v3_req ]
    basicConstraints = CA:FALSE
    keyUsage = digitalSignature, keyEncipherment
    extendedKeyUsage = serverAuth
    subjectAltName = @alt_names
    [alt_names]
    DNS.1 = etcd0
    DNS.2 = etcd1
    DNS.3 = etcd2
    DNS.4 = k8s0-0
    DNS.5 = k8s0-1
    DNS.6 = k8s0-2
    IP.1 = 127.0.0.1
  '';

  etcd_client_cnf = pkgs.writeText "etcd-client-openssl.cnf" ''
    [req]
    req_extensions = v3_req
    distinguished_name = req_distinguished_name
    [req_distinguished_name]
    [ v3_req ]
    basicConstraints = CA:FALSE
    keyUsage = digitalSignature, keyEncipherment
    extendedKeyUsage = clientAuth
  '';

  apiserver_cnf = pkgs.writeText "apiserver-openssl.cnf" ''
    [req]
    req_extensions = v3_req
    distinguished_name = req_distinguished_name
    [req_distinguished_name]
    [ v3_req ]
    basicConstraints = CA:FALSE
    keyUsage = nonRepudiation, digitalSignature, keyEncipherment
    subjectAltName = @alt_names
    [alt_names]
    DNS.1 = kubernetes
    DNS.2 = kubernetes.default
    DNS.3 = kubernetes.default.svc
    DNS.4 = kubernetes.default.svc.cluster.local
    DNS.5 = k8s0-0.itpartner.no
    IP.1 = 10.0.0.1
    IP.2 = 10.253.18.100
  '';

  worker_cnf = pkgs.writeText "worker-openssl.cnf" ''
    [req]
    req_extensions = v3_req
    distinguished_name = req_distinguished_name
    [req_distinguished_name]
    [ v3_req ]
    basicConstraints = CA:FALSE
    keyUsage = nonRepudiation, digitalSignature, keyEncipherment
    subjectAltName = @alt_names
    [alt_names]
    DNS.1 = *.itpartner.no
    DNS.2 = *.itpartner.intern
    DNS.3 = k8s0-0
    DNS.4 = k8s0-1
    DNS.5 = k8s0-2
    DNS.6 = git01
  '';

  ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
  ca_pem = runWithOpenSSL "ca.pem" ''
    openssl req \
      -x509 -new -nodes -key ${ca_key} \
      -days 10000 -out $out -subj "/CN=kube-ca"
  '';

  etcd_key = runWithOpenSSL "etcd-key.pem" "openssl genrsa -out $out 2048";
  etcd_csr = runWithOpenSSL "etcd.csr" ''
    openssl req \
      -new -key ${etcd_key} \
      -out $out -subj "/CN=etcd" \
      -config ${etcd_cnf}
  '';
  etcd_cert = runWithOpenSSL "etcd.pem" ''
    openssl x509 \
      -req -in ${etcd_csr} \
      -CA ${ca_pem} -CAkey ${ca_key} \
      -CAcreateserial -out $out \
      -days 365 -extensions v3_req \
      -extfile ${etcd_cnf}
  '';

  etcd_client_key = runWithOpenSSL "etcd-client-key.pem"
    "openssl genrsa -out $out 2048";
  etcd_client_csr = runWithOpenSSL "etcd-client.csr" ''
    openssl req \
      -new -key ${etcd_client_key} \
      -out $out -subj "/CN=etcd-client" \
      -config ${etcd_client_cnf}
  '';
  etcd_client_cert = runWithOpenSSL "etcd-client.pem" ''
    openssl x509 \
      -req -in ${etcd_client_csr} \
      -CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
      -out $out -days 365 -extensions v3_req \
      -extfile ${etcd_client_cnf}
  '';

  apiserver_key = runWithOpenSSL "apiserver-key.pem"
    "openssl genrsa -out $out 2048";
  apiserver_csr = runWithOpenSSL "apiserver.csr" ''
    openssl req \
      -new -key ${apiserver_key} \
      -out $out -subj "/CN=kube-apiserver" \
      -config ${apiserver_cnf}
  '';
  apiserver_cert = runWithOpenSSL "apiserver.pem" ''
    openssl x509 \
      -req -in ${apiserver_csr} \
      -CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
      -out $out -days 365 -extensions v3_req \
      -extfile ${apiserver_cnf}
  '';

  worker_key = runWithOpenSSL "worker-key.pem" "openssl genrsa -out $out 2048";
  worker_csr = runWithOpenSSL "worker.csr" ''
    openssl req \
      -new -key ${worker_key} \
      -out $out -subj "/CN=kube-worker" \
      -config ${worker_cnf}
  '';
  worker_cert = runWithOpenSSL "worker.pem" ''
    openssl x509 \
      -req -in ${worker_csr} \
      -CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
      -out $out -days 365 -extensions v3_req \
      -extfile ${worker_cnf}
  '';
in
{
  inherit ca_key ca_pem;
  inherit etcd_key etcd_cert;
  inherit etcd_client_key etcd_client_cert;
  inherit apiserver_key apiserver_cert;
  inherit worker_key worker_cert;
}
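The expression above only builds the individual keys and certificates. The old signature of lib/k8s.nix in the diff below ({ pkgs, masterAddress, etcdNodes, clusterHosts, certs, ...}:) consumed them as a single certs attrset with a key/cert pair per role. A purely illustrative glue expression could look like the following; the ./certs.nix path, the admin mapping, and the attrset shape are assumptions inferred from the certs.<role>.key and certs.<role>.cert accesses in the diff:

# Hypothetical wiring; nothing here is taken verbatim from the repository.
let
  gen = import ./certs.nix;   # assumed location of the expression above
in
{
  ca        = { cert = gen.ca_pem;           key = gen.ca_key; };
  etcd      = { cert = gen.etcd_cert;        key = gen.etcd_key; };
  apiserver = { cert = gen.apiserver_cert;   key = gen.apiserver_key; };
  # guess: the etcd client pair doubles as the admin credentials used for ETCDCTL_*
  admin     = { cert = gen.etcd_client_cert; key = gen.etcd_client_key; };
  # per-node certificates (certs.${instance}) are not produced by the expression above
}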
244 lib/k8s.nix
@@ -1,114 +1,52 @@
{ pkgs, masterAddress, etcdNodes, clusterHosts, certs, ...}:
{ pkgs, lib, settings, ...}:
with lib;
let
kubeApiserver = "https://${masterAddress}:8443";
localApiserver = "http://127.0.0.1:8080";
etcdEndpoints = builtins.map (x: "https://${x}:2379") etcdNodes;
etcdCluster = builtins.map (x: "${x}=https://${x}:2380") etcdNodes;
cluster-ca = pkgs.stdenv.mkDerivation {
name = "cluster-ca";
src = ./ca;
buildCommand = ''
mkdir -p $out
cp $src/* $out
'';
};
nixos-kubernetes-join-nodes = workers:
let
wrk = builtins.foldl' (a: s: a + " " + s) "" workers;
in
pkgs.writeScriptBin "nixos-kubernetes-join-nodes" ''
#!/bin/sh
set -e
token=$(cat /var/lib/cfssl/apitoken.secret)
for i in ${wrk}; do
ssh root@$i "echo $token | sh nixos-kubernetes-node-join"
done
'';
cidr = "10.10.0.0/16";
in
rec {
etcdConfig = name: {
services.etcd = {
inherit name;
enable = true;
listenClientUrls = [ "https://0.0.0.0:2379" ];
listenPeerUrls = [ "https://0.0.0.0:2380" ];
peerClientCertAuth = true;
keyFile = certs.etcd.key;
certFile = certs.etcd.cert;
trustedCaFile = certs.ca.cert;
advertiseClientUrls = [ "https://${name}:2379" ];
initialAdvertisePeerUrls = [ "https://${name}:2380" ];
initialCluster = etcdCluster;
};
environment.variables = {
ETCDCTL_KEY_FILE = "${certs.admin.key}";
ETCDCTL_CERT_FILE = "${certs.admin.cert}";
ETCDCTL_CA_FILE = "${certs.ca.cert}";
ETCDCTL_PEERS = "https://127.0.0.1:2379";
};
networking.firewall.allowedTCPPorts = [ 2379 2380 ];
systemd.services.flannel.after = [ "etcd.service" ];
};

clientConf = instance: {
server = kubeApiserver;
keyFile = certs.${instance}.key;
certFile = certs.${instance}.cert;
caFile = certs.ca.cert;
};

kubeNode = instance: {
services.kubernetes = rec {
inherit masterAddress;
roles = [ "node" ];
kubeconfig = clientConf instance;
kubelet = {
enable = true;
clientCaFile = certs.ca.cert;
tlsKeyFile = certs.${instance}.key;
tlsCertFile = certs.${instance}.cert;
networkPlugin = null;
clusterDns = "10.0.0.254";
extraOpts = "--runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice";
inherit kubeconfig;
};
};
networking = {
firewall = {
enable = true;
allowedTCPPorts = [ 4194 10250 ];
# allowedUDPPorts = [ 53 ];
extraCommands = ''iptables -m comment --comment "pod external access" -t nat -A POSTROUTING ! -d 10.10.0.0/16 -m addrtype ! --dst-type LOCAL -j MASQUERADE'';
};
};
virtualisation.docker.extraOptions = "--insecure-registry 10.0.0.0/8";
virtualisation.docker.autoPrune.enable = true;
};

kubeMaster = {
services.cfssl.ca = "${cluster-ca}/ca.pem";
services.cfssl.caKey = "${cluster-ca}/ca-key.pem";
services.kubernetes = {
roles = [ "master" ];
masterAddress = settings.master;
apiserverAddress = settings.apiserverAddress;
clusterCidr = cidr;
kubelet.unschedulable = false;
pki.genCfsslCACert = false;
pki.caCertPathPrefix = "${cluster-ca}/ca";
apiserver = {
bindAddress = "0.0.0.0"; #masterAddress;
advertiseAddress = masterAddress;
advertiseAddress = settings.masterAddress;
authorizationMode = [ "Node" "RBAC" ];
securePort = 8443;
tlsKeyFile = certs.apiserver.key;
tlsCertFile = certs.apiserver.cert;
clientCaFile = certs.ca.cert;
kubeletClientCaFile = certs.ca.cert;
kubeletClientKeyFile = certs.apiserver.key;
kubeletClientCertFile = certs.apiserver.cert;
serviceAccountKeyFile = certs.apiserver.key;
etcd = {
servers = etcdEndpoints;
keyFile = certs.apiserver.key;
certFile = certs.apiserver.cert;
caFile = certs.ca.cert;
insecurePort = 8080;
extraOpts = "--requestheader-client-ca-file ${cluster-ca}/ca.pem";
};
};
scheduler.leaderElect = true;
controllerManager = {
leaderElect = true;
serviceAccountKeyFile = certs.apiserver.key;
# rootCaFile = certs.ca.cert;
kubeconfig.server = localApiserver;
};
scheduler.kubeconfig.server = localApiserver;
addons.dns.enable = true;
addons.dns.reconcileMode = "EnsureExists";
addons.dashboard = rec {
addons = {
dns = {
enable = true;
version = "v1.10.0";
rbac.enable = true;
rbac.clusterAdmin = true;
# tokenTtl = 0;
image = {
imageName = "k8s.gcr.io/kubernetes-dashboard-amd64";
imageDigest = "sha256:1d2e1229a918f4bc38b5a3f9f5f11302b3e71f8397b492afac7f273a0008776a";
finalImageTag = version;
sha256 = "10qkqqhzkr0bcv0dlf8nq069h190pw6zjj1l5s5g438g80v8639j";
# clusterDomain = "local";
reconcileMode = "EnsureExists";
};
};
};
@@ -116,39 +54,50 @@ rec {
allowedTCPPorts = [ 53 5000 8080 8443 ]; #;4053 ];
allowedUDPPorts = [ 53 4053 ];
};
environment.systemPackages = [ pkgs.kubernetes-helm ];
environment.systemPackages = [
pkgs.kubernetes-helm
(nixos-kubernetes-join-nodes settings.workers)
];
};

kubeConfig = instance: {
services.kubernetes = {
# caFile = certs.ca.cert;
flannel.enable = true;
clusterCidr = "10.10.0.0/16";
proxy = {
kubeconfig = clientConf "kube-proxy";
kubeWorker = {
services.kubernetes = rec {
roles = [ "node" ];
clusterCidr = cidr;
masterAddress = settings.master;
apiserverAddress = settings.apiserverAddress;
};
networking = {
firewall = {
enable = true;
allowedTCPPorts = [ 4194 10250 ];
allowedUDPPorts = [ 53 ];
extraCommands = ''iptables -m comment --comment "pod external access" -t nat -A POSTROUTING ! -d 10.10.0.0/16 -m addrtype ! --dst-type LOCAL -j MASQUERADE'';
};
};
virtualisation.docker.extraOptions = "--insecure-registry 10.0.0.0/8";
virtualisation.docker.autoPrune.enable = true;
};

nixosConfig = instance: {
baseNixos = name: {
imports = [
(../nixos/hardware-configuration + "/${instance}.nix")
(../nixos/hardware-configuration + "/${name}.nix")
../nixos/configuration.nix
];
security.pki.certificateFiles = [
certs.ca.cert
"${cluster-ca}/ca.pem"
];
services.glusterfs = {
enable = true;
tlsSettings = {
caCert = certs.ca.cert;
tlsKeyPath = certs.${instance}.key;
tlsPem = certs.${instance}.cert;
};
};
# services.glusterfs = {
# enable = true;
# # tlsSettings = {
# # caCert = certs.ca.caFile;
# # tlsKeyPath = certs.self.keyFile;
# # tlsPem = certs.self.certFile;
# };
# };
networking = {
hostName = instance;
extraHosts = clusterHosts;
hostName = name;
extraHosts = settings.clusterHosts;
# nameservers = [ masterAddress ];
# dhcpcd.extraConfig = ''
# static domain_name_servers=${masterAddress}
@@ -159,46 +108,29 @@ rec {
};
};

plain = ip: name: { config, lib, pkgs, ... }:
apiserver = ip: name: self:
{
deployment.targetHost = ip;
require = [
(nixosConfig name)
];
};

worker = ip: name: { config, lib, pkgs, ... }:
{
deployment.targetHost = ip;
require = [
(nixosConfig name)
(kubeConfig name)
(kubeNode name)
];
services.kubernetes.addons.dns.enable = false;
};

server = ip: name: etc: { config, lib, pkgs, ... }:
{
deployment.targetHost = ip;
require = [
(nixosConfig name)
(etcdConfig etc)
(kubeConfig name)
(kubeNode name)
];
services.kubernetes.addons.dns.enable = false;
};

apiserver = ip: name: etc: { config, lib, pkgs, ... }:
{
deployment.targetHost = ip;
require = [
(nixosConfig name)
(etcdConfig etc)
(baseNixos name)
kubeMaster
(kubeConfig name)
(kubeNode name)
];
};

worker = ip: name: self:
{
deployment.targetHost = ip;
require = [
(baseNixos name)
kubeWorker
];
};

host = ip: name: self:
{
deployment.targetHost = ip;
require = [
(baseNixos name)
];
};
}
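After this change, lib/k8s.nix exports apiserver, worker, and host, each taking an IP and a hostname and returning a machine module with deployment.targetHost set, which suggests use from a NixOps-style network expression. A minimal sketch, assuming the settings attrset sketched earlier and placeholder hosts and IPs:

# Hypothetical network.nix; hostnames, IPs and ./settings.nix are placeholders.
let
  pkgs = import <nixpkgs> {};
  k8s = import ./lib/k8s.nix {
    inherit pkgs;
    inherit (pkgs) lib;
    settings = import ./settings.nix;
  };
in
{
  network.description = "kubernetes cluster";
  k8s0-0 = k8s.apiserver "10.253.18.100" "k8s0-0";  # master
  k8s0-1 = k8s.worker "10.253.18.101" "k8s0-1";     # joined later via nixos-kubernetes-join-nodes
  k8s0-2 = k8s.worker "10.253.18.102" "k8s0-2";
}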