Migration to new setup
@@ -3,7 +3,7 @@ let
   pki = pkgs.callPackage ../lib/pki.nix {};
 in
 {
-  initca = pki.initca;
+  # initca = pki.initca;
   ca = pki.ca;
   apiserver = pki.apiserver ''
     "10.253.18.109",
kube1/configure.sh (new normal file, 9 lines)
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+helm init
+echo "Waiting for tiller"
+sleep 30
+
+
+helm install --namespace kube-system --name ifs1 -f ifs1.yaml stable/nfs-client-provisioner
+
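As a usage note (not part of the commit): a minimal sketch of running configure.sh against the freshly deployed master. The host name k1-0, root SSH access, and an ifs1.yaml values file for the nfs-client-provisioner chart are assumptions here.

    # Hypothetical invocation from a workstation; assumes helm and kubectl on k1-0
    # already talk to the new cluster.
    scp configure.sh ifs1.yaml root@k1-0:
    ssh root@k1-0 'bash ./configure.sh'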
kube1/deploy.sh (new executable file, 25 lines)
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+id=kube1
+
+# if [ $# = 0 ]; then
+#   echo "usage: deploy.sh name ..."
+#   exit 1
+# fi
+
+if [ ! -f ./deployment.nix ]; then
+  echo "error: ./ does not contain a deployment"
+  exit 1
+fi
+
+# mkdir -p $1/gcroots
+
+# echo "--- Securing certificates"
+# nix-build -o $1/gcroots/certs $1/build.nix
+
+echo "--- Updating deployment"
+nixops modify -d $id ./deployment.nix
+
+echo "--- Deploying $id"
+nixops deploy -d $id --allow-reboot
+
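deploy.sh only runs nixops modify/deploy, so it assumes a deployment named kube1 already exists. A sketch of the one-time setup, assuming NixOps 1.x:

    # One-time: register the deployment under the name deploy.sh expects ($id=kube1).
    nixops create ./deployment.nix -d kube1
    # Afterwards, routine updates go through the script:
    ./deploy.sh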
@@ -1,38 +1,30 @@
 with import <nixpkgs> {};
 let
-  certs = pkgs.callPackage ./certs.nix {};
-  pki = pkgs.callPackage ../lib/pki.nix {};
-  cluster = callPackage ../lib/k8s.nix {
-    masterNode = "10.253.18.109";
-    etcdNodes = [ "etcd0" "etcd1" ];
+  settings = rec {
+    master = "k1-0";
+    workers = [ "k1-1" "k1-2" ];
+    masterAddress = "10.253.18.109";
+    apiserverAddress = "https://${masterAddress}:8443";
     clusterHosts = ''
-      10.253.18.109 k1-0 etcd0 kubernetes fs0-2
-      10.253.18.110 k1-1 etcd1
+      10.253.18.109 k1-0 kubernetes fs0-2
+      10.253.18.110 k1-1
       10.253.18.111 k1-2
       10.253.18.106 fs0-0
       10.1.2.164 fs0-1
       10.253.18.100 k0-0
       10.253.18.100 gitlab.itpartner.no registry.itpartner.no minio.itpartner.no
       10.253.18.109 gitlab.k1.local registry.k1.local minio.k1.local
       10.253.18.100 itp-registry itp-registry.local
     '';
-    certs = {
-      ca = certs.ca;
-      apiserver = pki.toSet certs.apiserver;
-      kube-proxy = pki.toSet certs.kube-proxy;
-      admin = pki.toSet certs.admin;
-      etcd = pki.toSet certs.etcd;
-      k1-0 = pki.toSet certs.k1-0;
-      k1-1 = pki.toSet certs.k1-1;
-      k1-2 = pki.toSet certs.k1-2;
-    };
   };
+  cluster = callPackage ./k8s.nix { inherit settings; };
 in
 {
-  k1-0 = { ... }:
+  # k1-0 = cluster.host "10.253.18.109" "k1-0";
+  # k1-1 = cluster.host "10.253.18.110" "k1-1";
+  # k1-2 = cluster.host "10.253.18.111" "k1-2";
+  k1-0 = self:
    {
-     require = [ (cluster.apiserver "10.253.18.109" "k1-0" "etcd0") ];
+     require = [ (cluster.apiserver "10.253.18.109" "k1-0") ];
    };
-  k1-1 = cluster.server "10.253.18.110" "k1-1" "etcd1";
+  k1-1 = cluster.worker "10.253.18.110" "k1-1";
   k1-2 = cluster.worker "10.253.18.111" "k1-2";
 }
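A hedged sketch of rolling out a change to a single machine in this deployment; the host name k1-1 is only an example, and --include limits the NixOps rollout to the listed hosts:

    # Re-read the edited deployment, then deploy only one host instead of the whole cluster.
    nixops modify -d kube1 ./deployment.nix
    nixops deploy -d kube1 --include k1-1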
kube1/k8s.nix (new normal file, 136 lines)
@@ -0,0 +1,136 @@
+{ pkgs, lib, settings, ...}:
+with lib;
+let
+  cluster-ca = pkgs.stdenv.mkDerivation {
+    name = "cluster-ca";
+    src = ./ca;
+    buildCommand = ''
+      mkdir -p $out
+      cp $src/* $out
+    '';
+  };
+  nixos-kubernetes-join-nodes = workers:
+    let
+      wrk = builtins.foldl' (a: s: a + " " + s) "" workers;
+    in
+    pkgs.writeScriptBin "nixos-kubernetes-join-nodes" ''
+      #!/bin/sh
+      set -e
+      token=$(cat /var/lib/cfssl/apitoken.secret)
+      for i in ${wrk}; do
+        ssh root@$i "echo $token | sh nixos-kubernetes-node-join"
+      done
+    '';
+  cidr = "10.10.0.0/16";
+in
+rec {
+  kubeMaster = {
+    services.cfssl.ca = "${cluster-ca}/ca.pem";
+    services.cfssl.caKey = "${cluster-ca}/ca-key.pem";
+    services.kubernetes = {
+      roles = [ "master" ];
+      masterAddress = settings.master;
+      apiserverAddress = settings.apiserverAddress;
+      clusterCidr = cidr;
+      kubelet.unschedulable = false;
+      pki.genCfsslCACert = false;
+      pki.caCertPathPrefix = "${cluster-ca}/ca";
+      apiserver = {
+        advertiseAddress = settings.masterAddress;
+        authorizationMode = [ "Node" "RBAC" ];
+        securePort = 8443;
+        insecurePort = 8080;
+        extraOpts = "--requestheader-client-ca-file ${cluster-ca}/ca.pem";
+      };
+      addons = {
+        dns = {
+          enable = true;
+          # clusterDomain = "local";
+          reconcileMode = "EnsureExists";
+        };
+      };
+    };
+    networking.firewall = {
+      allowedTCPPorts = [ 53 5000 8080 8443 ]; #;4053 ];
+      allowedUDPPorts = [ 53 4053 ];
+    };
+    environment.systemPackages = [
+      pkgs.kubernetes-helm
+      (nixos-kubernetes-join-nodes settings.workers)
+    ];
+  };
+
+  kubeWorker = {
+    services.kubernetes = rec {
+      roles = [ "node" ];
+      clusterCidr = cidr;
+      masterAddress = settings.master;
+      apiserverAddress = settings.apiserverAddress;
+    };
+    networking = {
+      firewall = {
+        enable = true;
+        allowedTCPPorts = [ 4194 10250 ];
+        allowedUDPPorts = [ 53 ];
+        extraCommands = ''iptables -m comment --comment "pod external access" -t nat -A POSTROUTING ! -d 10.10.0.0/16 -m addrtype ! --dst-type LOCAL -j MASQUERADE'';
+      };
+    };
+    virtualisation.docker.extraOptions = "--insecure-registry 10.0.0.0/8";
+    virtualisation.docker.autoPrune.enable = true;
+  };
+
+  baseNixos = name: {
+    imports = [
+      (../nixos/hardware-configuration + "/${name}.nix")
+      ../nixos/configuration.nix
+    ];
+    security.pki.certificateFiles = [
+      "${cluster-ca}/ca.pem"
+    ];
+    # services.glusterfs = {
+    #   enable = true;
+    #   # tlsSettings = {
+    #   #   caCert = certs.ca.caFile;
+    #   #   tlsKeyPath = certs.self.keyFile;
+    #   #   tlsPem = certs.self.certFile;
+    #   };
+    # };
+    networking = {
+      hostName = name;
+      extraHosts = settings.clusterHosts;
+      # nameservers = [ masterAddress ];
+      # dhcpcd.extraConfig = ''
+      #   static domain_name_servers=${masterAddress}
+      # '';
+      firewall.allowedTCPPortRanges = [ { from = 5000; to = 50000; } ];
+      firewall.allowedTCPPorts = [ 80 443 111 ];
+      firewall.allowedUDPPorts = [ 111 24007 24008 ];
+    };
+  };
+
+  apiserver = ip: name: self:
+    {
+      deployment.targetHost = ip;
+      require = [
+        (baseNixos name)
+        kubeMaster
+      ];
+    };
+
+  worker = ip: name: self:
+    {
+      deployment.targetHost = ip;
+      require = [
+        (baseNixos name)
+        kubeWorker
+      ];
+    };
+
+  host = ip: name: self:
+    {
+      deployment.targetHost = ip;
+      require = [
+        (baseNixos name)
+      ];
+    };
+}
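A possible smoke test after deploying: run the join helper defined above on the master, then check that the workers registered. This assumes root SSH access to k1-0 and that kubectl on the master is configured for the cluster.

    # Join the workers listed in settings.workers, then verify nodes and kube-system pods.
    ssh root@k1-0 nixos-kubernetes-join-nodes
    ssh root@k1-0 'kubectl get nodes -o wide'
    ssh root@k1-0 'kubectl -n kube-system get pods'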