WIP: Restructuring to new scheme
clusters/kube1/default.nix (Normal file, +30)
@@ -0,0 +1,30 @@
with import <nixpkgs> {};

let
  settings = rec {
    master = "k1-0";
    workers = [ "k1-1" "k1-2" ];
    masterAddress = "10.253.18.109";
    apiserverAddress = "https://${masterAddress}:8443";
    clusterHosts = ''
      10.253.18.109 k1-0 kubernetes fs0-2
      10.253.18.110 k1-1
      10.253.18.111 k1-2
      10.253.18.106 fs0-0
      10.1.2.164 fs0-1
      10.253.18.100 k0-0
      10.253.18.100 gitlab.itpartner.no registry.itpartner.no minio.itpartner.no
    '';
  };
  cluster = callPackage ./k8s.nix { inherit settings; };
in
{
  # k1-0 = cluster.host "10.253.18.109" "k1-0";
  # k1-1 = cluster.host "10.253.18.110" "k1-1";
  # k1-2 = cluster.host "10.253.18.111" "k1-2";
  k1-0 = self:
    {
      require = [ (cluster.apiserver "10.253.18.109" "k1-0") ];
    };
  k1-1 = cluster.worker "10.253.18.110" "k1-1";
  k1-2 = cluster.worker "10.253.18.111" "k1-2";
}
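Note: cluster.worker and cluster.apiserver are defined in k8s.nix below; an entry like k1-1 above is shorthand for a NixOps machine function of roughly this shape (expanded by hand for illustration):

    self:
    {
      deployment.targetHost = "10.253.18.110";
      require = [ (cluster.baseNixos "k1-1") cluster.kubeWorker ];
    }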
clusters/kube1/deploy.sh (Executable file, +25)
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

set -e  # stop on the first failing command

id=kube1

# if [ $# = 0 ]; then
#   echo "usage: deploy.sh name ..."
#   exit 1
# fi

if [ ! -f ./deployment.nix ]; then
  echo "error: ./ does not contain a deployment"
  exit 1
fi

# mkdir -p $1/gcroots

# echo "--- Securing certificates"
# nix-build -o $1/gcroots/certs $1/build.nix

echo "--- Updating deployment"
nixops modify -d "$id" ./deployment.nix

echo "--- Deploying $id"
nixops deploy -d "$id" --allow-reboot
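nixops modify only updates an existing deployment, so the script assumes one named kube1 has already been registered. On first use it would be created once, along these lines (deployment.nix is referenced here but is not part of this commit):

    nixops create -d kube1 ./deployment.nix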
clusters/kube1/k1-0.nix (Normal file, +21)
@@ -0,0 +1,21 @@
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, ... }:

{
  imports = [ ];

  boot.initrd.availableKernelModules = [ "ata_piix" "mptspi" "floppy" "sd_mod" "sr_mod" ];
  boot.kernelModules = [ ];
  boot.extraModulePackages = [ ];

  fileSystems."/" =
    { device = "/dev/disk/by-uuid/2e7ba83d-014f-4ef5-a1ce-fc9e34ce7b83";
      fsType = "ext4";
    };

  swapDevices = [ ];

  nix.maxJobs = lib.mkDefault 1;
}
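These k1-*.nix hardware files (this one and the two that follow) are plain nixos-generate-config output. To regenerate one for a host, something like this works:

    ssh root@k1-0 nixos-generate-config --show-hardware-config > k1-0.nix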
clusters/kube1/k1-1.nix (Normal file, +21)
@@ -0,0 +1,21 @@
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, ... }:

{
  imports = [ ];

  boot.initrd.availableKernelModules = [ "ata_piix" "mptspi" "floppy" "sd_mod" "sr_mod" ];
  boot.kernelModules = [ ];
  boot.extraModulePackages = [ ];

  fileSystems."/" =
    { device = "/dev/disk/by-uuid/70b9d730-9cb6-48e2-8e00-fa78c8feefdf";
      fsType = "ext4";
    };

  swapDevices = [ ];

  nix.maxJobs = lib.mkDefault 1;
}
clusters/kube1/k1-2.nix (Normal file, +21)
@@ -0,0 +1,21 @@
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, ... }:

{
  imports = [ ];

  boot.initrd.availableKernelModules = [ "ata_piix" "mptspi" "floppy" "sd_mod" "sr_mod" ];
  boot.kernelModules = [ ];
  boot.extraModulePackages = [ ];

  fileSystems."/" =
    { device = "/dev/disk/by-uuid/83bb471d-1db7-4c0b-b8aa-8111730a1ea9";
      fsType = "ext4";
    };

  swapDevices = [ ];

  nix.maxJobs = lib.mkDefault 1;
}
clusters/kube1/k8s.nix (Normal file, +136)
@@ -0,0 +1,136 @@
{ pkgs, lib, settings, ... }:

with lib;

let
  # Package the pre-generated cluster CA from ./ca into the store so the
  # services below can reference it.
  cluster-ca = pkgs.stdenv.mkDerivation {
    name = "cluster-ca";
    src = ./ca;
    buildCommand = ''
      mkdir -p $out
      cp $src/* $out
    '';
  };
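  # The ./ca directory is expected to hold ca.pem and ca-key.pem (see
  # services.cfssl and pki.caCertPathPrefix below). One way to produce such
  # a key pair with cfssl, assuming a CSR spec in ca-csr.json:
  #
  #   cfssl gencert -initca ca-csr.json | cfssljson -bare ca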
  # Helper installed on the master: joins each worker to the cluster by
  # piping the cfssl API token into its nixos-kubernetes-node-join script.
  nixos-kubernetes-join-nodes = workers:
    let
      wrk = builtins.foldl' (a: s: a + " " + s) "" workers;
    in
    pkgs.writeScriptBin "nixos-kubernetes-join-nodes" ''
      #!/bin/sh
      set -e
      token=$(cat /var/lib/cfssl/apitoken.secret)
      for i in ${wrk}; do
        ssh root@$i "echo $token | sh nixos-kubernetes-node-join"
      done
    '';
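  # Intended workflow (an assumption based on the script above): after
  # "nixops deploy", run nixos-kubernetes-join-nodes as root on the master.
  # It reads the cfssl API token and feeds it to nixos-kubernetes-node-join
  # on every worker, so it needs root SSH access to them.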
  # Pod network CIDR shared by master and workers.
  cidr = "10.10.0.0/16";
in
rec {
  kubeMaster = {
    services.cfssl.ca = "${cluster-ca}/ca.pem";
    services.cfssl.caKey = "${cluster-ca}/ca-key.pem";
    services.kubernetes = {
      roles = [ "master" ];
      masterAddress = settings.master;
      apiserverAddress = settings.apiserverAddress;
      clusterCidr = cidr;
      # Allow pods to be scheduled on the master as well.
      kubelet.unschedulable = false;
      # Use the pre-generated CA instead of having cfssl create one; the
      # prefix resolves to ${cluster-ca}/ca.pem and ${cluster-ca}/ca-key.pem.
      pki.genCfsslCACert = false;
      pki.caCertPathPrefix = "${cluster-ca}/ca";
      apiserver = {
        advertiseAddress = settings.masterAddress;
        authorizationMode = [ "Node" "RBAC" ];
        securePort = 8443;
        insecurePort = 8080;
        extraOpts = "--requestheader-client-ca-file ${cluster-ca}/ca.pem";
      };
      addons = {
        dns = {
          enable = true;
          # clusterDomain = "local";
          reconcileMode = "EnsureExists";
        };
      };
    };
    networking.firewall = {
      allowedTCPPorts = [ 53 5000 8080 8443 ]; # 4053 (disabled)
      allowedUDPPorts = [ 53 4053 ];
    };
    environment.systemPackages = [
      pkgs.kubernetes-helm
      (nixos-kubernetes-join-nodes settings.workers)
    ];
  };

  kubeWorker = {
    services.kubernetes = rec {
      roles = [ "node" ];
      clusterCidr = cidr;
      masterAddress = settings.master;
      apiserverAddress = settings.apiserverAddress;
    };
    networking = {
      firewall = {
        enable = true;
        allowedTCPPorts = [ 4194 10250 ];
        allowedUDPPorts = [ 53 ];
        # NAT pod traffic that leaves the cluster network.
        extraCommands = ''iptables -m comment --comment "pod external access" -t nat -A POSTROUTING ! -d 10.10.0.0/16 -m addrtype ! --dst-type LOCAL -j MASQUERADE'';
      };
    };
    # The range covers registry.itpartner.no (10.253.18.100 in clusterHosts).
    virtualisation.docker.extraOptions = "--insecure-registry 10.0.0.0/8";
    virtualisation.docker.autoPrune.enable = true;
  };

  # Common configuration for every machine in the cluster.
  baseNixos = name: {
    imports = [
      (../nixos/hardware-configuration + "/${name}.nix")
      ../nixos/configuration.nix
    ];
    security.pki.certificateFiles = [
      "${cluster-ca}/ca.pem"
    ];
    # services.glusterfs = {
    #   enable = true;
    #   # tlsSettings = {
    #   #   caCert = certs.ca.caFile;
    #   #   tlsKeyPath = certs.self.keyFile;
    #   #   tlsPem = certs.self.certFile;
    #   # };
    # };
    networking = {
      hostName = name;
      extraHosts = settings.clusterHosts;
      # nameservers = [ masterAddress ];
      # dhcpcd.extraConfig = ''
      #   static domain_name_servers=${masterAddress}
      # '';
      firewall.allowedTCPPortRanges = [ { from = 5000; to = 50000; } ];
      firewall.allowedTCPPorts = [ 80 443 111 ];
      firewall.allowedUDPPorts = [ 111 24007 24008 ];
    };
  };

  # Machine builders: each takes an IP and a host name and returns a
  # NixOps machine function (self: { ... }).
  apiserver = ip: name: self:
    {
      deployment.targetHost = ip;
      require = [
        (baseNixos name)
        kubeMaster
      ];
    };

  worker = ip: name: self:
    {
      deployment.targetHost = ip;
      require = [
        (baseNixos name)
        kubeWorker
      ];
    };

  host = ip: name: self:
    {
      deployment.targetHost = ip;
      require = [
        (baseNixos name)
      ];
    };
}
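Taken together, a first bring-up of the cluster might look like this (a sketch, assuming deployment.nix imports default.nix; that file is referenced by deploy.sh but not included in this commit):

    nixops create -d kube1 ./deployment.nix    # one-time: register the deployment
    ./deploy.sh                                # nixops modify + nixops deploy
    ssh root@k1-0 nixos-kubernetes-join-nodes  # enroll k1-1 and k1-2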