fix: add k8s and hpc modules to main repo

This commit is contained in:
Jonas Juselius
2025-06-30 12:21:05 +02:00
parent 4aa9fa677a
commit bc3a034654
46 changed files with 4393 additions and 0 deletions

476
modules/k8s/default.nix Normal file
View File

@@ -0,0 +1,476 @@
{ pkgs, lib, config, ...}:
with lib;
let
cfg = config.features.k8s;
host = config.features.host;
pki = import ./pki.nix { inherit pkgs; ca = cfg.initca; };
secret = name: "${config.services.kubernetes.secretsPath}/${name}.pem";
# Build an entry for services.kubernetes.pki.certs: a certmgr certificate
# spec whose CA cert, cert and key paths all live under the cluster secrets
# directory (see `secret` above).
mkEasyCert = { name, CN, hosts ? [], fields ? {}, action ? "",
  privateKeyOwner ? "kubernetes" }: rec {
  inherit name CN hosts fields action;
  caCert = secret "ca";
  cert = secret name;
  key = secret "${name}-key";
  # Private key readable only by its owner; `path` reuses `key` via rec.
  privateKeyOptions = {
    owner = privateKeyOwner;
    group = "nogroup";
    mode = "0600";
    path = key;
  };
};
hostName = config.networking.hostName;
domain = config.networking.domain;
# The apiserver is served on the non-default secure port 4443 (see kubeMaster).
apiserverAddress = "https://${cfg.master.address}:4443";
# Shared cfssl API token: 16 random bytes rendered as 32 hex characters.
# NOTE(review): the token is read from /dev/urandom at build time, so the
# output is NOT reproducible — it is stable only as long as the store path
# is not rebuilt (e.g. after a nix-collect-garbage).
cfssl-apitoken =
  let
    version = "1.0";
    apitoken = pkgs.stdenv.mkDerivation {
      name = "apitoken-${version}";
      inherit version;
      buildCommand = ''
        head -c ${toString (32 / 2)} /dev/urandom | \
          od -An -t x | tr -d ' ' > $out
        chmod 400 $out
      '';
    };
  in
  # make ca derivation sha depend on initca cfssl output
  pkgs.stdenv.mkDerivation {
    name = "cfssl-apitoken";
    src = apitoken;
    buildCommand = ''
      cp $src $out
    '';
  };
# Admin helper scripts installed on the master (environment.systemPackages).
cluster-scripts =
  with builtins;
  let
    # Comma-joined ingress node names, e.g. "n1,n2,n3".
    first = head cfg.ingressNodes;
    rest = tail cfg.ingressNodes;
    ingressNodes = foldl' (a: x: a + ",${x}") first rest;
    # Space-separated node names, master first.
    nodeNames = foldl' (a: x: a + " " + x.name) cfg.master.name cfg.nodes;
    ingressReplicaCount =
      toString (length cfg.ingressNodes);
    # Comma-joined etcd member addresses; falls back to the master address
    # when no dedicated etcd cluster is configured.
    etcdNodes =
      let
        etcdaddrs = attrValues cfg.etcdCluster.nodes;
        first = head etcdaddrs;
        rest = tail etcdaddrs;
      in
      if cfg.etcdCluster.enable && length etcdaddrs > 0
      # Fixed: the fold previously swapped accumulator and element
      # ((x: a: a + ",${x}")), emitting the addresses in reverse order and
      # inconsistently with the ingressNodes fold above.
      then foldl' (a: x: a + ",${x}") first rest
      else "${cfg.master.address}";
    # Script that prints the configuration consumed by the k8s-charts
    # deployment scripts; all values are expanded at nix build time.
    show-kubernetes-charts-config = ''
      #!/usr/bin/env bash
      cat << EOF
      # Generated by show-kubernetes-charts-config
      # $(date)
      # Charts in git@gitlab.com:serit/k8s/k8s-charts
      top="\$(cd "\$(dirname "\$BASH_SOURCE[0]")" >/dev/null 2>&1 && pwd)"
      vars=(
      initca="${pki.initca}"
      apiserver="${cfg.master.name}"
      apiserverip="${cfg.master.address}"
      etcd_nodes="${etcdNodes}"
      cluster="${cfg.clusterName}"
      ingress_nodes="${ingressNodes}"
      ingress_replica_count="${ingressReplicaCount}"
      fileserver="${cfg.fileserver}"
      acme_email="${cfg.charts.acme_email}"
      grafana_smtp_user="$(echo -n ${cfg.charts.grafana_smtp_user} | base64 -w0)"
      grafana_smtp_password="$(echo -n ${cfg.charts.grafana_smtp_password} | base64 -w0)"
      )
      nodenames=( ${nodeNames} )
      nodes=(${builtins.foldl' (a: x: a + " " + x.address) cfg.master.address cfg.nodes})
      . \$top/functions.sh
      EOF
    '';
  in
  pkgs.stdenv.mkDerivation {
    name = "cluster-scripts";
    src = ./scripts;
    buildCommand = ''
      mkdir -p $out/bin
      cp $src/* $out/bin
      echo '${show-kubernetes-charts-config}' > $out/bin/show-kubernetes-charts-config
      chmod a+x $out/bin/show-kubernetes-charts-config
      # The restart scripts need @master@/@nodes@ substituted, so replace
      # the verbatim copies from $src with sed-expanded versions.
      rm $out/bin/restart-flannel.sh $out/bin/restart-kubernetes.sh
      sed 's/@master@/${cfg.master.name}/; s/@nodes@/${nodeNames}/' \
        $src/restart-flannel.sh > $out/bin/restart-flannel.sh
      chmod a+x $out/bin/restart-flannel.sh
      sed 's/@master@/${cfg.master.name}/; s/@nodes@/${nodeNames}/' \
        $src/restart-kubernetes.sh > $out/bin/restart-kubernetes.sh
      chmod a+x $out/bin/restart-kubernetes.sh
    '';
  };
# Script for (re)joining this host to an already-running etcd cluster.
# It wipes the local etcd data dir and starts etcd with INITIAL_CLUSTER set
# to this host plus any "name=https://addr:2380" peers given as arguments.
etcd-cluster-scripts =
  let
    etcd-join-cluster = ''
      #!/usr/bin/env bash
      export ETCD_ADVERTISE_CLIENT_URLS=https://${host.address}:2379
      export ETCD_CERT_FILE=/var/lib/kubernetes/secrets/etcd.pem
      export ETCD_CLIENT_CERT_AUTH=1
      export ETCD_DATA_DIR=/var/lib/etcd
      export ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${host.address}:2380
      export ETCD_INITIAL_CLUSTER=${host.name}=https://${host.address}:2380
      export ETCD_INITIAL_CLUSTER_STATE=existing
      export ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
      export ETCD_KEY_FILE=/var/lib/kubernetes/secrets/etcd-key.pem
      export ETCD_LISTEN_CLIENT_URLS=https://${host.address}:2379
      export ETCD_LISTEN_PEER_URLS=https://${host.address}:2380
      export ETCD_NAME=${host.name}
      export ETCD_PEER_CERT_FILE=/var/lib/kubernetes/secrets/etcd.pem
      export ETCD_PEER_KEY_FILE=/var/lib/kubernetes/secrets/etcd-key.pem
      export ETCD_PEER_TRUSTED_CA_FILE=/var/lib/kubernetes/secrets/ca.pem
      export ETCD_TRUSTED_CA_FILE=/var/lib/kubernetes/secrets/ca.pem
      for i in $*; do
        ETCD_INITIAL_CLUSTER=$ETCD_INITIAL_CLUSTER,$i
      done
      if [ "x${builtins.toString cfg.master.enable}" = x1 ]; then
        echo "Refusing to run on master node! Exiting."
        exit 0
      fi
      echo "WARNING! WARNING! WARNING!"
      echo "This script destroys the existing etcd database on the current host!"
      echo "Sleeping 10 seconds before proceeding... ctrl-c is your friend"
      sleep 11 # give one second extra just in case
      systemctl stop etcd.service
      rm -rf /var/lib/etcd/*
      sudo -E -u etcd etcd
    '';
  in
  pkgs.stdenv.mkDerivation {
    name = "etcd-cluster-scripts";
    buildCommand = ''
      mkdir -p $out/bin
      echo '${etcd-join-cluster}' > $out/bin/etcd-join-cluster
      chmod a+x $out/bin/etcd-join-cluster
    '';
  };
# Shell fragment (used as the script of the bootstrap systemd units below)
# that installs the shared cfssl API token: into /var/lib/cfssl on the master
# (read by the cfssl service), into the kubernetes secrets dir elsewhere
# (read by certmgr on worker nodes).
install-apitoken = ''
  #!${pkgs.bash}/bin/bash
  set -e
  if [ "x${builtins.toString cfg.master.enable}" = x1 -a -d /var/lib/cfssl ]; then
    cp ${cfssl-apitoken} /var/lib/cfssl/apitoken.secret
    chown cfssl /var/lib/cfssl/apitoken.secret
    chmod 640 /var/lib/cfssl/apitoken.secret
  else
    mkdir -p /var/lib/kubernetes/secrets
    cp ${cfssl-apitoken} /var/lib/kubernetes/secrets/apitoken.secret
    chown root /var/lib/kubernetes/secrets/apitoken.secret
    chmod 600 /var/lib/kubernetes/secrets/apitoken.secret
  fi
'';
# Configuration shared by every cluster member (master, workers, etcd nodes).
common = {
  # Trust the cluster CA system-wide.
  security.pki.certificateFiles = [ "${pki.initca}/ca.pem" ];
  environment.systemPackages = [
    pkgs.nfs-utils
    etcd-cluster-scripts
  ];
  environment.variables = {
    ETCDCTL_API = "3";
  };
  networking = {
    # Broad NodePort/service range plus http(s); 111/24007/24008 are
    # portmapper and gluster ports for the shared storage.
    firewall.allowedTCPPortRanges = [ { from = 5000; to = 50000; } ];
    firewall.allowedTCPPorts = [ 80 443 111 ];
    firewall.allowedUDPPorts = [ 111 24007 24008 ];
  };
  # Raise inotify limits; the kernel defaults are easily exhausted by pods.
  boot.kernel.sysctl = {
    "fs.inotify.max_user_instances" = 1024;
    "fs.inotify.max_user_watches" = 65536;
  };
};
# Master-node configuration: control plane, cfssl CA service and addons.
kubeMaster = {
  services.cfssl.ca = pki.ca.cert;
  services.cfssl.caKey = pki.ca.key;
  services.kubernetes = {
    roles = [ "master" ];
    inherit apiserverAddress;
    masterAddress = "${cfg.master.name}.${domain}";
    clusterCidr = cfg.cidr;
    # CA cert and API token are pre-generated (pki.nix / cfssl-apitoken),
    # so disable the module's own generation of both.
    pki.genCfsslCACert = false;
    pki.genCfsslAPIToken = false;
    pki.caCertPathPrefix = "${pki.initca}/ca";
    kubelet = {
      # clusterDomain = "${cfg.clusterName}.local";
    };
    apiserver = {
      advertiseAddress = cfg.master.address;
      authorizationMode = [ "Node" "RBAC" ];
      allowPrivileged = true;
      securePort = 4443;
      serviceClusterIpRange = "10.0.0.0/22";
      extraOpts = "--requestheader-client-ca-file ${pki.ca.cert}";
      extraSANs = cfg.master.extraSANs;
      verbosity = 2;
      # Point the apiserver at the external etcd cluster when one is
      # configured; otherwise keep the module's default (local etcd).
      etcd.servers =
        with builtins;
        let clusterNodes = attrValues cfg.etcdCluster.nodes; in
        if cfg.etcdCluster.enable && length clusterNodes > 0 then
          mkForce (map (x: "https://${x}:2379") clusterNodes)
        else [];
    };
    controllerManager = {
      bindAddress = cfg.master.address;
      extraOpts = "--authorization-always-allow-paths=/healthz,/metrics";
    };
    scheduler.address = cfg.master.address;
    scheduler.extraOpts = "--authorization-always-allow-paths=/healthz,/metrics";
    addonManager.enable = true;
    addons = {
      dns = {
        enable = true;
        # clusterDomain = "${cfg.clusterName}.local";
        reconcileMode = "EnsureExists";
      };
    };
  };
  networking.firewall = {
    allowedTCPPorts = [ 53 5000 8080 4443 4001 2379 2380 10250 10251 10252 ];
    allowedUDPPorts = [ 53 4053 ];
  };
  environment.systemPackages = [
    pkgs.kubernetes-helm
    pkgs.kubectl
    cluster-scripts
  ];
  # Install the shared cfssl API token before cfssl starts.
  systemd.services.kube-certmgr-apitoken-bootstrap = {
    description = "Kubernetes certmgr bootstrapper";
    wantedBy = [ "cfssl.service" ];
    # NOTE(review): this orders against "cfssl.target" while wantedBy names
    # "cfssl.service" — confirm the target exists or align the two.
    before = [ "cfssl.target" ];
    script = install-apitoken;
    serviceConfig = {
      RestartSec = "10s";
      Restart = "on-failure";
    };
  };
  # Work around cfssl lock-ups by restarting it every four hours.
  systemd.services.cfssl-restart = {
    enable = true;
    startAt = "00/4:00";
    # Fixed: "Restrart" typo in the unit description.
    description = "Restart cfssl which regularly locks up";
    script = "systemctl restart cfssl.service";
  };
  # Optional host-level socat forwarding 443 -> ingress NodePort 30443.
  systemd.services.kube-socat-https-proxy = {
    enable = cfg.master.socat443;
    wantedBy = [ "kubernetes.target" ];
    after = [ "kubelet.target" ];
    description = "Proxy TCP port 443 to ingress NodePort at 30443";
    script = "${pkgs.socat}/bin/socat TCP-LISTEN:443,fork,reuseaddr TCP:127.0.0.1:30443";
    serviceConfig = {
      RestartSec = "10s";
      Restart = "on-failure";
    };
  };
};
# Configuration for members of the dedicated etcd cluster.
etcdClusterNode = {
  services.etcd = {
    enable = true;
    clientCertAuth = true;
    peerClientCertAuth = true;
    # mkForce: override the kubernetes module's localhost defaults with
    # this host's address.
    listenClientUrls = mkForce ["https://${host.address}:2379"];
    listenPeerUrls = mkForce ["https://${host.address}:2380"];
    advertiseClientUrls = mkForce ["https://${host.address}:2379"];
    initialAdvertisePeerUrls = mkForce ["https://${host.address}:2380"];
    name = "${host.name}";
    certFile = secret "etcd";
    keyFile = secret "etcd-key";
    trustedCaFile = secret "ca";
    # Joining an already-running cluster requires INITIAL_CLUSTER_STATE=existing.
    extraConf =
      if cfg.etcdCluster.existing
      then { "INITIAL_CLUSTER_STATE" = "existing"; }
      else {};
    # All declared members, or just this host when none are declared.
    initialCluster = with builtins;
      if length (attrValues cfg.etcdCluster.nodes) == 0
      then mkForce [ "${host.name}=https://${host.address}:2380" ]
      else mkForce (attrValues
        (mapAttrs (k: v: "${k}=https://${v}:2380") cfg.etcdCluster.nodes));
  };
  # Etcd server/peer certificate issued via certmgr; etcd is restarted on
  # renewal (action).
  services.kubernetes.pki.certs =
    {
      etcd = mkEasyCert {
        name = "etcd";
        CN = host.name;
        hosts = [
          "etcd.local"
          "etcd.cluster.local"
          "etcd.${domain}"
          host.name
          host.address
        ];
        privateKeyOwner = "etcd";
        action = "systemctl restart etcd.service";
      };
    };
  networking.firewall = {
    allowedTCPPorts = [ 2379 2380 ];
  };
};
# Worker-node configuration.
kubeNode = {
  services.kubernetes = rec {
    roles = [ "node" ];
    inherit apiserverAddress;
    # masterAddress = cfg.master.name;
    masterAddress = "${cfg.master.name}.${domain}";
    clusterCidr = cfg.cidr;
    # kubelet.clusterDomain = "${cfg.clusterName}.local";
    kubelet.hostname = "${hostName}";
    proxy.hostname = "${hostName}";
    # Expose kube-proxy metrics on all interfaces (port 10249).
    proxy.extraOpts = "--metrics-bind-address 0.0.0.0:10249";
  };
  networking = {
    firewall = {
      enable = true;
      allowedTCPPorts = [ 4194 10250 ];
      allowedUDPPorts = [ 53 ];
    };
  };
  virtualisation.docker.enable = false; # conflicts with containerd!
  virtualisation.docker.autoPrune.enable = pkgs.lib.mkForce false; # conflicts with linkerd2
  # Install the cfssl API token before certmgr needs it.
  systemd.services.kube-certmgr-apitoken-bootstrap = {
    description = "Kubernetes certmgr bootstrapper";
    wantedBy = [ "certmgr.service" ];
    before = [ "certmgr.service" ];
    script = install-apitoken;
    serviceConfig = {
      RestartSec = "10s";
      Restart = "on-failure";
    };
  };
};
in {
# Option declarations for the k8s feature.
# Fixed: several options declared `types.str`/`types.path`/`types.listOf`
# with `default = null`; null is not a member of those types, so reading an
# unset option aborted evaluation with a type error. They now use
# types.nullOr, which is backward compatible with all existing configs.
options.features.k8s = {
  enable = mkEnableOption "Enable kubernetes";
  # Logical name of this cluster.
  clusterName = mkOption {
    type = types.nullOr types.str;
    default = null;
  };
  # Worker node descriptions ({ name, address, ... } attrsets).
  nodes = mkOption {
    type = types.listOf types.attrs;
    default = [];
  };
  fileserver = mkOption {
    type = types.nullOr types.str;
    default = null;
  };
  # Pod network CIDR.
  cidr = mkOption {
    type = types.str;
    default = "10.0.0.0/16";
  };
  # Names of the nodes running the ingress controller.
  ingressNodes = mkOption {
    type = types.nullOr (types.listOf types.str);
    default = null;
  };
  # Pre-generated CA directory (see initca.nix / pki.nix).
  initca = mkOption {
    type = types.nullOr types.path;
    default = null;
  };
  master = {
    enable = mkEnableOption "Enable kubernetes master node";
    address = mkOption {
      type = types.nullOr types.str;
      default = null;
    };
    name = mkOption {
      type = types.nullOr types.str;
      default = null;
    };
    extraSANs = mkOption {
      type = types.listOf types.str;
      default = [];
    };
    socat443 = mkEnableOption "Enable socat on port 443 -> 30443";
  };
  node = {
    enable = mkEnableOption "Enable kubernetes";
  };
  etcdCluster = {
    enable = mkEnableOption "Enable kubernetes";
    existing = mkEnableOption "Existing cluster";
    # name -> address map of etcd members; defaults to just this host.
    nodes = mkOption {
      type = types.attrs;
      default = { "${host.name}" = "${host.address}"; };
    };
  };
  charts = {
    acme_email = mkOption {
      type = types.str;
      default = "";
    };
    grafana_smtp_user = mkOption {
      type = types.str;
      default = "";
    };
    grafana_smtp_password = mkOption {
      type = types.str;
      default = "";
    };
  };
};
# Activate the role-specific configs; mkMerge lets several roles coexist on
# one host (e.g. a master that is also an etcd member).
config = mkIf cfg.enable (
  mkMerge [
    common
    (mkIf cfg.master.enable kubeMaster)
    (mkIf cfg.node.enable kubeNode)
    (mkIf cfg.etcdCluster.enable etcdClusterNode)
  ]
);
# Local overrides of the upstream kubernetes/kubelet NixOS modules.
imports = [
  ../overrides/kubernetes_default.nix
  ../overrides/kubelet.nix
];
}

42
modules/k8s/initca.nix Normal file
View File

@@ -0,0 +1,42 @@
# Build (or re-wrap) a self-signed CA with cfssl. If `ca` is non-null it is
# an existing CA directory to reuse; otherwise a fresh CA key/cert pair is
# generated at build time.
{
  pkgs ? import <nixpkgs> {},
  ca ? null,
  name ? "ca",
  algo ? "rsa",
  hosts ? [],
...}:
with pkgs;
let
  # CSR for the CA: 2048-bit RSA by default, P-256 when algo = "ecdsa".
  ca_csr = pkgs.writeText "${name}-csr.json" (builtins.toJSON {
    inherit hosts;
    CN = "${name}";
    key = {
      inherit algo;
      size = if algo == "ecdsa" then 256 else 2048;
    };
    names = [
      {
        CN = "${name}";
        O = "NixOS";
        OU = "${name}.pki.caSpec";
        L = "generated";
      }
    ];
  }
  );
  # Freshly generated self-signed CA. NOTE(review): the key is generated at
  # build time, so this output is not reproducible across rebuilds.
  ca' =
    pkgs.runCommand "initca" {
      buildInputs = [ pkgs.cfssl ];
    } '' cfssl genkey -initca ${ca_csr} | cfssljson -bare ca;
    mkdir -p $out; cp *.pem $out '';
  initca = if ca != null then ca else ca';
in
# make ca derivation sha depend on initca cfssl output
pkgs.stdenv.mkDerivation {
  inherit name;
  src = initca;
  buildCommand = ''
    mkdir -p $out;
    cp -r $src/* $out
  '';
}

82
modules/k8s/pki.nix Normal file
View File

@@ -0,0 +1,82 @@
# PKI helpers built on cfssl: wraps initca.nix and exposes `gencert` for
# issuing certificates signed by the cluster CA.
# NOTE(review): the default `ca = ""` is passed to initca.nix, which tests
# against null — an empty string would be treated as an existing CA path;
# callers are expected to always pass a real path (or null).
{ pkgs, ca ? "", algo ? "rsa" }:
let
  initca = import ./initca.nix { inherit pkgs ca; };
  # Paths of the CA key/cert inside the initca output.
  ca' = {
    key = "${initca}/ca-key.pem";
    cert = "${initca}/ca.pem";
  };
  # Signing policy: one-year certs usable for both server and client auth.
  ca-config = pkgs.writeText "ca-config.json" ''
    {
      "signing": {
        "default": {
          "expiry": "8760h"
        },
        "profiles": {
          "default": {
            "usages": [
              "signing",
              "key encipherment",
              "server auth",
              "client auth"
            ],
            "expiry": "8760h"
          }
        }
      }
    }
  '';
  # Build a cfssl CSR json from { cn, o, hosts }.
  gencsr = args:
    let
      csr = {
        CN = "${args.cn}";
        key = {
          inherit algo;
          size = if algo == "ecdsa" then 256 else 2048;
        };
        names = [
          {
            CN = "${args.cn}";
            O = "${args.o}";
            OU = "${args.cn}.${args.o}.pki.caSpec";
            L = "generated";
          }
        ];
        hosts = args.hosts;
      };
    in
    pkgs.writeText "${args.cn}-csr.json" (builtins.toJSON csr);
in
# Example usage:
#
# gencert { cn = "test"; ca = ca; o = "test"; };
#
rec {
  inherit initca;
  ca = ca';
  # Issue a certificate signed by the CA; returns { key, cert } store paths.
  gencert = attrs:
    let
      conf = {
        cn = attrs.cn;
        ca = attrs.ca;
        csr = gencsr { cn = attrs.cn; o = attrs.o; hosts = attrs.hosts; };
      };
      cfssl = conf:
        ''
          cfssl gencert -ca ${ca.cert} -ca-key ${ca.key} \
            -config=${ca-config} -profile=default ${conf.csr} | \
            cfssljson -bare cert; \
          mkdir -p $out; cp *.pem $out
        '';
      crt =
        pkgs.runCommand "${attrs.cn}" {
          buildInputs = [ pkgs.cfssl ];
        } (cfssl conf);
    in
    {
      key = "${crt}/cert-key.pem";
      cert = "${crt}/cert.pem";
    };
}

View File

@@ -0,0 +1 @@
# Prune docker images on nodes k0-2 .. k0-5 (fish syntax).
# Fixed: the loop variable was never interpolated — "ssh k0-" is not a host.
for i in (seq 2 5); ssh k0-$i docker system prune -a; end

View File

@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Take an etcd snapshot (written to ./snapshot.db) using the apiserver's
# etcd client certificate for mutual-TLS authentication.
ETCDCTL_API=3 etcdctl --endpoints https://etcd.local:2379 \
    --cacert=/var/lib/kubernetes/secrets/ca.pem \
    --cert=/var/lib/kubernetes/secrets/kube-apiserver-etcd-client.pem \
    --key=/var/lib/kubernetes/secrets/kube-apiserver-etcd-client-key.pem \
    snapshot save snapshot.db

View File

@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Print the cluster-admin bearer token (base64-decoded) from kube-system.
token=$(kubectl get secret -n kube-system | grep cluster-admin-token | cut -d' ' -f1)
kubectl get secret -n kube-system $token -o yaml | \
    grep ' token:' | cut -d' ' -f4 | base64 -d

View File

@@ -0,0 +1,49 @@
#!/usr/bin/env bash
# Prune old container-image tags from the GitLab registry (keep the 10
# newest per repository), then run the registry garbage collector in-cluster.
# SECURITY NOTE(review): a GitLab private token is hard-coded below — it
# should be rotated and read from the environment, not kept in the repo.
token=UTjgSspYQcX-BVUd1UsC
api=https://gitlab.com/api/v4
# Delete old tags for every registry repository of project $1
# (numeric id or URL-encoded "group%2Fproject" path).
prune () {
    id=$1
    reg=$(curl -s --header "PRIVATE-TOKEN: $token" \
        "$api/projects/$id/registry/repositories" \
        | json_pp | sed -n 's/^ *"id" *: *\([0-9]\+\).*/\1/p')
    for i in $reg; do
        curl -s --request DELETE --data 'keep_n=10' \
            --data 'name_regex=.*[0-9].*' \
            --header "PRIVATE-TOKEN: $token" \
            "$api/projects/$id/registry/repositories/$i/tags"
    done
}
# Run the docker registry garbage collector inside the gitlab namespace.
gc () {
    pod=$(kubectl get pod -n gitlab -lapp=registry | tail -1 | cut -d' ' -f1)
    kubectl exec -n gitlab $pod -- \
        registry garbage-collect /etc/docker/registry/config.yml -m
}
# Prune every project in every group visible to the token.
all () {
    groups=$(curl -s --header "PRIVATE-TOKEN: $token" "$api/groups" \
        | json_pp | sed -n 's/^ *"id" *: *\([0-9]\+\).*/\1/p')
    for g in $groups; do
        proj=$(curl -s --header "PRIVATE-TOKEN: $token" \
            "$api/groups/$g/projects?simple=true&include_subgroups=true" \
            | json_pp | sed -n 's/^ \{6\}"id" *: *\([0-9]\+\).*/\1/p')
        for p in $proj; do
            prune $p
        done
    done
}
# Prune the "group/project" paths given on the command line.
projects () {
    for i in $@; do
        prune $(echo $i | sed 's,/,%2F,g')
    done
}
case $1 in
    --all) all ;;
    *) projects $@
esac
gc

View File

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
# Inject the linkerd sidecar into the given namespaces (or every namespace
# except kube-system) and restart their workloads to pick up the proxy.
TOP="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
# Fixed: the availability check probed `which kubectl` while claiming (and
# actually needing) the linkerd cli.
linkerd=$(which linkerd 2> /dev/null)
if [ -z "$linkerd" ]; then
    echo "linkerd cli is not available"
    exit 1
fi
# Annotate each namespace for injection and roll all workload kinds.
inject () {
    for i in $@; do
        kubectl get ns $i -o yaml | linkerd inject - | kubectl apply -f-
        kubectl rollout restart daemonsets -n $i
        kubectl rollout restart statefulsets -n $i
        kubectl rollout restart deployments -n $i
    done
}
# Fixed: `[ $# > 0 ]` was a shell redirection (it created a file named "0"
# and always succeeded); use the arithmetic comparison instead.
if [ $# -gt 0 ]; then
    inject $@
else
    inject $(kubectl get ns | sed "1d; /kube-system/d; s/ .*//")
fi

View File

@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Patch the docker and gitlab imagePullSecrets into one service account of a
# namespace, or into all of them with "all".
TOP="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
if [ $# != 2 ]; then
    echo "usage: inject-sa-pull-secrets.sh {namespace} {all|serviceaccount}"
    exit 1
fi
namespace=$1
sa=$2
# Patch service account $1 in $namespace with both pull secrets.
inject () {
    kubectl patch serviceaccount $1 \
        -n $namespace \
        -p "{\"imagePullSecrets\": [ \
            {\"name\": \"docker-pull-secret\"}, \
            {\"name\": \"gitlab-pull-secret\"} \
        ]}"
}
# Fixed: quote $sa so the test does not break on empty/whitespace arguments.
if [ "$sa" = all ]; then
    for i in $(kubectl get sa -n $namespace | sed '1d;s/\([^ ]\+\).*/\1/'); do
        inject $i
    done
else
    inject $sa
fi

View File

@@ -0,0 +1,76 @@
#!/usr/bin/env bash
# Configure a namespace (or every namespace except kube-system with "all"):
# enable linkerd injection, install the docker/gitlab image pull secrets and
# patch them into all service accounts.
set +e
TOP="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
if [ x$1 = x ]; then
    # Fixed: "ehco" typo turned the usage message into command-not-found.
    echo "usage: install-namespace.sh {namespace|all}"
    exit 1
fi
namespace=$1
# Idempotently apply the namespace manifest with linkerd injection enabled.
setup_namespace () {
    local namespace
    namespace=$1
    cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
  annotations:
    linkerd.io/inject: enabled
  labels:
    name: $namespace
  name: $namespace
EOF
}
# Recreate the docker hub pull secret.
# SECURITY NOTE(review): credentials are hard-coded below — rotate them and
# inject via the environment instead of keeping them in the repository.
create_docker_secret () {
    local namespace
    namespace=$1
    kubectl get secret docker-pull-secret -n $namespace >/dev/null 2>&1
    [ $? = 0 ] && kubectl delete secret docker-pull-secret -n $namespace
    kubectl create secret docker-registry docker-pull-secret \
        -n $namespace \
        --docker-username=juselius \
        --docker-password=ed584a31-c7ff-47ba-8469-3f0f4db6402c \
        --docker-email=jonas.juselius@gmail.com
}
# Apply the gitlab registry pull secret (pre-encoded dockerconfigjson).
create_gitlab_secret () {
    local namespace
    namespace=$1
    cat << EOF | kubectl apply -f -
apiVersion: v1
metadata:
  name: gitlab-pull-secret
  namespace: $namespace
kind: Secret
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: ewoJImF1dGhzIjogewoJCSJyZWdpc3RyeS5naXRsYWIuY29tIjogewoJCQkiYXV0aCI6ICJaMmwwYkdGaUsyUmxjR3h2ZVMxMGIydGxiaTB4T1Rnd01qQTZPRmxqU0VoMFZIaENSVUZUTFZKUWRsSnJXbGM9IgoJCX0KCX0sCgkiSHR0cEhlYWRlcnMiOiB7CgkJIlVzZXItQWdlbnQiOiAiRG9ja2VyLUNsaWVudC8xOS4wMy4xMiAobGludXgpIgoJfQp9Cg==
EOF
}
# Patch both pull secrets into every service account of the namespace.
inject_pull_secrets () {
    local namespace
    namespace=$1
    $TOP/inject-sa-pull-secrets.sh $namespace all
}
configure_namespace () {
    setup_namespace $1
    create_docker_secret $1
    create_gitlab_secret $1
    inject_pull_secrets $1
}
if [ "x$namespace" = "xall" ]; then
    for i in $(kubectl get ns | sed '1d;/^kube-system/d;s/\([^ ]\+\).*/\1/'); do
        configure_namespace $i
    done
else
    configure_namespace $namespace
fi

13
modules/k8s/scripts/k8s-all Executable file
View File

@@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Simple script for fetching all resources from a namespace, might include some
# clutter
[ $# -ne 1 ] && echo "Usage: k8s-all [namespace]" && exit 1
# Robustness: quote the resource and namespace arguments to kubectl.
for r in $(kubectl api-resources --verbs=list --namespaced -o name)
do
    echo "=== Resource: $r ==="; echo \
        && kubectl get "$r" -n "$1" --ignore-not-found \
        && echo
done

View File

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Report linkerd-proxied pods that have sockets stuck in CLOSE_WAIT (state
# 08 in /proc/net/tcp) or FIN_WAIT_2 (state 05).
pods=$(kubectl get po -A -l linkerd.io/control-plane-ns -ojsonpath="{range .items[*]}{.metadata.name} {.metadata.namespace}{'\n'}{end}")
IFS=" "
while read name namespace; do
    tcp=$(kubectl exec -n $namespace $name -c linkerd-proxy -- cat /proc/net/tcp)
    # Fixed: $tcp must be quoted — unquoted, the newlines collapse into
    # spaces and awk only ever sees a single record, so the counts were wrong.
    close_wait=$(echo "$tcp" | awk 'BEGIN {cnt=0} $4==08 {cnt++} END {print cnt}')
    fin_wait_2=$(echo "$tcp" | awk 'BEGIN {cnt=0} $4==05 {cnt++} END {print cnt}')
    if [ "$close_wait" -gt "0" -o "$fin_wait_2" -gt "0" ]; then
        echo "$name.$namespace has $close_wait sockets in CLOSE_WAIT and $fin_wait_2 sockets in FIN_WAIT_2"
    else
        echo "$name.$namespace is okay"
    fi
done <<< "$pods"

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
# WARNING: deletes every service-account token secret in the whole cluster,
# forcing the controller-manager to re-issue them (token rotation).
kubectl delete secrets --all-namespaces --field-selector='type=kubernetes.io/service-account-token'

View File

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Restart flannel on every node. @master@/@nodes@ are replaced with real
# host names at nix build time (see cluster-scripts in default.nix).
# master="etcd.service"
master=""
node="flannel.service"
nodes="@nodes@"
master_node="@master@"
# nodes=$(kubectl get nodes --no-headers | cut -d' ' -f1)
# master_node=$(echo $nodes | cut -d' ' -f1)
# Fixed: with master="" the script ran `systemctl restart` without a unit
# argument, which fails; only restart the master service when one is set.
if [ -n "$master" ]; then
    echo "$master_node: systemctl restart $master"
    sudo systemctl restart $master
fi
for n in $nodes; do
    echo "$n: systemctl restart $node"
    ssh root@$n systemctl restart $node &
done
echo "Waiting..."
wait

View File

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Restart the control-plane services on the master (run locally via sudo)
# and the node services on every worker (via ssh, in parallel).
# @master@/@nodes@ are replaced at nix build time (see cluster-scripts).
master="kube-apiserver kube-scheduler kube-controller-manager"
node="kube-proxy kubelet kube-certmgr-apitoken-bootstrap"
nodes="@nodes@"
master_node="@master@"
# nodes=$(kubectl get nodes --no-headers | cut -d' ' -f1)
# master_node=$(echo $nodes | cut -d' ' -f1)
echo "$master_node: systemctl restart $master"
sudo systemctl restart $master
for n in $nodes; do
    echo "$n: systemctl restart $node"
    ssh root@$n systemctl restart $node &
done
echo "Waiting..."
wait

View File

@@ -0,0 +1,3 @@
#!/bin/sh
# Taint node $1 so only pods tolerating ClusterService=true are scheduled on it.
kubectl taint node $1 ClusterService="true":NoSchedule

View File

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# Register the standard set of helm chart repositories and refresh the index.
repos=(
    "stable=https://charts.helm.sh/stable"
    "ingress-nginx=https://kubernetes.github.io/ingress-nginx"
    "prometheus-community=https://prometheus-community.github.io/helm-charts"
    "hashicorp=https://helm.releases.hashicorp.com"
    "bitnami=https://charts.bitnami.com/bitnami"
    "minio=https://helm.min.io/"
    "anchore=https://charts.anchore.io"
    "linkerd=https://helm.linkerd.io/stable"
)
# Fixed: the old loop permanently reset the global IFS and clobbered the
# positional parameters with `set`; split each entry locally instead.
for entry in "${repos[@]}"; do
    IFS="=" read -r name url <<< "$entry"
    helm repo add "$name" "$url"
done
helm repo update

View File

@@ -0,0 +1,5 @@
# Print the expiry date (ISO-8601) of every cfssl and kubernetes PEM
# certificate, sorted chronologically.
for pem in /var/lib/cfssl/*.pem /var/lib/kubernetes/secrets/*.pem; do
    printf 'exp: %s: %s\n' \
        "$(date --date="$(openssl x509 -enddate -noout -in "$pem"|cut -d= -f 2)" --iso-8601)" \
        "$pem"
done | sort

12
modules/k8s/scripts/ws-curl.sh Executable file
View File

@@ -0,0 +1,12 @@
#!/bin/sh
# Probe a websocket endpoint with curl: the first argument is the Host
# header value, the remaining arguments are passed to curl (typically the URL).
host=$1; shift
curl -i -N \
    -H "Connection: upgrade"\
    -H "Upgrade: websocket"\
    -H "Sec-WebSocket-Key: SGVsbG8sIHdvcmxkIQ=="\
    -H "Sec-WebSocket-Version: 13"\
    -H "Origin: http://foo.com/"\
    -H "Host: $host" $@

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
# Force-delete every pod stuck in CrashLoopBackOff.
# Fixed: the body was fish syntax (`for i in (...); ...; end`) under a bash
# shebang and could never run; rewritten as a bash pipeline.
kubectl get pods -A | grep CrashLoop | \
    sed 's/^\([^ ]\+\) \+\([^ ]\+\) .*/kubectl delete pod -n \1 \2 --force=true/' | \
    while read -r cmd; do eval "$cmd"; done

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
# Remove all pods in phase Failed (e.g. Evicted) across every namespace.
kubectl delete pods --field-selector 'status.phase==Failed' -A

View File

@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Kill node_exporter on every node whose name starts with "k" (assumes the
# cluster's node-naming convention and systemd restarting the service).
for i in $(kubectl get nodes | sed -nr 's/^(k[^ ]+) .*/\1/p'); do
    ssh root@$i pkill node_exporter
done
done