Reorganize project

Jonas Juselius
2020-11-05 10:02:01 +01:00
parent 4876de1547
commit 6fea8b3bc8
57 changed files with 1106 additions and 319 deletions

.gitmodules vendored
@@ -1,3 +1,3 @@
[submodule "nixos"]
path = lib/nixos
path = nixos
url = git@gitlab.com:juselius/nixos-configuration.git

@@ -7,7 +7,7 @@ if [ "x$1" = "x" ]; then
exit 1
fi
ca=$TOP/lib/initca.nix
ca=$TOP/modules/initca.nix
cd $TOP/clusters/$1

@@ -1,8 +0,0 @@
#!/usr/bin/env bash
pod=`kubectl get pods -n mssql | grep Running | grep consto-ks | cut -d' ' -f1`
bak=`kubectl exec -n mssql $pod ls -- -1 /var/opt/mssql/data/ | grep '.bak$'`
for i in $bak; do
kubectl cp mssql/$pod:/var/opt/mssql/data/$i .
done

@@ -1,49 +0,0 @@
#!/usr/bin/env bash
token=UTjgSspYQcX-BVUd1UsC
api=https://gitlab.itpartner.no/api/v4
prune () {
id=$1
reg=$(curl -s --header "PRIVATE-TOKEN: $token" \
"$api/projects/$id/registry/repositories" \
| json_pp | sed -n 's/^ *"id" *: *\([0-9]\+\).*/\1/p')
for i in $reg; do
curl -s --request DELETE --data 'keep_n=10' \
--data 'name_regex=.*[0-9].*' \
--header "PRIVATE-TOKEN: $token" \
"$api/projects/$id/registry/repositories/$i/tags"
done
}
gc () {
pod=$(kubectl get pod -n gitlab -lapp=registry | tail -1 | cut -d' ' -f1)
kubectl exec -n gitlab $pod -- \
registry garbage-collect /etc/docker/registry/config.yml -m
}
all () {
groups=$(curl -s --header "PRIVATE-TOKEN: $token" "$api/groups" \
| json_pp | sed -n 's/^ *"id" *: *\([0-9]\+\).*/\1/p')
for g in $groups; do
proj=$(curl -s --header "PRIVATE-TOKEN: $token" \
"$api/groups/$g/projects?simple=true&include_subgroups=true" \
| json_pp | sed -n 's/^ \{6\}"id" *: *\([0-9]\+\).*/\1/p')
for p in $proj; do
prune $p
done
done
}
projects () {
for i in $@; do
prune $(echo $i | sed 's,/,%2F,g')
done
}
case $1 in
--all) all ;;
*) projects $@
esac
gc
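
Invocation sketch for the script above (the file name prune-registry.sh and the project paths are assumptions, not part of this commit):

# prune image tags in every project visible to the API token, then garbage-collect the registry
./prune-registry.sh --all
# or prune selected projects by their GitLab path
./prune-registry.sh mygroup/backend mygroup/frontend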

@@ -1,33 +0,0 @@
#!/usr/bin/env bash
TOP="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/.."
if [ "x$1" = "x" ]; then
echo "usage: install-namespace.sh {namespace}"
exit 1
fi
namespace=$1
tmpfile=/tmp/new-$namespace.$$
cat << EOF > $tmpfile
apiVersion: v1
kind: Namespace
metadata:
labels:
name: $namespace
name: $namespace
---
apiVersion: v1
kind: Secret
metadata:
name: gitlab-registry-auth
namespace: $namespace
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: ewoJImF1dGhzIjogewoJCSJyZWdpc3RyeS5pdHBhcnRuZXIubm8iOiB7CgkJCSJhdXRoIjogImNtOXZkRHAwY21sdElIUnlZVzBnY0d4cGJTQndiR0Z0IgoJCX0KCX0KfQo=
EOF
kubectl apply -f $tmpfile
rm $tmpfile

@@ -1,6 +1,6 @@
with import <nixpkgs> {};
let
setup = import ../../lib/default.nix { inherit pkgs cluster lib config; };
setup = import ../../modules/default.nix { inherit pkgs cluster lib config; };
hosts = [
{ name = "fs0-0"; address = "10.253.18.106"; hw = ./fs0-0.nix; }

@@ -1,6 +1,6 @@
with import <nixpkgs> {};
let
setup = import ../../lib/default.nix { inherit pkgs cluster lib config; };
setup = import ../../modules/default.nix { inherit pkgs cluster lib config; };
hosts = [
{ name = "fs2-0"; address = "10.1.2.117"; hw = ./fs2-0.nix; }
@@ -9,26 +9,7 @@ let
cluster = {
initca = ./ca;
clusterName = "fs2";
extraHosts = ''
10.253.18.106 fs0-0 fs0-0.itpartner.no fs0-0.itpartner.intern
10.253.18.100 k0-0 k0-0.itpartner.no k0-0.itpartner.intern
10.253.18.101 k0-1 k0-1.itpartner.no k0-1.itpartner.intern
10.253.18.102 k0-2 k0-2.itpartner.no k0-2.itpartner.intern
10.253.18.109 k1-0 k1-0.itpartner.no k1-0.itpartner.intern
10.253.18.110 k1-1 k1-1.itpartner.no k1-1.itpartner.intern
10.253.18.111 k1-2 k1-2.itpartner.no k1-2.itpartner.intern
10.253.18.108 k1-3 k1-3.itpartner.no k1-3.itpartner.intern
10.253.18.107 k0-4 k1-4.itpartner.no k1-4.itpartner.intern
10.253.18.114 k2-0 k2-0.itpartner.no k2-0.itpartner.intern
10.253.18.115 k2-1 k2-1.itpartner.no k2-1.itpartner.intern
10.253.18.116 k2-2 k2-2.itpartner.no k2-2.itpartner.intern
10.253.18.117 k2-3 k2-3.itpartner.no k2-3.itpartner.intern
10.253.18.118 k2-4 k2-4.itpartner.no k2-4.itpartner.intern
10.253.18.103 k2-5 k2-5.itpartner.no k2-5.itpartner.intern
'';
extraHosts = import ../hosts.nix;
adminAuthorizedKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKiAS30ZO+wgfAqDE9Y7VhRunn2QszPHA5voUwo+fGOf jonas"
@@ -39,7 +20,7 @@ let
enable = true;
nfs.enable = true;
nfs.exports = ''
/vol/k2 10.253.18.0/24(insecure,rw,sync,no_subtree_check,crossmnt,fsid=0,no_root_squash)
/vol/export 10.253.18.0/24(insecure,rw,sync,no_subtree_check,crossmnt,fsid=0,no_root_squash)
'';
};
};

clusters/hosts.nix Normal file
@@ -0,0 +1,22 @@
''
10.253.18.106 fs0-0 fs0-0.itpartner.no fs0-0.itpartner.intern
10.1.2.164 fs0-1 fs0-1.itpartner.no fs0-1.itpartner.intern
10.1.2.117 fs2-0 fs2-0.itpartner.no fs2-0.itpartner.intern
10.253.18.100 k0-0 k0-0.itpartner.no k0-0.itpartner.intern
10.253.18.101 k0-1 k0-1.itpartner.no k0-1.itpartner.intern
10.253.18.102 k0-2 k0-2.itpartner.no k0-2.itpartner.intern
10.253.18.109 k1-0 k1-0.itpartner.no k1-0.itpartner.intern
10.253.18.110 k1-1 k1-1.itpartner.no k1-1.itpartner.intern
10.253.18.111 k1-2 k1-2.itpartner.no k1-2.itpartner.intern
10.253.18.108 k1-3 k1-3.itpartner.no k1-3.itpartner.intern
10.253.18.107 k0-4 k1-4.itpartner.no k1-4.itpartner.intern
10.253.18.114 k2-0 k2-0.itpartner.no k2-0.itpartner.intern
10.253.18.115 k2-1 k2-1.itpartner.no k2-1.itpartner.intern
10.253.18.116 k2-2 k2-2.itpartner.no k2-2.itpartner.intern
10.253.18.117 k2-3 k2-3.itpartner.no k2-3.itpartner.intern
10.253.18.118 k2-4 k2-4.itpartner.no k2-4.itpartner.intern
10.253.18.103 k2-5 k2-5.itpartner.no k2-5.itpartner.intern
''

@@ -1,6 +1,6 @@
with import <nixpkgs> {};
let
setup = import ../../lib/default.nix { inherit pkgs cluster customize lib config; };
setup = import ../../modules/default.nix { inherit pkgs cluster customize lib config; };
hosts = [
{ name = "k0-1"; address = "10.253.18.101"; hw = ./k0-1.nix; }
@@ -14,28 +14,10 @@ let
cluster = {
clusterName = "k0";
initca = ./ca;
# domain = "itpartner.intern";
# searchDomains = [ "itpartner.intern" "itpartner.no" ];
extraHosts = ''
10.253.18.106 fs0-0 fs0-0.itpartner.no fs0-0.itpartner.intern
10.1.2.164 fs0-1 fs0-1.itpartner.no fs0-1.itpartner.intern
10.253.18.100 k0-0 k0-0.itpartner.no k0-0.itpartner.intern
10.253.18.101 k0-1 k0-1.itpartner.no k0-1.itpartner.intern
10.253.18.102 k0-2 k0-2.itpartner.no k0-2.itpartner.intern
10.253.18.109 k1-0 k1-0.itpartner.no k1-0.itpartner.intern
10.253.18.110 k1-1 k1-1.itpartner.no k1-1.itpartner.intern
10.253.18.111 k1-2 k1-2.itpartner.no k1-2.itpartner.intern
10.253.18.108 k1-3 k1-3.itpartner.no k1-3.itpartner.intern
10.253.18.107 k0-4 k1-4.itpartner.no k1-4.itpartner.intern
10.253.18.114 k2-0 k2-0.itpartner.no k2-0.itpartner.intern
10.253.18.115 k2-1 k2-1.itpartner.no k2-1.itpartner.intern
10.253.18.116 k2-2 k2-2.itpartner.no k2-2.itpartner.intern
10.253.18.117 k2-3 k2-3.itpartner.no k2-3.itpartner.intern
10.253.18.118 k2-4 k2-4.itpartner.no k2-4.itpartner.intern
10.253.18.103 k2-5 k2-5.itpartner.no k2-5.itpartner.intern
'';
extraHosts = import ../hosts.nix;
adminAuthorizedKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKiAS30ZO+wgfAqDE9Y7VhRunn2QszPHA5voUwo+fGOf jonas"

@@ -1,6 +1,6 @@
with import <nixpkgs> {};
let
setup = import ../../lib/default.nix { inherit pkgs cluster customize lib config; };
setup = import ../../modules/default.nix { inherit pkgs cluster customize lib config; };
hosts = [
{ name = "k1-1"; address = "10.253.18.110"; hw = ./k1-1.nix; }
@@ -16,28 +16,10 @@ let
cluster = {
clusterName = "k2";
initca = ./ca;
domain = "itpartner.intern";
searchDomains = [ "itpartner.intern" "itpartner.no" ];
extraHosts = ''
10.253.18.106 fs0-0 fs0-0.itpartner.no fs0-0.itpartner.intern
10.1.2.164 fs0-1 fs0-1.itpartner.no fs0-1.itpartner.intern
10.253.18.100 k0-0 k0-0.itpartner.no k0-0.itpartner.intern
10.253.18.101 k0-1 k0-1.itpartner.no k0-1.itpartner.intern
10.253.18.102 k0-2 k0-2.itpartner.no k0-2.itpartner.intern
10.253.18.109 k1-0 k1-0.itpartner.no k1-0.itpartner.intern
10.253.18.110 k1-1 k1-1.itpartner.no k1-1.itpartner.intern
10.253.18.111 k1-2 k1-2.itpartner.no k1-2.itpartner.intern
10.253.18.108 k1-3 k1-3.itpartner.no k1-3.itpartner.intern
10.253.18.107 k0-4 k1-4.itpartner.no k1-4.itpartner.intern
10.253.18.114 k2-0 k2-0.itpartner.no k2-0.itpartner.intern
10.253.18.115 k2-1 k2-1.itpartner.no k2-1.itpartner.intern
10.253.18.116 k2-2 k2-2.itpartner.no k2-2.itpartner.intern
10.253.18.117 k2-3 k2-3.itpartner.no k2-3.itpartner.intern
10.253.18.118 k2-4 k2-4.itpartner.no k2-4.itpartner.intern
10.253.18.103 k2-5 k2-5.itpartner.no k2-5.itpartner.intern
'';
extraHosts = import ../hosts.nix;
adminAuthorizedKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKiAS30ZO+wgfAqDE9Y7VhRunn2QszPHA5voUwo+fGOf jonas"

@@ -1,6 +1,6 @@
with import <nixpkgs> {};
let
setup = import ../../lib/default.nix { inherit pkgs cluster customize lib config; };
setup = import ../../modules/default.nix { inherit pkgs cluster customize lib config; };
hosts = [
{ name = "k2-1"; address = "10.253.18.115"; hw = ./k2-1.nix; }
@@ -17,28 +17,10 @@ let
cluster = {
clusterName = "k2";
initca = ./ca;
# domain = "itpartner.intern";
# searchDomains = [ "itpartner.intern" "itpartner.no" ];
extraHosts = ''
10.253.18.106 fs0-0 fs0-0.itpartner.no fs0-0.itpartner.intern
10.1.2.164 fs0-1 fs0-1.itpartner.no fs0-1.itpartner.intern
10.253.18.100 k0-0 k0-0.itpartner.no k0-0.itpartner.intern
10.253.18.101 k0-1 k0-1.itpartner.no k0-1.itpartner.intern
10.253.18.102 k0-2 k0-2.itpartner.no k0-2.itpartner.intern
10.253.18.109 k1-0 k1-0.itpartner.no k1-0.itpartner.intern
10.253.18.110 k1-1 k1-1.itpartner.no k1-1.itpartner.intern
10.253.18.111 k1-2 k1-2.itpartner.no k1-2.itpartner.intern
10.253.18.108 k1-3 k1-3.itpartner.no k1-3.itpartner.intern
10.253.18.107 k0-4 k1-4.itpartner.no k1-4.itpartner.intern
10.253.18.114 k2-0 k2-0.itpartner.no k2-0.itpartner.intern
10.253.18.115 k2-1 k2-1.itpartner.no k2-1.itpartner.intern
10.253.18.116 k2-2 k2-2.itpartner.no k2-2.itpartner.intern
10.253.18.117 k2-3 k2-3.itpartner.no k2-3.itpartner.intern
10.253.18.118 k2-4 k2-4.itpartner.no k2-4.itpartner.intern
10.253.18.103 k2-5 k2-5.itpartner.no k2-5.itpartner.intern
'';
extraHosts = import ../hosts.nix;
adminAuthorizedKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKiAS30ZO+wgfAqDE9Y7VhRunn2QszPHA5voUwo+fGOf jonas"

@@ -1,22 +0,0 @@
with import <nixpkgs> {};
let
settings = rec {
master = "node1";
workers = [ "node2" ];
masterAddress = "10.10.10.1";
apiserverAddress = "https://${masterAddress}:4443";
clusterHosts = ''
10.10.10.1 node1 kubernetes
10.10.10.2 node2
10.10.20.1 fs1
'';
adminAuthorizedKeys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKiAS30ZO+wgfAqDE9Y7VhRunn2QszPHA5voUwo+fGOf admin"
];
};
cluster = callPackage ../../lib/k8s.nix { here = ./.; inherit settings; };
in
{
node1 = cluster.apiserver "10.10.10.1" "node1";
node2 = cluster.worker "10.10.10.2" "node2";
}

@@ -1,21 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, ... }:
{
imports = [ ];
boot.initrd.availableKernelModules = [ "ata_piix" "mptspi" "floppy" "sd_mod" "sr_mod" ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-label/nixos";
fsType = "ext4";
};
swapDevices = [ ];
nix.maxJobs = lib.mkDefault 1;
}

@@ -1,21 +0,0 @@
# Do not modify this file! It was generated by nixos-generate-config
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, ... }:
{
imports = [ ];
boot.initrd.availableKernelModules = [ "ata_piix" "mptspi" "floppy" "sd_mod" "sr_mod" ];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
fileSystems."/" =
{ device = "/dev/disk/by-label/nixos";
fsType = "ext4";
};
swapDevices = [ ];
nix.maxJobs = lib.mkDefault 1;
}

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
TOP="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/.."
if [ "x$1" = "x" ]; then
echo "usage: setup-namespace.sh {namespace}"
exit 1
fi
namespace=$1
tmpfile=/tmp/helm-$namespace.$$
cat << EOF > $tmpfile
apiVersion: v1
kind: Namespace
metadata:
labels:
name: $namespace
name: $namespace
---
apiVersion: v1
metadata:
name: gitlab-pull-secret
namespace: $namespace
kind: Secret
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: ewoJImF1dGhzIjogewoJCSJyZWdpc3RyeS5naXRsYWIuY29tIjogewoJCQkiYXV0aCI6ICJaMmwwYkdGaUsyUmxjR3h2ZVMxMGIydGxiaTB4T1Rnd01qQTZPRmxqU0VoMFZIaENSVUZUTFZKUWRsSnJXbGM9IgoJCX0KCX0sCgkiSHR0cEhlYWRlcnMiOiB7CgkJIlVzZXItQWdlbnQiOiAiRG9ja2VyLUNsaWVudC8xOS4wMy4xMiAobGludXgpIgoJfQp9Cg==
---
apiVersion: v1
kind: Secret
metadata:
name: kestrel-tls
namespace: $namespace
type: Opaque
data:
kestrel.pfx: MIIJcQIBAzCCCTcGCSqGSIb3DQEHAaCCCSgEggkkMIIJIDCCA9cGCSqGSIb3DQEHBqCCA8gwggPEAgEAMIIDvQYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYwDgQINE11xXT7iV4CAggAgIIDkBMylQRDdNJTEryjKEYajwYVWfkJDmEXfToulTYOU1Jv1q7z+le15hCGwauS/yDRCS4QjcTmW+XT7MopnqLlVXDF2dZbk+a1ThTiaToTqXbRWpI2sfzuFjbA6cYPJNonBDKKNwUmewnAog37u9qaQk2MCsaUw6t7pBp7HpvtnVR/GbsbY98udx6kqATlyZtNnhg8QhgTF9dfGf7VeQj0wq1gGaiGXq0kNJBwod7my8caQD3gUtRQf0ZKZN6RF8r2a4mjf0YyOBsLtSbZ7bceHtdN5PlOi1wu47XSAhSzqNHpNM8K4o0HvMol1m8QzQSmM6KY6vOrTagX+rSejV5aX82gdpiNjRa8HGO5+S8oRsL2/xX2FoxzUCkpzjyoAHJ6Bd25tem/ls4l921hlVmHZuseMiuwMisBw+bTYXEmug3SK54wkZi0nkjRAUTTZRy5KqYWDYXzxuT6MZPiROQRv66PpAG6IPtnhv0iIyszwAlYf6zZcT8Xlh6M9tMPuDFEKMzUbff8/FUWPrLLAZIuPC1PjbmkQ+bCrqN2JkDoJKbJjs8FEvq45vaG/R9rKnWeXakbrcKt7iEVQRUynHfXheZMPfhyB2QBS5gO5mjVLx062Lf+4h5oAf43Kbu5iGfYDTQHazW1jfMCfq87ufvMVlAlqJ0TQCUDPcDjW0o5MAv5wJibOciw5IJ/AEXV42apWUsei2sKB62JcFSiwUc+7a4QcCh0Cn4pgBjpi4T9v0mOWOCcu26IeJPeBpAW+4fgMfmiL5AfGCeY4YNiTrK0yHaUBA7kLCXCPKUHKYP71WkVeuooih0yJJZ/ZqWN+aIOm0c/DPAjUgkEVVtZXescW2Ae0NgdMhMeJ5kfsPYlTeOtFwzoSRu8wMPBr/Ufg2aEWc5GaZRbQmFzvmcg9aPFpltQ0XXGyaD+c6JR2t5b5YgH5MLRh5uZmYhFIBwBHIUQZ9Sc+7pjHUt9TnnVz3fT72pGi47Py5mm0W95euC2YucqclSQ7wjj4OKqgNKDp4o/ALZaZUURZeLl8xwsQ9liAiw2hEw9tvFvdWb9RM3wVEU5ol9n0OnReOSzYDMfUaUxiVTnA8r3SuavdbsuiyWpZ6lJJQRuwNhUfVat+c39OamXbe1J9E7wRDxAISKE2+JofaTOublkMNsaxP2TYDs9xDL/0oCHCxyu2hoCyOK8b94gw2yyW/+UdqUxLHdBgEvEbjCCBUEGCSqGSIb3DQEHAaCCBTIEggUuMIIFKjCCBSYGCyqGSIb3DQEMCgECoIIE7jCCBOowHAYKKoZIhvcNAQwBAzAOBAi7oqckaHZOQQICCAAEggTIlVZIQOhHhHchiWYqwlt9zdOP21UU3v9ghDAwuimISuLxTyfr+HXC9+5TRW5eKygiBv9czjZuZcUTQxHLlW2KQmz5EVik/CeAbhmgK3XL9YGTZxGccSNGaS9dLvxWVDbeCoNK7rr9R3zV4vWLPerfGjezpT/VWHLie0pBaNbHg8GHhwlLBSzo+ODkSD1N1cf/6wAny3Cdf8mwgRVmT5dQ7DVfnpkXNQ5Mztu8faLeMjURU2XCZecOZ4vVxr5cCCnOn4vWMPxwGeJUQVVt1M1BFyLg3DyTBpHST0qkV4PKDESjmha8d0zc+ifKr10e1a8LU9KHWsb8L0in2gTor8F9QwoYhtx2UQdx4n4qQ3GKoR0nfBl3aZw97hdPJgDKKoM1BFpXMg3LRMQSL/0FP2JfKQt/Qni3vvGE5A207ouMU+0G9qHdniR7DVmbmDEOTJQgbeYivLPOuYlXSx2aK51VUxO+agOnxLg8RYqOo8Ex7ZtnxjpfQQVirMJ3yPdD3cqUVLiJ/Y5xFOvwjoLNdlxxTC+QgsCN1K+Vg3KY+pn8d/iAmrMfUs992jz0xwXrUBG6V2lrXE6dI3SpTT8/h265Cd8KjHiXSJOP02sHm0PWTVQLAeJluV6DLgHB73jQ6fb01AbVkbG8lL2fhWl1R5cD7OdAIP7n6FgoSrzA/eqKTLYPKIbA73HdPCMm2zzb5mdlo8rh+0FJ8vegRJ4DFGag/FpAJRxeUnyLyO29tOKpp/u9uwrYDooYn5ci5gLeehUfGqEJlNp8GPbqiKAkWwNLaNguJA+kT0v7XVoEdNkDB6upObyJOObHl1W6s5vHFarNojcNINgTV3sEJT5tDuLZ282Lw99Wg1lvUGJbdO3dgq5NLey11cmRTBR/KyqSoYPGUfC8Aihfo/djUbXUjs/I6WuPRoqcSzQsjqHt7hR7aHF2ahKw2WxQhT27jgUpTcgd1O00uOsb3BPhskpY9ggRNs9AaVJu2RHyxwX2TWkY/AmMYG8UIRmKkAzLctUVwSIEY27GbluvSLtIhVl4I7DV8SYcWkxu/1NoT8aMlQMJeULgFMzG49GWnJDOgOXxcTRUFL3hniisWU5h8PPNxIGLqYf4C5ocXPCg7sap+6IrEm3lP9nwXlhMfHOMbKRX5p0W+0bzEtf4sZlwt6An5+WJmIP0oegz7tzsJNzEiDShK0TaEgfRyBi+NM781zGOCN7X4Lvzl3L5CAAtRGhfYMH52X7vf70Gf74wREa75O91NurJTaRMlztWoA7vAI2maYoPO9wyBWIsQDyv4cmL3xCai0TIza7Wtu8SHKnJCKGp90fftNU8PNlN6StVi2y8VKY+whRqFcZR1dbx+ClsPOHAcosNkZ3Vv9EuieSZaCSNT7dOvCiSgVgzuLg3CC/SBPfzVeqaoLL4xMVtfKXqZfX7SDoftNlD8rlY+hHR1pdxpvKhZPIgDkMRAZ8Z6IWKWdyRACeNM5NWK56e9D8zm5VDKDZCz1n/zozH577ABvj6+dZkahl/FKpryFxY4qtKxnYXXd8DVYt6t1NFIz3Ybov3r/7fHYqm8OLmF9FZqeC4gqr9HUbuDkaU0mPCHtWrL2nkhhuSKR4sm2VfhUSegJkiTKvD5+DhpIaDMSUwIwYJKoZIhvcNAQkVMRYEFJpr2WGeI1IjCffN9Qs1YLuF26qUMDEwITAJBgUrDgMCGgUABBRuJCgviB/YoTN9wqikECF7WyAN9QQI/4JQvFeDBswCAggA
EOF
kubectl apply -f $tmpfile
rm $tmpfile

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
token=UTjgSspYQcX-BVUd1UsC
api=https://gitlab.itpartner.no/api/v4
api=https://gitlab.com/api/v4
prune () {
id=$1

@@ -85,4 +85,8 @@ install_certmgr
install_charts
install_prometheus
# helm install -n kube-system -f sentry.yaml --wait --timeout=1000s sentry stable/sentry
# helm install -n vault -f vault-values.yaml vault hashicorp/vault
# helm install -n monitoring -f kube-prometheus-stack.yaml prometheus prometheus-community/kube-prometheus-stack
# vim:ft=sh

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
master="etcd.service"
node="flannel.service"
nodes=$(kubectl get nodes --no-headers | cut -d' ' -f1)
master_node=$(echo $nodes | cut -d' ' -f1)
echo "$master_node: systemctl restart $master"
sudo systemctl restart $master
for n in $nodes; do
echo "$n: systemctl restart $node"
ssh root@$n systemctl restart $node &
done
echo "Waiting..."
wait

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
master="kube-apiserver kube-scheduler kube-controller-manager"
node="kube-proxy kubelet kube-certmgr-apitoken-bootstrap"
nodes=$(kubectl get nodes --no-headers | cut -d' ' -f1)
master_node=$(echo $nodes | cut -d' ' -f1)
echo "$master_node: systemctl restart $master"
sudo systemctl restart $master
for n in $nodes; do
echo "$n: systemctl restart $node"
ssh root@$n systemctl restart $node &
done
echo "Waiting..."
wait
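
A quick health check after the restarts (a sketch; assumes kubectl access from the same host):

# nodes should return to Ready and the control-plane pods should settle
kubectl get nodes
kubectl get pods -n kube-system -o wide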

@@ -0,0 +1,12 @@
read -r -d '' repos << EOF
jetstack;https://charts.jetstack.io
stable;https://kubernetes-charts.storage.googleapis.com/
minio;https://helm.min.io/
anchore;https://charts.anchore.io
bitnami;https://charts.bitnami.com/bitnami
hashicorp;https://helm.releases.hashicorp.com
ingress-nginx;https://kubernetes.github.io/ingress-nginx
prometheus-community;https://prometheus-community.github.io/helm-charts
EOF
for i in $repos; do IFS=";"; set $i; helm repo add $1 $2; done
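
After registering the repositories, the local chart index typically needs a refresh (sketch, not part of this commit):

helm repo update
helm repo list   # confirm the entries above are present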

@@ -0,0 +1,46 @@
# helm repo add minio https://helm.min.io/
# helm install --version 6.0.5 -f minio.yaml -n minio minio minio/minio
accessKey: Mkd324ijlnfll23883
secretKey: KJQfefrnflol93jpj31mrkjs3i88sj2L
# environment:
# MINIO_ACCESS_KEY_OLD: YOURACCESSKEY
# MINIO_SECRET_KEY_OLD: YOURSECRETKEY
defaultBucket:
enabled: true
name: default
policy: none
purge: false
buckets:
- name: serit
policy: none
purge: false
- name: gitlab
policy: none
purge: false
clusterDomain: kube2.local
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/ssl-redirect: "true"
cert-manager.io/cluster-issuer: ca-issuer
nginx.ingress.kubernetes.io/whitelist-source-range: 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16
hosts:
- minio.k2.local
tls:
- hosts:
- minio.k2.local
secretName: minio-tls
persistence:
enabled: true
size: 100Gi
storageClass: managed-nfs-storage

@@ -0,0 +1,287 @@
# helm install --namespace kube-system --timeout 1000 -f sentry.yaml sentry stable/sentry
# image:
# repository: sentry
# tag: 9
# pullPolicy: IfNotPresent
# # Add the secret name to pull from a private registry.
# imagePullSecrets: []
# # - name:
# How many web UI instances to run
# web:
# replicacount: 1
# resources:
# limits:
# cpu: 500m
# memory: 500Mi
# requests:
# cpu: 300m
# memory: 300Mi
# env:
# - name: GITHUB_APP_ID
# value:
# - name: GITHUB_API_SECRET
# value:
# nodeSelector: {}
# tolerations: []
# affinity: {}
# probeInitialDelaySeconds: 50
# priorityClassName: ""
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
# Optional extra labels for pod, i.e. redis-client: "true"
# podLabels: []
# How many cron instances to run
# cron:
# replicacount: 1
# resources:
# limits:
# cpu: 200m
# memory: 200Mi
# requests:
# cpu: 100m
# memory: 100Mi
# nodeSelector: {}
# tolerations: []
# affinity: {}
# priorityClassName: ""
# schedulerName:
# Optional extra labels for pod, i.e. redis-client: "true"
# podLabels: []
# How many worker instances to run
# worker:
# replicacount: 2
# resources:
# limits:
# cpu: 300m
# memory: 500Mi
# requests:
# cpu: 100m
# memory: 100Mi
# nodeSelector: {}
# tolerations: []
# affinity: {}
# priorityClassName: ""
# schedulerName:
# Optional extra labels for pod, i.e. redis-client: "true"
# podLabels: []
# concurrency:
# Admin user to create
user:
# Indicated to create the admin user or not,
# Default is true as the initial installation.
create: true
email: admin
# BYO Email server
# TODO: Add exim4 template
# https://docs.sentry.io/server/installation/docker/#outbound-email
email:
from_address: sentry@sentry.itpartner.no
host: smtpgw.itpartner.no
port: 465
use_tls: false
user: utvikling
password: S0m3rp0m@de#21!
enable_replies: false
# Name of the service and what port to expose on the pod
# Don't change these unless you know what you're doing
service:
name: sentry
type: ClusterIP
# externalPort: 9000
# internalPort: 9000
# ## Service annotations
# ##
# annotations: {}
## External IP addresses of service
## Default: nil
##
# externalIPs:
# - 192.168.0.1
## Load Balancer allow-list
# loadBalancerSourceRanges: []
# Configure the location of Sentry artifacts
filestore:
# Set to one of filesystem, gcs or s3 as supported by Sentry.
backend: filesystem
filesystem:
path: /var/lib/sentry/files
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: managed-nfs-storage
accessMode: ReadWriteOnce
size: 10Gi
## Whether to mount the persistent volume to the Sentry worker and
## cron deployments. This setting needs to be enabled for some advanced
## Sentry features, such as private source maps. If you disable this
## setting, the Sentry workers will not have access to artifacts you upload
## through the web deployment.
## Please note that you may need to change your accessMode to ReadWriteMany
## if you plan on having the web, worker and cron deployments run on
## different nodes.
# persistentWorkers: false
## Point this at a pre-configured secret containing a service account. The resulting
## secret will be mounted at /var/run/secrets/google
# gcs:
# credentialsFile: credentials.json
# secretName:
# bucketName:
## Currently unconfigured and changing this has no impact on the template configuration.
# s3: {}
# accessKey:
# secretKey:
# bucketName:
## Configure ingress resource that allow you to access the
## Sentry installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
enabled: true
hostname: sentry.itpartner.no
## Ingress annotations
##
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
nginx.ingress.kubernetes.io/backend-protocol: HTTP
nginx.ingress.kubernetes.io/ssl-redirect: "true"
# kubernetes.io/tls-acme: 'true'
tls:
- secretName: sentry-tls-cert
hosts:
- sentry.itpartner.no
# TODO: add support for plugins https://docs.sentry.io/server/plugins/
postgresql:
enabled: true
postgresqlDatabase: sentry
postgresqlUsername: postgres
postgresqlPassword: jdjiujh1212eo
# # Only used when internal PG is disabled
# # postgresHost: postgres
# # postgresPassword: postgres
# # postgresPort: 5432
# imageTag: "9.6"
# persistence:
# enabled: true
redis:
clusterDomain: kube2.local
# enabled: true
# Only used when internal redis is disabled
# host: redis
# Just omit the password field if your redis cluster doesn't use password
# password: redis
# port: 6379
# master:
# persistence:
# enabled: true
# If change pvc size redis.master.persistence.size: 20Gi
# config:
# configYml: ""
# sentryConfPy: ""
## Prometheus Exporter / Metrics
##
#metrics:
# enabled: true
# ## Configure extra options for liveness and readiness probes
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
# livenessProbe:
# enabled: true
# initialDelaySeconds: 30
# periodSeconds: 5
# timeoutSeconds: 2
# failureThreshold: 3
# successThreshold: 1
# readinessProbe:
# enabled: true
# initialDelaySeconds: 30
# periodSeconds: 5
# timeoutSeconds: 2
# failureThreshold: 3
# successThreshold: 1
# ## Metrics exporter resource requests and limits
# ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
# resources:
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 100m
# memory: 100Mi
# nodeSelector: {}
# tolerations: []
# affinity: {}
# # schedulerName:
# # Optional extra labels for pod, i.e. redis-client: "true"
# # podLabels: []
# service:
# type: ClusterIP
# labels: {}
# image:
# repository: prom/statsd-exporter
# tag: v0.10.5
# pullPolicy: IfNotPresent
# # Enable this if you're using https://github.com/coreos/prometheus-operator
# serviceMonitor:
# enabled: true
# ## Specify a namespace if needed
# # namespace: kube-system
# # fallback to the prometheus default unless specified
# # interval: 10s
# ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr)
# ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1)
# ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
# # selector:
# # app: prometheus
# #prometheus: kube-prometheus
## Provide affinity for hooks if needed
#hooks:
# affinity: {}
# dbInit:
# resources:
# # We setup 3000Mi for the memory limit because of a Sentry instance need at least 3Gb RAM to perform a migration process
# # reference: https://github.com/helm/charts/issues/15296
# limits:
# memory: 3200Mi
# requests:
# memory: 3000Mi

@@ -0,0 +1,588 @@
# Available parameters and their default values for the Vault chart.
global:
# enabled is the master enabled switch. Setting this to true or false
# will enable or disable all the components within this chart by default.
enabled: true
# Image pull secret to use for registry authentication.
imagePullSecrets: []
# imagePullSecrets:
# - name: image-pull-secret
# TLS for end-to-end encrypted transport
tlsDisable: true
# If deploying to OpenShift
openshift: false
# Create PodSecurityPolicy for pods
psp:
enable: false
# Annotation for PodSecurityPolicy.
# This is a multi-line templated string map, and can also be set as YAML.
annotations: |
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
injector:
# True if you want to enable vault agent injection.
enabled: true
# If true, will enable a node exporter metrics endpoint at /metrics.
metrics:
enabled: false
# External vault server address for the injector to use. Setting this will
# disable deployment of a vault server along with the injector.
externalVaultAddr: ""
# image sets the repo and tag of the vault-k8s image to use for the injector.
image:
repository: "hashicorp/vault-k8s"
tag: "0.6.0"
pullPolicy: IfNotPresent
# agentImage sets the repo and tag of the Vault image to use for the Vault Agent
# containers. This should be set to the official Vault image. Vault 1.3.1+ is
# required.
agentImage:
repository: "vault"
tag: "1.5.4"
# Mount Path of the Vault Kubernetes Auth Method.
authPath: "auth/kubernetes"
# Configures the log verbosity of the injector. Supported log levels: Trace, Debug, Error, Warn, Info
logLevel: "info"
# Configures the log format of the injector. Supported log formats: "standard", "json".
logFormat: "standard"
# Configures all Vault Agent sidecars to revoke their token when shutting down
revokeOnShutdown: false
# namespaceSelector is the selector for restricting the webhook to only
# specific namespaces.
# See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector
# for more details.
# Example:
# namespaceSelector:
# matchLabels:
# sidecar-injector: enabled
namespaceSelector: {}
# Configures failurePolicy of the webhook. By default webhook failures are ignored.
# To block pod creation while webhook is unavailable, set the policy to `Fail` below.
# See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy
#
# failurePolcy: Fail
certs:
# secretName is the name of the secret that has the TLS certificate and
# private key to serve the injector webhook. If this is null, then the
# injector will default to its automatic management mode that will assign
# a service account to the injector to generate its own certificates.
secretName: null
# caBundle is a base64-encoded PEM-encoded certificate bundle for the
# CA that signed the TLS certificate that the webhook serves. This must
# be set if secretName is non-null.
caBundle: ""
# certName and keyName are the names of the files within the secret for
# the TLS cert and private key, respectively. These have reasonable
# defaults but can be customized if necessary.
certName: tls.crt
keyName: tls.key
resources: {}
# resources:
# requests:
# memory: 256Mi
# cpu: 250m
# limits:
# memory: 256Mi
# cpu: 250m
# extraEnvironmentVars is a list of extra environment variables to set in the
# injector deployment.
extraEnvironmentVars: {}
# KUBERNETES_SERVICE_HOST: kubernetes.default.svc
# Affinity Settings for injector pods
# This should be a multi-line string matching the affinity section of a
# PodSpec.
affinity: null
# Toleration Settings for injector pods
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
tolerations: null
# nodeSelector labels for injector pod assignment, formatted as a muli-line string.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# Example:
# nodeSelector: |
# beta.kubernetes.io/arch: amd64
nodeSelector: null
# Priority class for injector pods
priorityClassName: ""
# Extra annotations to attach to the injector pods
# This can either be YAML or a YAML-formatted multi-line templated string map
# of the annotations to apply to the injector pods
annotations: {}
server:
# Resource requests, limits, etc. for the server cluster placement. This
# should map directly to the value of the resources field for a PodSpec.
# By default no direct resource request is made.
image:
repository: "vault"
tag: "1.5.4"
# Overrides the default Image Pull Policy
pullPolicy: IfNotPresent
# Configure the Update Strategy Type for the StatefulSet
# See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategyType: "OnDelete"
resources: {}
# resources:
# requests:
# memory: 256Mi
# cpu: 250m
# limits:
# memory: 256Mi
# cpu: 250m
# Ingress allows ingress services to be created to allow external access
# from Kubernetes to access Vault pods.
# If deployment is on OpenShift, the following block is ignored.
# In order to expose the service, use the route section below
ingress:
enabled: true
labels: {}
# traffic: external
annotations:
# |
cert-manager.io/cluster-issuer: letsencrypt-production
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
hosts:
- host: vault.k2.itpartner.no
paths: [ / ]
tls:
- secretName: vault-tls
hosts:
- vault.k2.itpartner.no
# OpenShift only - create a route to expose the service
# The created route will be of type passthrough
route:
enabled: false
labels: {}
annotations: {}
host: chart-example.local
# authDelegator enables a cluster role binding to be attached to the service
# account. This cluster role binding can be used to setup Kubernetes auth
# method. https://www.vaultproject.io/docs/auth/kubernetes.html
authDelegator:
enabled: true
# extraInitContainers is a list of init containers. Specified as a YAML list.
# This is useful if you need to run a script to provision TLS certificates or
# write out configuration files in a dynamic way.
extraInitContainers: null
# # This example installs a plugin pulled from github into the /usr/local/libexec/vault/oauthapp folder,
# # which is defined in the volumes value.
# - name: oauthapp
# image: "alpine"
# command: [sh, -c]
# args:
# - cd /tmp &&
# wget https://github.com/puppetlabs/vault-plugin-secrets-oauthapp/releases/download/v1.2.0/vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64.tar.xz -O oauthapp.xz &&
# tar -xf oauthapp.xz &&
# mv vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64 /usr/local/libexec/vault/oauthapp &&
# chmod +x /usr/local/libexec/vault/oauthapp
# volumeMounts:
# - name: plugins
# mountPath: /usr/local/libexec/vault
# extraContainers is a list of sidecar containers. Specified as a YAML list.
extraContainers: null
# shareProcessNamespace enables process namespace sharing between Vault and the extraContainers
# This is useful if Vault must be signaled, e.g. to send a SIGHUP for log rotation
shareProcessNamespace: false
# extraArgs is a string containing additional Vault server arguments.
extraArgs: ""
# Used to define custom readinessProbe settings
readinessProbe:
enabled: true
# If you need to use a http path instead of the default exec
# path: /v1/sys/health?standbyok=true
# When a probe fails, Kubernetes will try failureThreshold times before giving up
failureThreshold: 2
# Number of seconds after the container has started before probe initiates
initialDelaySeconds: 5
# How often (in seconds) to perform the probe
periodSeconds: 5
# Minimum consecutive successes for the probe to be considered successful after having failed
successThreshold: 1
# Number of seconds after which the probe times out.
timeoutSeconds: 3
# Used to enable a livenessProbe for the pods
livenessProbe:
enabled: false
path: "/v1/sys/health?standbyok=true"
# When a probe fails, Kubernetes will try failureThreshold times before giving up
failureThreshold: 2
# Number of seconds after the container has started before probe initiates
initialDelaySeconds: 60
# How often (in seconds) to perform the probe
periodSeconds: 5
# Minimum consecutive successes for the probe to be considered successful after having failed
successThreshold: 1
# Number of seconds after which the probe times out.
timeoutSeconds: 3
# Used to set the sleep time during the preStop step
preStopSleepSeconds: 5
# Used to define commands to run after the pod is ready.
# This can be used to automate processes such as initialization
# or boostrapping auth methods.
postStart: []
# - /bin/sh
# - -c
# - /vault/userconfig/myscript/run.sh
# extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be
# used to include variables required for auto-unseal.
extraEnvironmentVars: {}
# GOOGLE_REGION: global
# GOOGLE_PROJECT: myproject
# GOOGLE_APPLICATION_CREDENTIALS: /vault/userconfig/myproject/myproject-creds.json
# extraSecretEnvironmentVars is a list of extra environment variables to set with the stateful set.
# These variables take value from existing Secret objects.
extraSecretEnvironmentVars: []
# - envName: AWS_SECRET_ACCESS_KEY
# secretName: vault
# secretKey: AWS_SECRET_ACCESS_KEY
# extraVolumes is a list of extra volumes to mount. These will be exposed
# to Vault in the path `/vault/userconfig/<name>/`. The value below is
# an array of objects, examples are shown below.
extraVolumes: []
# - type: secret (or "configMap")
# name: my-secret
# path: null # default is `/vault/userconfig`
# volumes is a list of volumes made available to all containers. These are rendered
# via toYaml rather than pre-processed like the extraVolumes value.
# The purpose is to make it easy to share volumes between containers.
volumes: null
# - name: plugins
# emptyDir: {}
# volumeMounts is a list of volumeMounts for the main server container. These are rendered
# via toYaml rather than pre-processed like the extraVolumes value.
# The purpose is to make it easy to share volumes between containers.
volumeMounts: null
# - mountPath: /usr/local/libexec/vault
# name: plugins
# readOnly: true
# Affinity Settings
# Commenting out or setting as empty the affinity variable, will allow
# deployment to single node services such as Minikube
affinity: |
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: {{ template "vault.name" . }}
app.kubernetes.io/instance: "{{ .Release.Name }}"
component: server
topologyKey: kubernetes.io/hostname
# Toleration Settings for server pods
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
tolerations: null
# nodeSelector labels for server pod assignment, formatted as a muli-line string.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# Example:
# nodeSelector: |
# beta.kubernetes.io/arch: amd64
nodeSelector: null
# Enables network policy for server pods
networkPolicy:
enabled: false
# Priority class for server pods
priorityClassName: ""
# Extra labels to attach to the server pods
# This should be a YAML map of the labels to apply to the server pods
extraLabels: {}
# Extra annotations to attach to the server pods
# This can either be YAML or a YAML-formatted multi-line templated string map
# of the annotations to apply to the server pods
annotations: {}
# Enables a headless service to be used by the Vault Statefulset
service:
enabled: true
# clusterIP controls whether a Cluster IP address is attached to the
# Vault service within Kubernetes. By default the Vault service will
# be given a Cluster IP address, set to None to disable. When disabled
# Kubernetes will create a "headless" service. Headless services can be
# used to communicate with pods directly through DNS instead of a round robin
# load balancer.
# clusterIP: None
# Configures the service type for the main Vault service. Can be ClusterIP
# or NodePort.
#type: ClusterIP
# If type is set to "NodePort", a specific nodePort value can be configured,
# will be random if left blank.
#nodePort: 30000
# Port on which Vault server is listening
port: 8200
# Target port to which the service should be mapped to
targetPort: 8200
# Extra annotations for the service definition. This can either be YAML or a
# YAML-formatted multi-line templated string map of the annotations to apply
# to the service.
annotations: {}
# This configures the Vault Statefulset to create a PVC for data
# storage when using the file or raft backend storage engines.
# See https://www.vaultproject.io/docs/configuration/storage/index.html to know more
dataStorage:
enabled: true
# Size of the PVC created
size: 10Gi
# Location where the PVC will be mounted.
mountPath: "/vault/data"
# Name of the storage class to use. If null it will use the
# configured default Storage Class.
storageClass: null
# Access Mode of the storage device being used for the PVC
accessMode: ReadWriteOnce
# Annotations to apply to the PVC
annotations: {}
# This configures the Vault Statefulset to create a PVC for audit
# logs. Once Vault is deployed, initialized and unseal, Vault must
# be configured to use this for audit logs. This will be mounted to
# /vault/audit
# See https://www.vaultproject.io/docs/audit/index.html to know more
auditStorage:
enabled: false
# Size of the PVC created
size: 10Gi
# Location where the PVC will be mounted.
mountPath: "/vault/audit"
# Name of the storage class to use. If null it will use the
# configured default Storage Class.
storageClass: null
# Access Mode of the storage device being used for the PVC
accessMode: ReadWriteOnce
# Annotations to apply to the PVC
annotations: {}
# Run Vault in "dev" mode. This requires no further setup, no state management,
# and no initialization. This is useful for experimenting with Vault without
# needing to unseal, store keys, et. al. All data is lost on restart - do not
# use dev mode for anything other than experimenting.
# See https://www.vaultproject.io/docs/concepts/dev-server.html to know more
dev:
enabled: false
# Run Vault in "standalone" mode. This is the default mode that will deploy if
# no arguments are given to helm. This requires a PVC for data storage to use
# the "file" backend. This mode is not highly available and should not be scaled
# past a single replica.
standalone:
enabled: "-"
# config is a raw string of default configuration when using a Stateful
# deployment. Default is to use a PersistentVolumeClaim mounted at /vault/data
# and store data there. This is only used when using a Replica count of 1, and
# using a stateful set. This should be HCL.
# Note: Configuration files are stored in ConfigMaps so sensitive data
# such as passwords should be either mounted through extraSecretEnvironmentVars
# or through a Kube secret. For more information see:
# https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
config: |
ui = true
listener "tcp" {
tls_disable = 1
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "file" {
path = "/vault/data"
}
# Example configuration for using auto-unseal, using Google Cloud KMS. The
# GKMS keys must already exist, and the cluster must have a service account
# that is authorized to access GCP KMS.
#seal "gcpckms" {
# project = "vault-helm-dev"
# region = "global"
# key_ring = "vault-helm-unseal-kr"
# crypto_key = "vault-helm-unseal-key"
#}
# Run Vault in "HA" mode. There are no storage requirements unless audit log
# persistence is required. In HA mode Vault will configure itself to use Consul
# for its storage backend. The default configuration provided will work the Consul
# Helm project by default. It is possible to manually configure Vault to use a
# different HA backend.
ha:
enabled: false
replicas: 3
# Set the api_addr configuration for Vault HA
# See https://www.vaultproject.io/docs/configuration#api_addr
# If set to null, this will be set to the Pod IP Address
apiAddr: null
# Enables Vault's integrated Raft storage. Unlike the typical HA modes where
# Vault's persistence is external (such as Consul), enabling Raft mode will create
# persistent volumes for Vault to store data according to the configuration under server.dataStorage.
# The Vault cluster will coordinate leader elections and failovers internally.
raft:
# Enables Raft integrated storage
enabled: false
# Set the Node Raft ID to the name of the pod
setNodeId: false
# Note: Configuration files are stored in ConfigMaps so sensitive data
# such as passwords should be either mounted through extraSecretEnvironmentVars
# or through a Kube secret. For more information see:
# https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
config: |
ui = true
listener "tcp" {
tls_disable = 1
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "raft" {
path = "/vault/data"
}
service_registration "kubernetes" {}
# config is a raw string of default configuration when using a Stateful
# deployment. Default is to use a Consul for its HA storage backend.
# This should be HCL.
# Note: Configuration files are stored in ConfigMaps so sensitive data
# such as passwords should be either mounted through extraSecretEnvironmentVars
# or through a Kube secret. For more information see:
# https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
config: |
ui = true
listener "tcp" {
tls_disable = 1
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "consul" {
path = "vault"
address = "HOST_IP:8500"
}
service_registration "kubernetes" {}
# Example configuration for using auto-unseal, using Google Cloud KMS. The
# GKMS keys must already exist, and the cluster must have a service account
# that is authorized to access GCP KMS.
#seal "gcpckms" {
# project = "vault-helm-dev-246514"
# region = "global"
# key_ring = "vault-helm-unseal-kr"
# crypto_key = "vault-helm-unseal-key"
#}
# A disruption budget limits the number of pods of a replicated application
# that are down simultaneously from voluntary disruptions
disruptionBudget:
enabled: true
# maxUnavailable will default to (n/2)-1 where n is the number of
# replicas. If you'd like a custom value, you can specify an override here.
maxUnavailable: null
# Definition of the serviceAccount used to run Vault.
# These options are also used when using an external Vault server to validate
# Kubernetes tokens.
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# Extra annotations for the serviceAccount definition. This can either be
# YAML or a YAML-formatted multi-line templated string map of the
# annotations to apply to the serviceAccount.
annotations: {}
# Settings for the statefulSet used to run Vault.
statefulSet:
# Extra annotations for the statefulSet. This can either be YAML or a
# YAML-formatted multi-line templated string map of the annotations to apply
# to the statefulSet.
annotations: {}
# Vault UI
ui:
# True if you want to create a Service entry for the Vault UI.
#
# serviceType can be used to control the type of service created. For
# example, setting this to "LoadBalancer" will create an external load
# balancer (for supported K8S installations) to access the UI.
enabled: false
publishNotReadyAddresses: true
# The service should only contain selectors for active Vault pod
activeVaultPodOnly: false
serviceType: "ClusterIP"
serviceNodePort: null
externalPort: 8200
# loadBalancerSourceRanges:
# - 10.0.0.0/16
# - 1.78.23.3/32
# loadBalancerIP:
# Extra annotations to attach to the ui service
# This can either be YAML or a YAML-formatted multi-line templated string map
# of the annotations to apply to the ui service
annotations: {}

@@ -0,0 +1,24 @@
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: kubernetes-dashboard
namespace: kubernetes-dashboard
annotations:
kubernetes.io/ingress.class: "nginx"
cert-manager.io/cluster-issuer: letsencrypt-production
nginx.ingress.kubernetes.io/whitelist-source-range: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
rules:
- host: dashboard.k2.itpartner.no
http:
paths:
- backend:
serviceName: kubernetes-dashboard
servicePort: 443
tls:
- hosts:
- dashboard.k2.itpartner.no
secretName: kubernetes-dashboard-tls

@@ -9,6 +9,6 @@ items:
labels:
app: grafana
name: grafana-ldap-toml
namespace: kube-system
namespace: prometheus
data:
ldap-toml: @grafana_ldap_toml@

@@ -9,7 +9,7 @@ items:
labels:
app: grafana
name: grafana-smtp
namespace: kube-system
namespace: prometheus
data:
user: @grafana_smtp_user@
password: @grafana_smtp_password@

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Secret
metadata:
labels:
app: sentry-postgresql
chart: postgresql-6.5.0
heritage: Helm
release: sentry
name: sentry-sentry-postgresql
namespace: kube-system
type: Opaque
data:
postgresql-password: a1pyWlBCazVzSQ==

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
TOP=@out@/share/kube-system-bootstrap
dest=kube-system-bootstrap
TOP=@out@/share/kubernetes-config
dest=kubernetes-config
[ $# = 1 ] && dest=$1
cp -r $TOP $dest
chmod -R ug+w $dest

@@ -16,15 +16,15 @@ in
default = null;
};
domain = mkOption {
type = types.str;
default = "local";
};
# domain = mkOption {
# type = types.str;
# default = "local";
# };
searchDomains = mkOption {
type = types.listOf types.str;
default = [ cfg.domain ];
};
# searchDomains = mkOption {
# type = types.listOf types.str;
# default = [ cfg.domain ];
# };
cert = mkOption {
type = types.attrs;
@@ -78,7 +78,7 @@ in
};
imports = [
./nixos/configuration.nix
../nixos/configuration.nix
];
}

@@ -1,8 +0,0 @@
#!/usr/bin/env bash
pod=`kubectl get pods -n mssql | grep Running | grep consto-ks | cut -d' ' -f1`
bak=`kubectl exec -n mssql $pod ls -- -1 /var/opt/mssql/data/ | grep '.bak$'`
for i in $bak; do
kubectl cp mssql/$pod:/var/opt/mssql/data/$i .
done

@@ -1 +0,0 @@
for i in (seq 2 5); ssh k0- docker system prune -a;end

@@ -1,33 +0,0 @@
#!/usr/bin/env bash
TOP="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/.."
if [ "x$1" = "x" ]; then
echo "usage: install-namespace.sh {namespace}"
exit 1
fi
namespace=$1
tmpfile=/tmp/new-$namespace.$$
cat << EOF > $tmpfile
apiVersion: v1
kind: Namespace
metadata:
labels:
name: $namespace
name: $namespace
---
apiVersion: v1
kind: Secret
metadata:
name: gitlab-registry-auth
namespace: $namespace
data:
.dockerconfigjson: eyJhdXRocyI6eyJodHRwczovL3JlZ2lzdHJ5Lml0cGFydG5lci5ubyI6eyJ1c2VybmFtZSI6ImpvbmFzIiwicGFzc3dvcmQiOiJTRldwLVk0bkVfdXpNZFJxeHp6SyIsImF1dGgiOiJhbTl1WVhNNlUwWlhjQzFaTkc1RlgzVjZUV1JTY1hoNmVrcz0ifX19
type: kubernetes.io/dockerconfigjson
EOF
kubectl apply -f $tmpfile
rm $tmpfile

@@ -1,3 +0,0 @@
#!/usr/bin/env bash
kubectl delete secrets --all-namespaces --field-selector='type=kubernetes.io/service-account-token'

@@ -1,3 +0,0 @@
#!/bin/sh
kubectl taint node $1 ClusterService="true":NoSchedule

@@ -1,12 +0,0 @@
#!/bin/sh
host=$1; shift
curl -i -N \
-H "Connection: upgrade"\
-H "Upgrade: websocket"\
-H "Sec-WebSocket-Key: SGVsbG8sIHdvcmxkIQ=="\
-H "Sec-WebSocket-Version: 13"\
-H "Origin: http://foo.com/"\
-H "Host: $host" $@