Reorganize project

Jonas Juselius
2020-11-05 10:02:01 +01:00
parent 4876de1547
commit 6fea8b3bc8
57 changed files with 1106 additions and 319 deletions

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
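# Create the given namespace and seed it with the GitLab registry pull secret
# and the kestrel TLS keystore.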
TOP="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/.."
if [ -z "$1" ]; then
echo "usage: setup-namespace.sh {namespace}"
exit 1
fi
namespace=$1
tmpfile=/tmp/helm-$namespace.$$
cat << EOF > $tmpfile
apiVersion: v1
kind: Namespace
metadata:
labels:
name: $namespace
name: $namespace
---
apiVersion: v1
metadata:
name: gitlab-pull-secret
namespace: $namespace
kind: Secret
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: ewoJImF1dGhzIjogewoJCSJyZWdpc3RyeS5naXRsYWIuY29tIjogewoJCQkiYXV0aCI6ICJaMmwwYkdGaUsyUmxjR3h2ZVMxMGIydGxiaTB4T1Rnd01qQTZPRmxqU0VoMFZIaENSVUZUTFZKUWRsSnJXbGM9IgoJCX0KCX0sCgkiSHR0cEhlYWRlcnMiOiB7CgkJIlVzZXItQWdlbnQiOiAiRG9ja2VyLUNsaWVudC8xOS4wMy4xMiAobGludXgpIgoJfQp9Cg==
---
apiVersion: v1
kind: Secret
metadata:
name: kestrel-tls
namespace: $namespace
type: Opaque
data:
kestrel.pfx: MIIJcQIBAzCCCTcGCSqGSIb3DQEHAaCCCSgEggkkMIIJIDCCA9cGCSqGSIb3DQEHBqCCA8gwggPEAgEAMIIDvQYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYwDgQINE11xXT7iV4CAggAgIIDkBMylQRDdNJTEryjKEYajwYVWfkJDmEXfToulTYOU1Jv1q7z+le15hCGwauS/yDRCS4QjcTmW+XT7MopnqLlVXDF2dZbk+a1ThTiaToTqXbRWpI2sfzuFjbA6cYPJNonBDKKNwUmewnAog37u9qaQk2MCsaUw6t7pBp7HpvtnVR/GbsbY98udx6kqATlyZtNnhg8QhgTF9dfGf7VeQj0wq1gGaiGXq0kNJBwod7my8caQD3gUtRQf0ZKZN6RF8r2a4mjf0YyOBsLtSbZ7bceHtdN5PlOi1wu47XSAhSzqNHpNM8K4o0HvMol1m8QzQSmM6KY6vOrTagX+rSejV5aX82gdpiNjRa8HGO5+S8oRsL2/xX2FoxzUCkpzjyoAHJ6Bd25tem/ls4l921hlVmHZuseMiuwMisBw+bTYXEmug3SK54wkZi0nkjRAUTTZRy5KqYWDYXzxuT6MZPiROQRv66PpAG6IPtnhv0iIyszwAlYf6zZcT8Xlh6M9tMPuDFEKMzUbff8/FUWPrLLAZIuPC1PjbmkQ+bCrqN2JkDoJKbJjs8FEvq45vaG/R9rKnWeXakbrcKt7iEVQRUynHfXheZMPfhyB2QBS5gO5mjVLx062Lf+4h5oAf43Kbu5iGfYDTQHazW1jfMCfq87ufvMVlAlqJ0TQCUDPcDjW0o5MAv5wJibOciw5IJ/AEXV42apWUsei2sKB62JcFSiwUc+7a4QcCh0Cn4pgBjpi4T9v0mOWOCcu26IeJPeBpAW+4fgMfmiL5AfGCeY4YNiTrK0yHaUBA7kLCXCPKUHKYP71WkVeuooih0yJJZ/ZqWN+aIOm0c/DPAjUgkEVVtZXescW2Ae0NgdMhMeJ5kfsPYlTeOtFwzoSRu8wMPBr/Ufg2aEWc5GaZRbQmFzvmcg9aPFpltQ0XXGyaD+c6JR2t5b5YgH5MLRh5uZmYhFIBwBHIUQZ9Sc+7pjHUt9TnnVz3fT72pGi47Py5mm0W95euC2YucqclSQ7wjj4OKqgNKDp4o/ALZaZUURZeLl8xwsQ9liAiw2hEw9tvFvdWb9RM3wVEU5ol9n0OnReOSzYDMfUaUxiVTnA8r3SuavdbsuiyWpZ6lJJQRuwNhUfVat+c39OamXbe1J9E7wRDxAISKE2+JofaTOublkMNsaxP2TYDs9xDL/0oCHCxyu2hoCyOK8b94gw2yyW/+UdqUxLHdBgEvEbjCCBUEGCSqGSIb3DQEHAaCCBTIEggUuMIIFKjCCBSYGCyqGSIb3DQEMCgECoIIE7jCCBOowHAYKKoZIhvcNAQwBAzAOBAi7oqckaHZOQQICCAAEggTIlVZIQOhHhHchiWYqwlt9zdOP21UU3v9ghDAwuimISuLxTyfr+HXC9+5TRW5eKygiBv9czjZuZcUTQxHLlW2KQmz5EVik/CeAbhmgK3XL9YGTZxGccSNGaS9dLvxWVDbeCoNK7rr9R3zV4vWLPerfGjezpT/VWHLie0pBaNbHg8GHhwlLBSzo+ODkSD1N1cf/6wAny3Cdf8mwgRVmT5dQ7DVfnpkXNQ5Mztu8faLeMjURU2XCZecOZ4vVxr5cCCnOn4vWMPxwGeJUQVVt1M1BFyLg3DyTBpHST0qkV4PKDESjmha8d0zc+ifKr10e1a8LU9KHWsb8L0in2gTor8F9QwoYhtx2UQdx4n4qQ3GKoR0nfBl3aZw97hdPJgDKKoM1BFpXMg3LRMQSL/0FP2JfKQt/Qni3vvGE5A207ouMU+0G9qHdniR7DVmbmDEOTJQgbeYivLPOuYlXSx2aK51VUxO+agOnxLg8RYqOo8Ex7ZtnxjpfQQVirMJ3yPdD3cqUVLiJ/Y5xFOvwjoLNdlxxTC+QgsCN1K+Vg3KY+pn8d/iAmrMfUs992jz0xwXrUBG6V2lrXE6dI3SpTT8/h265Cd8KjHiXSJOP02sHm0PWTVQLAeJluV6DLgHB73jQ6fb01AbVkbG8lL2fhWl1R5cD7OdAIP7n6FgoSrzA/eqKTLYPKIbA73HdPCMm2zzb5mdlo8rh+0FJ8vegRJ4DFGag/FpAJRxeUnyLyO29tOKpp/u9uwrYDooYn5ci5gLeehUfGqEJlNp8GPbqiKAkWwNLaNguJA+kT0v7XVoEdNkDB6upObyJOObHl1W6s5vHFarNojcNINgTV3sEJT5tDuLZ282Lw99Wg1lvUGJbdO3dgq5NLey11cmRTBR/KyqSoYPGUfC8Aihfo/djUbXUjs/I6WuPRoqcSzQsjqHt7hR7aHF2ahKw2WxQhT27jgUpTcgd1O00uOsb3BPhskpY9ggRNs9AaVJu2RHyxwX2TWkY/AmMYG8UIRmKkAzLctUVwSIEY27GbluvSLtIhVl4I7DV8SYcWkxu/1NoT8aMlQMJeULgFMzG49GWnJDOgOXxcTRUFL3hniisWU5h8PPNxIGLqYf4C5ocXPCg7sap+6IrEm3lP9nwXlhMfHOMbKRX5p0W+0bzEtf4sZlwt6An5+WJmIP0oegz7tzsJNzEiDShK0TaEgfRyBi+NM781zGOCN7X4Lvzl3L5CAAtRGhfYMH52X7vf70Gf74wREa75O91NurJTaRMlztWoA7vAI2maYoPO9wyBWIsQDyv4cmL3xCai0TIza7Wtu8SHKnJCKGp90fftNU8PNlN6StVi2y8VKY+whRqFcZR1dbx+ClsPOHAcosNkZ3Vv9EuieSZaCSNT7dOvCiSgVgzuLg3CC/SBPfzVeqaoLL4xMVtfKXqZfX7SDoftNlD8rlY+hHR1pdxpvKhZPIgDkMRAZ8Z6IWKWdyRACeNM5NWK56e9D8zm5VDKDZCz1n/zozH577ABvj6+dZkahl/FKpryFxY4qtKxnYXXd8DVYt6t1NFIz3Ybov3r/7fHYqm8OLmF9FZqeC4gqr9HUbuDkaU0mPCHtWrL2nkhhuSKR4sm2VfhUSegJkiTKvD5+DhpIaDMSUwIwYJKoZIhvcNAQkVMRYEFJpr2WGeI1IjCffN9Qs1YLuF26qUMDEwITAJBgUrDgMCGgUABBRuJCgviB/YoTN9wqikECF7WyAN9QQI/4JQvFeDBswCAggA
EOF
kubectl apply -f $tmpfile
rm $tmpfile

View File

@@ -0,0 +1 @@
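# Prune unused Docker data on each worker node (fish syntax; assumes hosts are named k0-<n>).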
for i in (seq 2 5); ssh k0-$i docker system prune -a; end

View File

@@ -0,0 +1,49 @@
#!/usr/bin/env bash
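# Prune old tags from the GitLab container registry (keeping the 10 most recent
# matching tags per repository), then garbage-collect the registry.
# Usage: --all to prune every project in every group, or pass project IDs or
# group/project paths as arguments.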
token=UTjgSspYQcX-BVUd1UsC
api=https://gitlab.com/api/v4
prune () {
id=$1
reg=$(curl -s --header "PRIVATE-TOKEN: $token" \
"$api/projects/$id/registry/repositories" \
| json_pp | sed -n 's/^ *"id" *: *\([0-9]\+\).*/\1/p')
for i in $reg; do
curl -s --request DELETE --data 'keep_n=10' \
--data 'name_regex=.*[0-9].*' \
--header "PRIVATE-TOKEN: $token" \
"$api/projects/$id/registry/repositories/$i/tags"
done
}
gc () {
pod=$(kubectl get pod -n gitlab -lapp=registry | tail -1 | cut -d' ' -f1)
kubectl exec -n gitlab $pod -- \
registry garbage-collect /etc/docker/registry/config.yml -m
}
all () {
groups=$(curl -s --header "PRIVATE-TOKEN: $token" "$api/groups" \
| json_pp | sed -n 's/^ *"id" *: *\([0-9]\+\).*/\1/p')
for g in $groups; do
proj=$(curl -s --header "PRIVATE-TOKEN: $token" \
"$api/groups/$g/projects?simple=true&include_subgroups=true" \
| json_pp | sed -n 's/^ \{6\}"id" *: *\([0-9]\+\).*/\1/p')
for p in $proj; do
prune $p
done
done
}
projects () {
for i in $@; do
prune $(echo $i | sed 's,/,%2F,g')
done
}
case $1 in
--all) all ;;
*) projects $@
esac
gc

View File

@@ -0,0 +1,92 @@
#!/usr/bin/env bash
TOP=@out@/share/kube-system-bootstrap
ca=@initca@
apiserver="@apiserver@"
fileserver="@fileserver@"
grafana_ldap_toml="@grafana_ldap_toml@"
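# Bootstrap kube-system: apply the RBAC/secret configs, install the
# prometheus-operator CRDs and cert-manager, then the base charts and the
# prometheus-operator stack. The @...@ placeholders are substituted at build time.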
apply_configs () {
d=$TOP/config
configs[0]=$d/cluster-auth-rbac.yaml
configs[1]=$d/kube-proxy.yaml
configs[2]=$d/front-proxy-client.yaml
configs[3]=$d/grafana-smtp-secret.yaml
[ ! -z $grafana_ldap_toml ] && configs[4]=$d/grafana-ldap-toml.yaml
kubectl delete secret cluster-ca -n kube-system >/dev/null 2>&1
kubectl create secret tls cluster-ca \
--namespace=kube-system --cert=${ca}/ca.pem --key=${ca}/ca-key.pem
for i in ${configs[@]}; do
kubectl apply -f $i
done
}
install_certmgr () {
kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml
helm repo add jetstack https://charts.jetstack.io
helm install -n kube-system -f $TOP/charts/cert-manager.yaml \
cert-manager jetstack/cert-manager
}
helm_install () {
echo "helm install $1"
# an optional second argument overrides the default values file
helm install -n kube-system -f ${2:-$TOP/charts/$1.yaml} $1 stable/$1
}
helm_delete () {
echo "helm delete existing $1"
helm delete -n kube-system $1
}
install_prometheus () {
helm_delete prometheus-operator
yaml=/tmp/prometheus-operator.yaml
cp $TOP/charts/prometheus-operator.yaml $yaml
chmod 640 $yaml
# disable ldap for grafana
[ -z $grafana_ldap_toml ] && \
sed -i '/auth\.ldap:/,+1 s/true/false/; /ldap:/,+1 d' $yaml
# disable storage
[ -z $fileserver ] && \
sed -i '/prometheusSpec:/,+10d' $yaml
helm_install prometheus-operator $yaml
}
install_charts () {
[ ! -z $fileserver ] && charts[0]=nfs-client-provisioner
charts[1]=nginx-ingress
charts[2]=metrics-server
charts[3]=kubernetes-dashboard
for i in ${charts[@]};do
helm_install $i
sleep 30
done
}
install_prometheus_crds () {
url=https://raw.githubusercontent.com/helm/charts/master/stable/prometheus-operator/crds
kubectl apply -f $url/crd-alertmanager.yaml
kubectl apply -f $url/crd-prometheus.yaml
kubectl apply -f $url/crd-prometheusrules.yaml
kubectl apply -f $url/crd-servicemonitor.yaml
kubectl apply -f $url/crd-podmonitor.yaml
}
helm repo add stable https://kubernetes-charts.storage.googleapis.com
helm repo update
apply_configs
install_prometheus_crds
install_certmgr
install_charts
install_prometheus
# helm install -n kube-system -f sentry.yaml --wait --timeout=1000s sentry stable/sentry
# helm install -n vault -f vault-values.yaml vault hashicorp/vault
# helm install -n monitoring -f kube-prometheus-stack.yaml prometheus prometheus-community/kube-prometheus-stack
# vim:ft=sh

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
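# Delete all service-account token secrets in every namespace; the token
# controller recreates them with fresh credentials.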
kubectl delete secrets --all-namespaces --field-selector='type=kubernetes.io/service-account-token'

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
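# Restart etcd on the master (run locally) and the flannel service on every node over ssh.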
master="etcd.service"
node="flannel.service"
nodes=$(kubectl get nodes --no-headers | cut -d' ' -f1)
master_node=$(echo $nodes | cut -d' ' -f1)
echo "$master_node: systemctl restart $master"
sudo systemctl restart $master
for n in $nodes; do
echo "$n: systemctl restart $node"
ssh root@$n systemctl restart $node &
done
echo "Waiting..."
wait

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
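# Restart the control-plane services locally and the node services on every node over ssh.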
master="kube-apiserver kube-scheduler kube-controller-manager"
node="kube-proxy kubelet kube-certmgr-apitoken-bootstrap"
nodes=$(kubectl get nodes --no-headers | cut -d' ' -f1)
master_node=$(echo $nodes | cut -d' ' -f1)
echo "$master_node: systemctl restart $master"
sudo systemctl restart $master
for n in $nodes; do
echo "$n: systemctl restart $node"
ssh root@$n systemctl restart $node &
done
echo "Waiting..."
wait

View File

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
# Register the Helm repositories used by the cluster charts.
while IFS=";" read -r name url; do
helm repo add "$name" "$url"
done << EOF
jetstack;https://charts.jetstack.io
stable;https://kubernetes-charts.storage.googleapis.com/
minio;https://helm.min.io/
anchore;https://charts.anchore.io
bitnami;https://charts.bitnami.com/bitnami
hashicorp;https://helm.releases.hashicorp.com
ingress-nginx;https://kubernetes.github.io/ingress-nginx
prometheus-community;https://prometheus-community.github.io/helm-charts
EOF

View File

@@ -0,0 +1,3 @@
#!/bin/sh
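# Taint the given node (NoSchedule) so that only pods tolerating ClusterService=true are scheduled on it.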
kubectl taint node $1 ClusterService="true":NoSchedule

View File

@@ -0,0 +1,12 @@
#!/bin/sh
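# Send a WebSocket upgrade handshake with curl, overriding the Host header.
# Usage: $0 <host-header> <url> [extra curl arguments...]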
host=$1; shift
curl -i -N \
-H "Connection: upgrade"\
-H "Upgrade: websocket"\
-H "Sec-WebSocket-Key: SGVsbG8sIHdvcmxkIQ=="\
-H "Sec-WebSocket-Version: 13"\
-H "Origin: http://foo.com/"\
-H "Host: $host" $@

View File

@@ -0,0 +1,24 @@
# Default values for cert-manager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
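# Installed by the bootstrap script: helm install -n kube-system -f cert-manager.yaml cert-manager jetstack/cert-manager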
replicaCount: 1
prometheus:
enabled: true
servicemonitor:
enabled: true
prometheusInstance: default
targetPort: 9402
path: /metrics
interval: 60s
scrapeTimeout: 30s
labels: {}
webhook:
enabled: true
replicaCount: 1
cainjector:
enabled: true

View File

@@ -0,0 +1,140 @@
# Default values for coredns.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: coredns/coredns
tag: "1.6.4"
pullPolicy: IfNotPresent
replicaCount: 1
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
serviceType: "ClusterIP"
prometheus:
monitor:
enabled: true
additionalLabels: {}
namespace: ""
service:
clusterIP: 10.0.0.254
# loadBalancerIP: ""
# externalTrafficPolicy: ""
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/port: "9153"
serviceAccount:
create: true
# The name of the ServiceAccount to use
# If not set and create is true, a name is generated using the fullname template
name:
rbac:
# If true, create & use RBAC resources
create: true
# If true, create and use PodSecurityPolicy
pspEnable: false
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
# name:
# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app.
isClusterService: true
# Optional priority class to be used for the coredns pods
priorityClassName: ""
servers:
- zones:
- zone: .
port: 53
plugins:
- name: cache
parameters: 30
- name: errors
# Serves a /health endpoint on :8080, required for livenessProbe
- name: health
# Serves a /ready endpoint on :8181, required for readinessProbe
- name: ready
# Required to query kubernetes API for data
- name: kubernetes
parameters: @cluster@.local
- name: loadbalance
parameters: round_robin
# Serves a /metrics endpoint on :9153, required for serviceMonitor
- name: prometheus
parameters: 0.0.0.0:9153
- name: forward
parameters: . /etc/resolv.conf
# Complete example with all the options:
# - zones: # the `zones` block can be left out entirely, defaults to "."
# - zone: hello.world. # optional, defaults to "."
# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS)
# - zone: foo.bar.
# scheme: dns://
# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol
# # Note that this will not work if you are also exposing tls or grpc on the same server
# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS)
# plugins: # the plugins to use for this server block
# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it!
# parameters: foo bar # list of parameters after the plugin
# configBlock: |- # if the plugin supports extra block style config, supply it here
# hello world
# foo bar
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: foo.bar.com/role
# operator: In
# values:
# - master
affinity: {}
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
# tolerations:
# - key: foo.bar.com/role
# operator: Equal
# value: master
# effect: NoSchedule
tolerations: []
# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/
zoneFiles: []
# - filename: example.db
# domain: example.com
# contents: |
# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600
# example.com. IN NS b.iana-servers.net.
# example.com. IN NS a.iana-servers.net.
# example.com. IN A 192.168.99.102
# *.example.com. IN A 192.168.99.102
# optional array of secrets to mount inside coredns container
# possible use case: need for secure connection with etcd backend
extraSecrets: []
# - name: etcd-client-certs
# mountPath: /etc/coredns/tls/etcd
# - name: some-fancy-secret
# mountPath: /etc/wherever

View File

@@ -0,0 +1,45 @@
# Default values for kubernetes-dashboard
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
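# Installed by the bootstrap script via: helm install -n kube-system -f kubernetes-dashboard.yaml kubernetes-dashboard stable/kubernetes-dashboard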
## Enable possibility to skip login
enableSkipLogin: false
## Serve application over HTTP without TLS
enableInsecureLogin: false
## Additional container arguments
extraArgs:
- --token-ttl=0
rbac:
clusterAdminRole: true
service:
type: ClusterIP
externalPort: 443
annotations:
service.alpha.kubernetes.io/app-protocols: '{"https":"HTTPS"}'
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
nginx.org/ssl-services: kubernetes-dashboard
nginx.ingress.kubernetes.io/secure-backends: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
# nginx.ingress.kubernetes.io/ssl-passthrough: "false"
paths:
- /
hosts:
- dashboard.@cluster@.local
tls:
- hosts:
- dashboard.@cluster@.local
secretName: kubernetes-dashboard-tls-cert

View File

@@ -0,0 +1,11 @@
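# Values for the metrics-server chart (installed from stable/ by the bootstrap script).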
image:
  pullPolicy: Always
rbac:
create: true
apiService:
create: true
hostNetwork:
enabled: true

View File

@@ -0,0 +1,46 @@
# helm repo add minio https://helm.min.io/
# helm install --version 6.0.5 -f minio.yaml -n minio minio minio/minio
accessKey: Mkd324ijlnfll23883
secretKey: KJQfefrnflol93jpj31mrkjs3i88sj2L
# environment:
# MINIO_ACCESS_KEY_OLD: YOURACCESSKEY
# MINIO_SECRET_KEY_OLD: YOURSECRETKEY
defaultBucket:
enabled: true
name: default
policy: none
purge: false
buckets:
- name: serit
policy: none
purge: false
- name: gitlab
policy: none
purge: false
clusterDomain: kube2.local
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/ssl-redirect: "true"
cert-manager.io/cluster-issuer: ca-issuer
nginx.ingress.kubernetes.io/whitelist-source-range: 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16
hosts:
- minio.k2.local
tls:
- hosts:
- minio.k2.local
secretName: minio-tls
persistence:
enabled: true
size: 100Gi
storageClass: managed-nfs-storage

View File

@@ -0,0 +1,13 @@
image:
tag: latest
storageClass:
name: managed-nfs-storage
defaultClass: true
reclaimPolicy: Delete
archiveOnDelete: true
nfs:
server: @fileserver@
path: /@cluster@
mountOptions:
- nfsvers=4.1

View File

@@ -0,0 +1,69 @@
## nginx configuration
## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md
##
controller:
kind: Deployment
hostNetwork: true
service:
type: NodePort
nodePorts:
http: 30080
https: 30443
targetPorts:
http: http
https: https
tolerations:
- key: node-role.kubernetes.io/master
operator: Equal
value: "true"
effect: NoSchedule
- key: unschedulable
operator: Equal
value: "true"
effect: NoSchedule
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values: [ @apiserver@ ]
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- nginx-ingress
- key: component
operator: In
values:
- controller
topologyKey: kubernetes.io/hostname
namespaces: []
replicaCount: 1
minAvailable: 1
metrics:
enabled: true
serviceMonitor:
  enabled: true
# TCP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp
##
tcp: {}
# 8080: "default/example-tcp-svc:9000"
# UDP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp
##
udp: {}
# 53: "kube-system/kube-dns:53"

View File

@@ -0,0 +1,169 @@
alertmanager:
## Deploy alertmanager
##
enabled: true
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
## Hosts must be provided if Ingress is enabled.
##
hosts:
- alertmanager.@cluster@.local
## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
##
paths: []
# - /
## TLS configuration for Alertmanager Ingress
## Secret must be manually created in the namespace
##
tls:
- secretName: alertmanager-general-tls
hosts:
- alertmanager.@cluster@.local
grafana:
enabled: true
defaultDashboardsEnabled: true
adminPassword: prom-operator
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
hosts:
- grafana.@cluster@.local
path: /
tls:
- secretName: grafana-general-tls
hosts:
- grafana.@cluster@.local
grafana.ini:
paths:
data: /var/lib/grafana/data
logs: /var/log/grafana
plugins: /var/lib/grafana/plugins
provisioning: /etc/grafana/provisioning
analytics:
check_for_updates: true
log:
mode: console
grafana_net:
url: https://grafana.net
## LDAP Authentication can be enabled with the following values on grafana.ini
## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
auth.ldap:
enabled: true
allow_sign_up: true
config_file: /etc/grafana/ldap.toml
smtp:
enabled: true
host: smtpgw.itpartner.no
port: 465
user: utvikling
skip_verify: true
## Grafana's LDAP configuration
## Templated by the template in _helpers.tpl
## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled
## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
## ref: http://docs.grafana.org/installation/ldap/#configuration
ldap:
existingSecret: grafana-ldap-toml
## Grafana's SMTP configuration
## NOTE: To enable, grafana.ini must be configured with smtp.enabled
## ref: http://docs.grafana.org/installation/configuration/#smtp
smtp:
# `existingSecret` is a reference to an existing secret containing the smtp configuration
# for Grafana.
existingSecret: grafana-smtp
userKey: user
passwordKey: password
kubeApiServer:
enabled: true
tlsConfig:
insecureSkipVerify: true
kubelet:
enabled: true
namespace: kube-system
coreDns:
enabled: true
kubeEtcd:
enabled: true
serviceMonitor:
insecureSkipVerify: true
endpoints:
- @apiserverAddress@
kubeControllerManager:
enabled: true
serviceMonitor:
insecureSkipVerify: true
endpoints:
- @apiserverAddress@
kubeScheduler:
enabled: true
serviceMonitor:
insecureSkipVerify: true
endpoints:
- @apiserverAddress@
kubeProxy:
enabled: false
endpoints:
- @apiserverAddress@
@workers@
kubeStateMetrics:
enabled: true
nodeExporter:
enabled: true
prometheusOperator:
enabled: true
prometheus:
enabled: true
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
hosts:
- prometheus.@cluster@.local
paths: []
tls:
- secretName: prometheus-general-tls
hosts:
- prometheus.@cluster@.local
prometheusSpec:
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: managed-nfs-storage
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 10Gi
selector: {}

View File

@@ -0,0 +1,287 @@
# helm install --namespace kube-system --timeout 1000 -f sentry.yaml sentry stable/sentry
# image:
# repository: sentry
# tag: 9
# pullPolicy: IfNotPresent
# # Add the secret name to pull from a private registry.
# imagePullSecrets: []
# # - name:
# How many web UI instances to run
# web:
# replicacount: 1
# resources:
# limits:
# cpu: 500m
# memory: 500Mi
# requests:
# cpu: 300m
# memory: 300Mi
# env:
# - name: GITHUB_APP_ID
# value:
# - name: GITHUB_API_SECRET
# value:
# nodeSelector: {}
# tolerations: []
# affinity: {}
# probeInitialDelaySeconds: 50
# priorityClassName: ""
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
# Optional extra labels for pod, i.e. redis-client: "true"
# podLabels: []
# How many cron instances to run
# cron:
# replicacount: 1
# resources:
# limits:
# cpu: 200m
# memory: 200Mi
# requests:
# cpu: 100m
# memory: 100Mi
# nodeSelector: {}
# tolerations: []
# affinity: {}
# priorityClassName: ""
# schedulerName:
# Optional extra labels for pod, i.e. redis-client: "true"
# podLabels: []
# How many worker instances to run
# worker:
# replicacount: 2
# resources:
# limits:
# cpu: 300m
# memory: 500Mi
# requests:
# cpu: 100m
# memory: 100Mi
# nodeSelector: {}
# tolerations: []
# affinity: {}
# priorityClassName: ""
# schedulerName:
# Optional extra labels for pod, i.e. redis-client: "true"
# podLabels: []
# concurrency:
# Admin user to create
user:
# Indicates whether to create the admin user.
# Default is true for the initial installation.
create: true
email: admin
# BYO Email server
# TODO: Add exim4 template
# https://docs.sentry.io/server/installation/docker/#outbound-email
email:
from_address: sentry@sentry.itpartner.no
host: smtpgw.itpartner.no
port: 465
use_tls: false
user: utvikling
password: S0m3rp0m@de#21!
enable_replies: false
# Name of the service and what port to expose on the pod
# Don't change these unless you know what you're doing
service:
name: sentry
type: ClusterIP
# externalPort: 9000
# internalPort: 9000
# ## Service annotations
# ##
# annotations: {}
## External IP addresses of service
## Default: nil
##
# externalIPs:
# - 192.168.0.1
## Load Balancer allow-list
# loadBalancerSourceRanges: []
# Configure the location of Sentry artifacts
filestore:
# Set to one of filesystem, gcs or s3 as supported by Sentry.
backend: filesystem
filesystem:
path: /var/lib/sentry/files
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: managed-nfs-storage
accessMode: ReadWriteOnce
size: 10Gi
## Whether to mount the persistent volume to the Sentry worker and
## cron deployments. This setting needs to be enabled for some advanced
## Sentry features, such as private source maps. If you disable this
## setting, the Sentry workers will not have access to artifacts you upload
## through the web deployment.
## Please note that you may need to change your accessMode to ReadWriteMany
## if you plan on having the web, worker and cron deployments run on
## different nodes.
# persistentWorkers: false
## Point this at a pre-configured secret containing a service account. The resulting
## secret will be mounted at /var/run/secrets/google
# gcs:
# credentialsFile: credentials.json
# secretName:
# bucketName:
## Currently unconfigured and changing this has no impact on the template configuration.
# s3: {}
# accessKey:
# secretKey:
# bucketName:
## Configure ingress resource that allow you to access the
## Sentry installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
enabled: true
hostname: sentry.itpartner.no
## Ingress annotations
##
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
nginx.ingress.kubernetes.io/backend-protocol: HTTP
nginx.ingress.kubernetes.io/ssl-redirect: "true"
# kubernetes.io/tls-acme: 'true'
tls:
- secretName: sentry-tls-cert
hosts:
- sentry.itpartner.no
# TODO: add support for plugins https://docs.sentry.io/server/plugins/
postgresql:
enabled: true
postgresqlDatabase: sentry
postgresqlUsername: postgres
postgresqlPassword: jdjiujh1212eo
# # Only used when internal PG is disabled
# # postgresHost: postgres
# # postgresPassword: postgres
# # postgresPort: 5432
# imageTag: "9.6"
# persistence:
# enabled: true
redis:
clusterDomain: kube2.local
# enabled: true
# Only used when internal redis is disabled
# host: redis
# Just omit the password field if your redis cluster doesn't use password
# password: redis
# port: 6379
# master:
# persistence:
# enabled: true
# If change pvc size redis.master.persistence.size: 20Gi
# config:
# configYml: ""
# sentryConfPy: ""
## Prometheus Exporter / Metrics
##
#metrics:
# enabled: true
# ## Configure extra options for liveness and readiness probes
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
# livenessProbe:
# enabled: true
# initialDelaySeconds: 30
# periodSeconds: 5
# timeoutSeconds: 2
# failureThreshold: 3
# successThreshold: 1
# readinessProbe:
# enabled: true
# initialDelaySeconds: 30
# periodSeconds: 5
# timeoutSeconds: 2
# failureThreshold: 3
# successThreshold: 1
# ## Metrics exporter resource requests and limits
# ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
# resources:
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 100m
# memory: 100Mi
# nodeSelector: {}
# tolerations: []
# affinity: {}
# # schedulerName:
# # Optional extra labels for pod, i.e. redis-client: "true"
# # podLabels: []
# service:
# type: ClusterIP
# labels: {}
# image:
# repository: prom/statsd-exporter
# tag: v0.10.5
# pullPolicy: IfNotPresent
# # Enable this if you're using https://github.com/coreos/prometheus-operator
# serviceMonitor:
# enabled: true
# ## Specify a namespace if needed
# # namespace: kube-system
# # fallback to the prometheus default unless specified
# # interval: 10s
# ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr)
# ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1)
# ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
# # selector:
# # app: prometheus
# #prometheus: kube-prometheus
## Provide affinity for hooks if needed
#hooks:
# affinity: {}
# dbInit:
# resources:
# # We set 3000Mi for the memory limit because a Sentry instance needs at least 3Gb of RAM to perform a migration process
# # reference: https://github.com/helm/charts/issues/15296
# limits:
# memory: 3200Mi
# requests:
# memory: 3000Mi

View File

@@ -0,0 +1,588 @@
# Available parameters and their default values for the Vault chart.
global:
# enabled is the master enabled switch. Setting this to true or false
# will enable or disable all the components within this chart by default.
enabled: true
# Image pull secret to use for registry authentication.
imagePullSecrets: []
# imagePullSecrets:
# - name: image-pull-secret
# TLS for end-to-end encrypted transport
tlsDisable: true
# If deploying to OpenShift
openshift: false
# Create PodSecurityPolicy for pods
psp:
enable: false
# Annotation for PodSecurityPolicy.
# This is a multi-line templated string map, and can also be set as YAML.
annotations: |
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
injector:
# True if you want to enable vault agent injection.
enabled: true
# If true, will enable a node exporter metrics endpoint at /metrics.
metrics:
enabled: false
# External vault server address for the injector to use. Setting this will
# disable deployment of a vault server along with the injector.
externalVaultAddr: ""
# image sets the repo and tag of the vault-k8s image to use for the injector.
image:
repository: "hashicorp/vault-k8s"
tag: "0.6.0"
pullPolicy: IfNotPresent
# agentImage sets the repo and tag of the Vault image to use for the Vault Agent
# containers. This should be set to the official Vault image. Vault 1.3.1+ is
# required.
agentImage:
repository: "vault"
tag: "1.5.4"
# Mount Path of the Vault Kubernetes Auth Method.
authPath: "auth/kubernetes"
# Configures the log verbosity of the injector. Supported log levels: Trace, Debug, Error, Warn, Info
logLevel: "info"
# Configures the log format of the injector. Supported log formats: "standard", "json".
logFormat: "standard"
# Configures all Vault Agent sidecars to revoke their token when shutting down
revokeOnShutdown: false
# namespaceSelector is the selector for restricting the webhook to only
# specific namespaces.
# See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector
# for more details.
# Example:
# namespaceSelector:
# matchLabels:
# sidecar-injector: enabled
namespaceSelector: {}
# Configures failurePolicy of the webhook. By default webhook failures are ignored.
# To block pod creation while webhook is unavailable, set the policy to `Fail` below.
# See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy
#
# failurePolicy: Fail
certs:
# secretName is the name of the secret that has the TLS certificate and
# private key to serve the injector webhook. If this is null, then the
# injector will default to its automatic management mode that will assign
# a service account to the injector to generate its own certificates.
secretName: null
# caBundle is a base64-encoded PEM-encoded certificate bundle for the
# CA that signed the TLS certificate that the webhook serves. This must
# be set if secretName is non-null.
caBundle: ""
# certName and keyName are the names of the files within the secret for
# the TLS cert and private key, respectively. These have reasonable
# defaults but can be customized if necessary.
certName: tls.crt
keyName: tls.key
resources: {}
# resources:
# requests:
# memory: 256Mi
# cpu: 250m
# limits:
# memory: 256Mi
# cpu: 250m
# extraEnvironmentVars is a list of extra environment variables to set in the
# injector deployment.
extraEnvironmentVars: {}
# KUBERNETES_SERVICE_HOST: kubernetes.default.svc
# Affinity Settings for injector pods
# This should be a multi-line string matching the affinity section of a
# PodSpec.
affinity: null
# Toleration Settings for injector pods
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
tolerations: null
# nodeSelector labels for injector pod assignment, formatted as a multi-line string.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# Example:
# nodeSelector: |
# beta.kubernetes.io/arch: amd64
nodeSelector: null
# Priority class for injector pods
priorityClassName: ""
# Extra annotations to attach to the injector pods
# This can either be YAML or a YAML-formatted multi-line templated string map
# of the annotations to apply to the injector pods
annotations: {}
server:
# Resource requests, limits, etc. for the server cluster placement. This
# should map directly to the value of the resources field for a PodSpec.
# By default no direct resource request is made.
image:
repository: "vault"
tag: "1.5.4"
# Overrides the default Image Pull Policy
pullPolicy: IfNotPresent
# Configure the Update Strategy Type for the StatefulSet
# See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategyType: "OnDelete"
resources: {}
# resources:
# requests:
# memory: 256Mi
# cpu: 250m
# limits:
# memory: 256Mi
# cpu: 250m
# Ingress allows ingress services to be created to allow external access
# from Kubernetes to access Vault pods.
# If deployment is on OpenShift, the following block is ignored.
# In order to expose the service, use the route section below
ingress:
enabled: true
labels: {}
# traffic: external
annotations:
# |
cert-manager.io/cluster-issuer: letsencrypt-production
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
hosts:
- host: vault.k2.itpartner.no
paths: [ / ]
tls:
- secretName: vault-tls
hosts:
- vault.k2.itpartner.no
# OpenShift only - create a route to expose the service
# The created route will be of type passthrough
route:
enabled: false
labels: {}
annotations: {}
host: chart-example.local
# authDelegator enables a cluster role binding to be attached to the service
# account. This cluster role binding can be used to setup Kubernetes auth
# method. https://www.vaultproject.io/docs/auth/kubernetes.html
authDelegator:
enabled: true
# extraInitContainers is a list of init containers. Specified as a YAML list.
# This is useful if you need to run a script to provision TLS certificates or
# write out configuration files in a dynamic way.
extraInitContainers: null
# # This example installs a plugin pulled from github into the /usr/local/libexec/vault/oauthapp folder,
# # which is defined in the volumes value.
# - name: oauthapp
# image: "alpine"
# command: [sh, -c]
# args:
# - cd /tmp &&
# wget https://github.com/puppetlabs/vault-plugin-secrets-oauthapp/releases/download/v1.2.0/vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64.tar.xz -O oauthapp.xz &&
# tar -xf oauthapp.xz &&
# mv vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64 /usr/local/libexec/vault/oauthapp &&
# chmod +x /usr/local/libexec/vault/oauthapp
# volumeMounts:
# - name: plugins
# mountPath: /usr/local/libexec/vault
# extraContainers is a list of sidecar containers. Specified as a YAML list.
extraContainers: null
# shareProcessNamespace enables process namespace sharing between Vault and the extraContainers
# This is useful if Vault must be signaled, e.g. to send a SIGHUP for log rotation
shareProcessNamespace: false
# extraArgs is a string containing additional Vault server arguments.
extraArgs: ""
# Used to define custom readinessProbe settings
readinessProbe:
enabled: true
# If you need to use a http path instead of the default exec
# path: /v1/sys/health?standbyok=true
# When a probe fails, Kubernetes will try failureThreshold times before giving up
failureThreshold: 2
# Number of seconds after the container has started before probe initiates
initialDelaySeconds: 5
# How often (in seconds) to perform the probe
periodSeconds: 5
# Minimum consecutive successes for the probe to be considered successful after having failed
successThreshold: 1
# Number of seconds after which the probe times out.
timeoutSeconds: 3
# Used to enable a livenessProbe for the pods
livenessProbe:
enabled: false
path: "/v1/sys/health?standbyok=true"
# When a probe fails, Kubernetes will try failureThreshold times before giving up
failureThreshold: 2
# Number of seconds after the container has started before probe initiates
initialDelaySeconds: 60
# How often (in seconds) to perform the probe
periodSeconds: 5
# Minimum consecutive successes for the probe to be considered successful after having failed
successThreshold: 1
# Number of seconds after which the probe times out.
timeoutSeconds: 3
# Used to set the sleep time during the preStop step
preStopSleepSeconds: 5
# Used to define commands to run after the pod is ready.
# This can be used to automate processes such as initialization
# or bootstrapping auth methods.
postStart: []
# - /bin/sh
# - -c
# - /vault/userconfig/myscript/run.sh
# extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be
# used to include variables required for auto-unseal.
extraEnvironmentVars: {}
# GOOGLE_REGION: global
# GOOGLE_PROJECT: myproject
# GOOGLE_APPLICATION_CREDENTIALS: /vault/userconfig/myproject/myproject-creds.json
# extraSecretEnvironmentVars is a list of extra environment variables to set with the stateful set.
# These variables take value from existing Secret objects.
extraSecretEnvironmentVars: []
# - envName: AWS_SECRET_ACCESS_KEY
# secretName: vault
# secretKey: AWS_SECRET_ACCESS_KEY
# extraVolumes is a list of extra volumes to mount. These will be exposed
# to Vault in the path `/vault/userconfig/<name>/`. The value below is
# an array of objects, examples are shown below.
extraVolumes: []
# - type: secret (or "configMap")
# name: my-secret
# path: null # default is `/vault/userconfig`
# volumes is a list of volumes made available to all containers. These are rendered
# via toYaml rather than pre-processed like the extraVolumes value.
# The purpose is to make it easy to share volumes between containers.
volumes: null
# - name: plugins
# emptyDir: {}
# volumeMounts is a list of volumeMounts for the main server container. These are rendered
# via toYaml rather than pre-processed like the extraVolumes value.
# The purpose is to make it easy to share volumes between containers.
volumeMounts: null
# - mountPath: /usr/local/libexec/vault
# name: plugins
# readOnly: true
# Affinity Settings
# Commenting out or setting as empty the affinity variable, will allow
# deployment to single node services such as Minikube
affinity: |
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: {{ template "vault.name" . }}
app.kubernetes.io/instance: "{{ .Release.Name }}"
component: server
topologyKey: kubernetes.io/hostname
# Toleration Settings for server pods
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
tolerations: null
# nodeSelector labels for server pod assignment, formatted as a multi-line string.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# Example:
# nodeSelector: |
# beta.kubernetes.io/arch: amd64
nodeSelector: null
# Enables network policy for server pods
networkPolicy:
enabled: false
# Priority class for server pods
priorityClassName: ""
# Extra labels to attach to the server pods
# This should be a YAML map of the labels to apply to the server pods
extraLabels: {}
# Extra annotations to attach to the server pods
# This can either be YAML or a YAML-formatted multi-line templated string map
# of the annotations to apply to the server pods
annotations: {}
# Enables a headless service to be used by the Vault Statefulset
service:
enabled: true
# clusterIP controls whether a Cluster IP address is attached to the
# Vault service within Kubernetes. By default the Vault service will
# be given a Cluster IP address, set to None to disable. When disabled
# Kubernetes will create a "headless" service. Headless services can be
# used to communicate with pods directly through DNS instead of a round robin
# load balancer.
# clusterIP: None
# Configures the service type for the main Vault service. Can be ClusterIP
# or NodePort.
#type: ClusterIP
# If type is set to "NodePort", a specific nodePort value can be configured,
# will be random if left blank.
#nodePort: 30000
# Port on which Vault server is listening
port: 8200
# Target port to which the service should be mapped to
targetPort: 8200
# Extra annotations for the service definition. This can either be YAML or a
# YAML-formatted multi-line templated string map of the annotations to apply
# to the service.
annotations: {}
# This configures the Vault Statefulset to create a PVC for data
# storage when using the file or raft backend storage engines.
# See https://www.vaultproject.io/docs/configuration/storage/index.html to know more
dataStorage:
enabled: true
# Size of the PVC created
size: 10Gi
# Location where the PVC will be mounted.
mountPath: "/vault/data"
# Name of the storage class to use. If null it will use the
# configured default Storage Class.
storageClass: null
# Access Mode of the storage device being used for the PVC
accessMode: ReadWriteOnce
# Annotations to apply to the PVC
annotations: {}
# This configures the Vault Statefulset to create a PVC for audit
# logs. Once Vault is deployed, initialized and unseal, Vault must
# be configured to use this for audit logs. This will be mounted to
# /vault/audit
# See https://www.vaultproject.io/docs/audit/index.html to know more
auditStorage:
enabled: false
# Size of the PVC created
size: 10Gi
# Location where the PVC will be mounted.
mountPath: "/vault/audit"
# Name of the storage class to use. If null it will use the
# configured default Storage Class.
storageClass: null
# Access Mode of the storage device being used for the PVC
accessMode: ReadWriteOnce
# Annotations to apply to the PVC
annotations: {}
# Run Vault in "dev" mode. This requires no further setup, no state management,
# and no initialization. This is useful for experimenting with Vault without
# needing to unseal, store keys, et. al. All data is lost on restart - do not
# use dev mode for anything other than experimenting.
# See https://www.vaultproject.io/docs/concepts/dev-server.html to know more
dev:
enabled: false
# Run Vault in "standalone" mode. This is the default mode that will deploy if
# no arguments are given to helm. This requires a PVC for data storage to use
# the "file" backend. This mode is not highly available and should not be scaled
# past a single replica.
standalone:
enabled: "-"
# config is a raw string of default configuration when using a Stateful
# deployment. Default is to use a PersistentVolumeClaim mounted at /vault/data
# and store data there. This is only used when using a Replica count of 1, and
# using a stateful set. This should be HCL.
# Note: Configuration files are stored in ConfigMaps so sensitive data
# such as passwords should be either mounted through extraSecretEnvironmentVars
# or through a Kube secret. For more information see:
# https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
config: |
ui = true
listener "tcp" {
tls_disable = 1
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "file" {
path = "/vault/data"
}
# Example configuration for using auto-unseal, using Google Cloud KMS. The
# GKMS keys must already exist, and the cluster must have a service account
# that is authorized to access GCP KMS.
#seal "gcpckms" {
# project = "vault-helm-dev"
# region = "global"
# key_ring = "vault-helm-unseal-kr"
# crypto_key = "vault-helm-unseal-key"
#}
# Run Vault in "HA" mode. There are no storage requirements unless audit log
# persistence is required. In HA mode Vault will configure itself to use Consul
# for its storage backend. The default configuration provided will work with the Consul
# Helm project by default. It is possible to manually configure Vault to use a
# different HA backend.
ha:
enabled: false
replicas: 3
# Set the api_addr configuration for Vault HA
# See https://www.vaultproject.io/docs/configuration#api_addr
# If set to null, this will be set to the Pod IP Address
apiAddr: null
# Enables Vault's integrated Raft storage. Unlike the typical HA modes where
# Vault's persistence is external (such as Consul), enabling Raft mode will create
# persistent volumes for Vault to store data according to the configuration under server.dataStorage.
# The Vault cluster will coordinate leader elections and failovers internally.
raft:
# Enables Raft integrated storage
enabled: false
# Set the Node Raft ID to the name of the pod
setNodeId: false
# Note: Configuration files are stored in ConfigMaps so sensitive data
# such as passwords should be either mounted through extraSecretEnvironmentVars
# or through a Kube secret. For more information see:
# https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
config: |
ui = true
listener "tcp" {
tls_disable = 1
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "raft" {
path = "/vault/data"
}
service_registration "kubernetes" {}
# config is a raw string of default configuration when using a Stateful
# deployment. Default is to use Consul for its HA storage backend.
# This should be HCL.
# Note: Configuration files are stored in ConfigMaps so sensitive data
# such as passwords should be either mounted through extraSecretEnvironmentVars
# or through a Kube secret. For more information see:
# https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
config: |
ui = true
listener "tcp" {
tls_disable = 1
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "consul" {
path = "vault"
address = "HOST_IP:8500"
}
service_registration "kubernetes" {}
# Example configuration for using auto-unseal, using Google Cloud KMS. The
# GKMS keys must already exist, and the cluster must have a service account
# that is authorized to access GCP KMS.
#seal "gcpckms" {
# project = "vault-helm-dev-246514"
# region = "global"
# key_ring = "vault-helm-unseal-kr"
# crypto_key = "vault-helm-unseal-key"
#}
# A disruption budget limits the number of pods of a replicated application
# that are down simultaneously from voluntary disruptions
disruptionBudget:
enabled: true
# maxUnavailable will default to (n/2)-1 where n is the number of
# replicas. If you'd like a custom value, you can specify an override here.
maxUnavailable: null
# Definition of the serviceAccount used to run Vault.
# These options are also used when using an external Vault server to validate
# Kubernetes tokens.
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# Extra annotations for the serviceAccount definition. This can either be
# YAML or a YAML-formatted multi-line templated string map of the
# annotations to apply to the serviceAccount.
annotations: {}
# Settings for the statefulSet used to run Vault.
statefulSet:
# Extra annotations for the statefulSet. This can either be YAML or a
# YAML-formatted multi-line templated string map of the annotations to apply
# to the statefulSet.
annotations: {}
# Vault UI
ui:
# True if you want to create a Service entry for the Vault UI.
#
# serviceType can be used to control the type of service created. For
# example, setting this to "LoadBalancer" will create an external load
# balancer (for supported K8S installations) to access the UI.
enabled: false
publishNotReadyAddresses: true
# The service should only contain selectors for active Vault pod
activeVaultPodOnly: false
serviceType: "ClusterIP"
serviceNodePort: null
externalPort: 8200
# loadBalancerSourceRanges:
# - 10.0.0.0/16
# - 1.78.23.3/32
# loadBalancerIP:
# Extra annotations to attach to the ui service
# This can either be YAML or a YAML-formatted multi-line templated string map
# of the annotations to apply to the ui service
annotations: {}

View File

@@ -0,0 +1,44 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: cluster-admin
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cluster-admin
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
namespace: kube-system
name: cluster-admin
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system-default
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
namespace: kube-system
name: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubernetes

View File

@@ -0,0 +1,37 @@
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
name: letsencrypt-production
namespace: kube-system
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: @acme_email@
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-production
solvers:
- http01:
ingress:
class: nginx
---
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
name: ca-issuer
namespace: kube-system
spec:
ca:
secretName: cluster-ca
---
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
name: selfsigning-issuer
namespace: kube-system
spec:
selfSigned: {}

View File

@@ -0,0 +1,24 @@
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: kubernetes-dashboard
namespace: kubernetes-dashboard
annotations:
kubernetes.io/ingress.class: "nginx"
cert-manager.io/cluster-issuer: letsencrypt-production
nginx.ingress.kubernetes.io/whitelist-source-range: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
rules:
- host: dashboard.k2.itpartner.no
http:
paths:
- backend:
serviceName: kubernetes-dashboard
servicePort: 443
tls:
- hosts:
- dashboard.k2.itpartner.no
secretName: kubernetes-dashboard-tls

View File

@@ -0,0 +1,35 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: front-proxy-client
subjects:
- kind: User
name: front-proxy-client
apiGroup: rbac.authorization.k8s.io
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: front-proxy-client
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: front-proxy-client
rules:
- apiGroups:
- "webhook.cert-manager.io"
resources:
- mutations
- validations
verbs: [ "*" ]
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: List
metadata: {}
items:
- apiVersion: v1
kind: Secret
type: Opaque
metadata:
labels:
app: grafana
name: grafana-ldap-toml
namespace: prometheus
data:
ldap-toml: @grafana_ldap_toml@

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: List
metadata: {}
items:
- apiVersion: v1
kind: Secret
type: Opaque
metadata:
labels:
app: grafana
name: grafana-smtp
namespace: prometheus
data:
user: @grafana_smtp_user@
password: @grafana_smtp_password@

View File

@@ -0,0 +1,40 @@
# Bind the kube-proxy user to the kube-proxy cluster role.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kube-proxy
subjects:
- kind: User
name: kube-proxy
apiGroup: rbac.authorization.k8s.io
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-proxy
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kube-proxy
rules:
-
apiGroups:
- ""
resources:
- endpoints
- events
- services
- nodes
verbs: ["get", "watch", "list"]
- nonResourceURLs: ["*"]
verbs: ["get", "watch", "list"]
-
apiGroups:
- ""
resources:
- events
verbs: ["*"]
- nonResourceURLs: ["*"]
verbs: ["*"]

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Secret
metadata:
labels:
app: sentry-postgresql
chart: postgresql-6.5.0
heritage: Helm
release: sentry
name: sentry-sentry-postgresql
namespace: kube-system
type: Opaque
data:
postgresql-password: a1pyWlBCazVzSQ==

View File

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
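# Copy the generated kubernetes-config tree to a writable destination (default: ./kubernetes-config).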
TOP=@out@/share/kubernetes-config
dest=kubernetes-config
[ $# = 1 ] && dest=$1
cp -r $TOP $dest
chmod -R ug+w $dest
# vim:ft=sh