Add bootstrapping scripts and nix expressions
This commit is contained in:
33
bootstrap/bin/install-namespace.sh
Executable file
33
bootstrap/bin/install-namespace.sh
Executable file
@@ -0,0 +1,33 @@
|
||||
#!/usr/bin/env bash
# Create a Kubernetes namespace and install the GitLab registry pull
# secret into it.
#
# Usage: install-namespace.sh {namespace}

set -euo pipefail

# Directory one level above this script (kept for parity with the
# sibling bootstrap scripts; not otherwise used here).
TOP="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/.."

if [ -z "${1:-}" ]; then
  # was `ehco`, which is not a command — the usage message never printed
  echo "usage: install-namespace.sh {namespace}" >&2
  exit 1
fi

namespace=$1

# unpredictable temp name instead of the guessable /tmp/new-$namespace.$$
tmpfile=$(mktemp "/tmp/new-${namespace}.XXXXXX")
# clean up on every exit path, not only after a successful apply
trap 'rm -f -- "$tmpfile"' EXIT

# NOTE(review): the .dockerconfigjson value below is a hardcoded,
# base64-encoded registry credential committed to source control —
# consider rotating it and injecting it at build/deploy time instead.
cat << EOF > "$tmpfile"
apiVersion: v1
kind: Namespace
metadata:
  labels:
    name: $namespace
  name: $namespace
---
apiVersion: v1
kind: Secret
metadata:
  name: gitlab-registry-auth
  namespace: $namespace
data:
  .dockerconfigjson: eyJhdXRocyI6eyJodHRwczovL3JlZ2lzdHJ5Lml0cGFydG5lci5ubyI6eyJ1c2VybmFtZSI6ImpvbmFzIiwicGFzc3dvcmQiOiJTRldwLVk0bkVfdXpNZFJxeHp6SyIsImF1dGgiOiJhbTl1WVhNNlUwWlhjQzFaTkc1RlgzVjZUV1JTY1hoNmVrcz0ifX19
type: kubernetes.io/dockerconfigjson
EOF

kubectl apply -f "$tmpfile"
3
bootstrap/bin/reset-sa-tokens.sh
Executable file
3
bootstrap/bin/reset-sa-tokens.sh
Executable file
@@ -0,0 +1,3 @@
|
||||
#!/usr/bin/env bash
# Delete every service-account token secret in the cluster. Kubernetes
# recreates them automatically, so this effectively rotates all
# service-account tokens.
#
# WARNING: cluster-wide and destructive; running pods keep their mounted
# (now-revoked) tokens until restarted.

set -euo pipefail

kubectl delete secrets --all-namespaces \
  --field-selector='type=kubernetes.io/service-account-token'
3
bootstrap/bin/taint-node-no-schedule.sh
Executable file
3
bootstrap/bin/taint-node-no-schedule.sh
Executable file
@@ -0,0 +1,3 @@
|
||||
#!/bin/sh
# Taint a node so that only workloads tolerating ClusterService=true are
# scheduled onto it.
#
# Usage: taint-node-no-schedule.sh {node}

if [ -z "${1:-}" ]; then
  echo "usage: taint-node-no-schedule.sh {node}" >&2
  exit 1
fi

kubectl taint node "$1" ClusterService="true":NoSchedule
24
bootstrap/charts/cert-manager.yaml
Normal file
24
bootstrap/charts/cert-manager.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
# Default values for cert-manager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

# Expose cert-manager metrics and create a ServiceMonitor for the
# prometheus-operator installed by the bootstrap script.
prometheus:
  enabled: true
  servicemonitor:
    enabled: true
    prometheusInstance: default
    targetPort: 9402
    path: /metrics
    interval: 60s
    scrapeTimeout: 30s
    labels: {}

# Admission webhook for validating cert-manager resources.
webhook:
  enabled: true
  replicaCount: 1

# cainjector injects CA bundles into webhook/APIService configurations.
cainjector:
  enabled: true
140
bootstrap/charts/coredns.yaml
Normal file
140
bootstrap/charts/coredns.yaml
Normal file
@@ -0,0 +1,140 @@
|
||||
# Default values for coredns.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: coredns/coredns
  tag: "1.6.4"
  pullPolicy: IfNotPresent

replicaCount: 1

resources:
  limits:
    cpu: 100m
    memory: 128Mi
  requests:
    cpu: 100m
    memory: 128Mi

serviceType: "ClusterIP"

# ServiceMonitor for prometheus-operator.
prometheus:
  monitor:
    enabled: true
    additionalLabels: {}
    namespace: ""

service:
  # Fixed cluster DNS address; must match the kubelet's --cluster-dns.
  # TODO(review): confirm this matches the cluster's service CIDR.
  clusterIP: 10.0.0.254
# loadBalancerIP: ""
# externalTrafficPolicy: ""
# annotations:
#   prometheus.io/scrape: "true"
#   prometheus.io/port: "9153"

serviceAccount:
  create: true
  # The name of the ServiceAccount to use
  # If not set and create is true, a name is generated using the fullname template
  name:

rbac:
  # If true, create & use RBAC resources
  create: true
  # If true, create and use PodSecurityPolicy
  pspEnable: false
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  # name:

# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app.
isClusterService: true

# Optional priority class to be used for the coredns pods
priorityClassName: ""

# Corefile is generated from this list; one entry per server block.
servers:
- zones:
  - zone: .
  port: 53
  plugins:
  - name: cache
    parameters: 30
  - name: errors
  # Serves a /health endpoint on :8080, required for livenessProbe
  - name: health
  # Serves a /ready endpoint on :8181, required for readinessProbe
  - name: ready
  # Required to query kubernetes API for data
  - name: kubernetes
    parameters: @cluster@.local
  - name: loadbalance
    parameters: round_robin
  # Serves a /metrics endpoint on :9153, required for serviceMonitor
  - name: prometheus
    parameters: 0.0.0.0:9153
  - name: forward
    parameters: . /etc/resolv.conf

# Complete example with all the options:
# - zones:                 # the `zones` block can be left out entirely, defaults to "."
#   - zone: hello.world.   # optional, defaults to "."
#     scheme: tls://       # optional, defaults to "" (which equals "dns://" in CoreDNS)
#   - zone: foo.bar.
#     scheme: dns://
#     use_tcp: true        # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol
#                          # Note that this will not work if you are also exposing tls or grpc on the same server
#   port: 12345            # optional, defaults to "" (which equals 53 in CoreDNS)
#   plugins:               # the plugins to use for this server block
#   - name: kubernetes     # name of plugin, if used multiple times ensure that the plugin supports it!
#     parameters: foo bar  # list of parameters after the plugin
#     configBlock: |-      # if the plugin supports extra block style config, supply it here
#       hello world
#       foo bar

# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
#   affinity:
#     nodeAffinity:
#       requiredDuringSchedulingIgnoredDuringExecution:
#         nodeSelectorTerms:
#         - matchExpressions:
#           - key: foo.bar.com/role
#             operator: In
#             values:
#             - master
affinity: {}

# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}

# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
#   tolerations:
#   - key: foo.bar.com/role
#     operator: Equal
#     value: master
#     effect: NoSchedule
tolerations: []

# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/
zoneFiles: []
#  - filename: example.db
#    domain: example.com
#    contents: |
#      example.com.   IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600
#      example.com.   IN NS  b.iana-servers.net.
#      example.com.   IN NS  a.iana-servers.net.
#      example.com.   IN A   192.168.99.102
#      *.example.com. IN A   192.168.99.102

# optional array of secrets to mount inside coredns container
# possible usecase: need for secure connection with etcd backend
extraSecrets: []
# - name: etcd-client-certs
#   mountPath: /etc/coredns/tls/etcd
# - name: some-fancy-secret
#   mountPath: /etc/wherever
45
bootstrap/charts/kubernetes-dashboard.yaml
Normal file
45
bootstrap/charts/kubernetes-dashboard.yaml
Normal file
@@ -0,0 +1,45 @@
|
||||
# Default values for kubernetes-dashboard
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value

## Enable possibility to skip login
enableSkipLogin: false

## Serve application over HTTP without TLS
enableInsecureLogin: false

## Additional container arguments
extraArgs:
  # never expire the login token
  - --token-ttl=0

rbac:
  # NOTE(review): grants the dashboard service account full cluster-admin;
  # anyone who can log in to the dashboard gets full cluster access —
  # confirm this is intended.
  clusterAdminRole: true

service:
  type: ClusterIP
  externalPort: 443

  annotations:
    service.alpha.kubernetes.io/app-protocols: '{"https":"HTTPS"}'

ingress:
  enabled: true

  annotations:
    kubernetes.io/ingress.class: nginx
    # certificate issued by the in-cluster CA (see cluster-issuer.yaml)
    cert-manager.io/cluster-issuer: ca-issuer
    nginx.org/ssl-services: kubernetes-dashboard
    nginx.ingress.kubernetes.io/secure-backends: "true"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/backend-protocol: HTTPS
    # nginx.ingress.kubernetes.io/ssl-passthrough: "false"
  paths:
    - /
  hosts:
    - dashboard.@cluster@.local
  tls:
    - hosts:
        - dashboard.@cluster@.local
      secretName: kubernetes-dashboard-tls-cert
11
bootstrap/charts/metrics-server.yaml
Normal file
11
bootstrap/charts/metrics-server.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
# Helm values for metrics-server.

image:
  # was the flat key `image.pullPolicy: Always` — Helm does not expand
  # dotted keys in values files, so the chart ignored it; nested so the
  # pull policy actually takes effect.
  pullPolicy: Always

rbac:
  create: true

apiService:
  create: true

# Run on the host network so the apiserver can reach the metrics API
# even without pod-network routing from the control plane.
hostNetwork:
  enabled: true
13
bootstrap/charts/nfs-client-provisioner.yaml
Normal file
13
bootstrap/charts/nfs-client-provisioner.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
# Helm values for nfs-client-provisioner.

image:
  # NOTE(review): "latest" is not reproducible — consider pinning a
  # specific image version.
  tag: latest
storageClass:
  name: managed-nfs-storage
  # make this the cluster's default StorageClass
  defaultClass: true
  reclaimPolicy: Delete
  # keep an archived copy of the data on the NFS share when a PV is deleted
  archiveOnDelete: true
nfs:
  # @fileserver@/@cluster@ are substituted at build time
  server: @fileserver@
  path: /@cluster@
  mountOptions:
    - nfsvers=4.1
69
bootstrap/charts/nginx-ingress.yaml
Normal file
69
bootstrap/charts/nginx-ingress.yaml
Normal file
@@ -0,0 +1,69 @@
|
||||
## nginx configuration
## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md
##
controller:
  kind: Deployment
  hostNetwork: true
  service:
    type: NodePort
    nodePorts:
      http: 30080
      https: 30443
    targetPorts:
      http: http
      https: https

  # allow scheduling onto tainted master / unschedulable nodes
  tolerations:
    - key: node-role.kubernetes.io/master
      operator: Equal
      value: "true"
      effect: NoSchedule
    - key: unschedulable
      operator: Equal
      value: "true"
      effect: NoSchedule

  affinity:
    # pin the controller to the apiserver host (substituted at build time)
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: kubernetes.io/hostname
                operator: In
                values: [ @apiserver@ ]

    # never run two controller replicas on the same node
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
              - key: app
                operator: In
                values:
                  - nginx-ingress
              - key: component
                operator: In
                values:
                  - controller
          topologyKey: kubernetes.io/hostname
          namespaces: []

  replicaCount: 1

  minAvailable: 1

  metrics:
    enabled: true
    # was the flat key `serviceMonitor.enabled: true` — Helm does not
    # expand dotted keys in values files, so the ServiceMonitor was never
    # created; nested so it takes effect.
    serviceMonitor:
      enabled: true

# TCP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp
##
tcp: {}
# 8080: "default/example-tcp-svc:9000"

# UDP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp
##
udp: {}
# 53: "kube-system/kube-dns:53"
169
bootstrap/charts/prometheus-operator.yaml
Normal file
169
bootstrap/charts/prometheus-operator.yaml
Normal file
@@ -0,0 +1,169 @@
|
||||
alertmanager:

  ## Deploy alertmanager
  ##
  enabled: true

  ingress:
    enabled: true

    annotations:
      kubernetes.io/ingress.class: nginx
      cert-manager.io/cluster-issuer: ca-issuer

    ## Hosts must be provided if Ingress is enabled.
    ##
    hosts:
      - alertmanager.@cluster@.local

    ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
    ##
    paths: []
    # - /

    ## TLS configuration for Alertmanager Ingress
    ## Secret must be manually created in the namespace
    ##
    tls:
      - secretName: alertmanager-general-tls
        hosts:
          - alertmanager.@cluster@.local

grafana:
  enabled: true

  defaultDashboardsEnabled: true

  # NOTE(review): this is the chart's well-known default admin password,
  # committed in plain text — change it or source it from a secret.
  adminPassword: prom-operator

  ingress:
    enabled: true

    annotations:
      kubernetes.io/ingress.class: nginx
      cert-manager.io/cluster-issuer: ca-issuer

    hosts:
      - grafana.@cluster@.local
    path: /
    tls:
      - secretName: grafana-general-tls
        hosts:
          - grafana.@cluster@.local

  grafana.ini:
    paths:
      data: /var/lib/grafana/data
      logs: /var/log/grafana
      plugins: /var/lib/grafana/plugins
      provisioning: /etc/grafana/provisioning
    analytics:
      check_for_updates: true
    log:
      mode: console
    grafana_net:
      url: https://grafana.net
    ## LDAP Authentication can be enabled with the following values on grafana.ini
    ## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
    auth.ldap:
      enabled: true
      allow_sign_up: true
      config_file: /etc/grafana/ldap.toml
    # was "smpt", a typo Grafana silently ignores — the section must be
    # "smtp" for the SMTP credentials below to be used at all
    smtp:
      enabled: true
      host: smtpgw.itpartner.no
      port: 465
      user: utvikling
      skip_verify: true

  ## Grafana's LDAP configuration
  ## Templated by the template in _helpers.tpl
  ## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled
  ## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
  ## ref: http://docs.grafana.org/installation/ldap/#configuration
  ldap:
    existingSecret: grafana-ldap-toml

  ## Grafana's SMTP configuration
  ## NOTE: To enable, grafana.ini must be configured with smtp.enabled
  ## ref: http://docs.grafana.org/installation/configuration/#smtp
  smtp:
    # `existingSecret` is a reference to an existing secret containing the smtp configuration
    # for Grafana.
    existingSecret: grafana-smtp
    userKey: user
    passwordKey: password

kubeApiServer:
  enabled: true
  tlsConfig:
    insecureSkipVerify: true

kubelet:
  enabled: true
  namespace: kube-system

coreDns:
  enabled: true

kubeEtcd:
  enabled: true
  serviceMonitor:
    insecureSkipVerify: true
  endpoints:
    - @apiserver@

kubeControllerManager:
  enabled: true
  serviceMonitor:
    insecureSkipVerify: true
  endpoints:
    - @apiserver@

kubeScheduler:
  enabled: true
  serviceMonitor:
    insecureSkipVerify: true
  endpoints:
    - @apiserver@

kubeProxy:
  enabled: false
  endpoints:
    - @apiserver@
    @workers@

kubeStateMetrics:
  enabled: true

nodeExporter:
  enabled: true

prometheusOperator:
  enabled: true

prometheus:
  enabled: true
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: nginx
      cert-manager.io/cluster-issuer: ca-issuer
    hosts:
      - prometheus.@cluster@.local
    paths: []
    tls:
      - secretName: prometheus-general-tls
        hosts:
          - prometheus.@cluster@.local
  # NOTE: kube-system-bootstrap strips this block with
  # `sed '/prometheusSpec:/,+10d'` when no fileserver is configured —
  # do not insert or remove lines inside it.
  prometheusSpec:
    storageSpec:
      volumeClaimTemplate:
        spec:
          storageClassName: managed-nfs-storage
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 10Gi
        selector: {}
44
bootstrap/config/cluster-auth-rbac.yaml
Normal file
44
bootstrap/config/cluster-auth-rbac.yaml
Normal file
@@ -0,0 +1,44 @@
|
||||
# Service account and bindings for cluster administration.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cluster-admin
  namespace: kube-system
---
# Grants the cluster-admin ClusterRole to the cluster-admin SA above.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cluster-admin
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  namespace: kube-system
  name: cluster-admin
---
# NOTE(review): grants full cluster-admin to the kube-system *default*
# service account — every pod in kube-system that does not set its own
# serviceAccountName gets unrestricted cluster access. Confirm this is
# intended rather than scoping individual components.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system-default
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  namespace: kube-system
  name: default
---
# Grants cluster-admin to the "kubernetes" client-certificate user.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kubernetes
35
bootstrap/config/cluster-issuer.yaml
Normal file
35
bootstrap/config/cluster-issuer.yaml
Normal file
@@ -0,0 +1,35 @@
|
||||
# cert-manager cluster-wide certificate issuers.
# NOTE(review): ClusterIssuer is a cluster-scoped resource, so the
# metadata.namespace fields below are ignored — harmless but misleading.
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
  name: letsencrypt-production
  namespace: kube-system
spec:
  acme:
    # The ACME server URL
    server: https://acme-v02.api.letsencrypt.org/directory

    # Email address used for ACME registration
    email: @acme_email@

    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt-production

---
# Issues certificates signed by the in-cluster CA; the cluster-ca TLS
# secret is created by the kube-system-bootstrap script.
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
  name: ca-issuer
  namespace: kube-system
spec:
  ca:
    secretName: cluster-ca
---
# Issues self-signed certificates (e.g. for bootstrapping CAs).
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
  name: selfsigning-issuer
  namespace: kube-system
spec:
  selfSigned: {}
35
bootstrap/config/front-proxy-client.yaml
Normal file
35
bootstrap/config/front-proxy-client.yaml
Normal file
@@ -0,0 +1,35 @@
|
||||
# Permissions for the "front-proxy-client" user — presumably the identity
# the kube-apiserver aggregation front proxy authenticates as (TODO
# confirm against the apiserver's --requestheader/proxy-client certs).
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: front-proxy-client
subjects:
- kind: User
  name: front-proxy-client
  apiGroup: rbac.authorization.k8s.io
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: front-proxy-client
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: front-proxy-client
rules:
# full access to the cert-manager webhook's admission resources
- apiGroups:
  - "webhook.cert-manager.io"
  resources:
  - mutations
  - validations
  verbs: [ "*" ]
# read-only access to metrics-server pod/node metrics
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
14
bootstrap/config/grafana-ldap-toml.yaml
Normal file
14
bootstrap/config/grafana-ldap-toml.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
# Secret holding Grafana's ldap.toml; referenced by
# grafana.ldap.existingSecret in the prometheus-operator values.
apiVersion: v1
kind: List
metadata: {}
items:
- apiVersion: v1
  kind: Secret
  type: Opaque
  metadata:
    labels:
      app: grafana
    name: grafana-ldap-toml
    namespace: kube-system
  data:
    # base64-encoded ldap.toml, substituted at build time
    ldap-toml: @grafana_ldap_toml@
15
bootstrap/config/grafana-smtp-secret.yaml
Normal file
15
bootstrap/config/grafana-smtp-secret.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
# Secret holding Grafana's SMTP credentials; referenced by
# grafana.smtp.existingSecret in the prometheus-operator values.
apiVersion: v1
kind: List
metadata: {}
items:
- apiVersion: v1
  kind: Secret
  type: Opaque
  metadata:
    labels:
      app: grafana
    name: grafana-smtp
    namespace: kube-system
  data:
    # base64-encoded credentials, substituted at build time
    user: @grafana_smtp_user@
    password: @grafana_smtp_password@
40
bootstrap/config/kube-proxy.yaml
Normal file
40
bootstrap/config/kube-proxy.yaml
Normal file
@@ -0,0 +1,40 @@
|
||||
# Grants the "kube-proxy" user the permissions kube-proxy needs (read
# endpoints/services/nodes, write events). The previous header comment
# ("allows anyone in the manager group to read secrets") was an unrelated
# copy-paste from the Kubernetes docs.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kube-proxy
subjects:
- kind: User
  name: kube-proxy
  apiGroup: rbac.authorization.k8s.io
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  # was "kube-proxy", which is not defined anywhere — the role created
  # below is named "kube-proxy-role", so the binding referenced a
  # nonexistent role and granted nothing.
  name: kube-proxy-role
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kube-proxy-role
rules:
# read access to the core resources kube-proxy watches
-
  apiGroups:
  - ""
  resources:
  - endpoints
  - events
  - services
  - nodes
  verbs: ["get", "watch", "list"]
- nonResourceURLs: ["*"]
  verbs: ["get", "watch", "list"]

# full access to events so kube-proxy can record them
-
  apiGroups:
  - ""
  resources:
  - events
  verbs: ["*"]
- nonResourceURLs: ["*"]
  verbs: ["*"]
85
bootstrap/kube-system-bootstrap
Executable file
85
bootstrap/kube-system-bootstrap
Executable file
@@ -0,0 +1,85 @@
|
||||
#!/usr/bin/env bash
# Bootstrap core kube-system services: static RBAC/config manifests, the
# cluster CA secret, cert-manager, the prometheus-operator CRDs, and a
# set of helm charts.
#
# All @...@ tokens are placeholders substituted at build time (nix).

TOP=@out@/share/kube-system-bootstrap

ca=@initca@
apiserver="@apiserver@"

grafana_ldap_toml="@grafana_ldap_toml@"
# NOTE(review): $fileserver is tested below but never assigned in this
# script — presumably it should be substituted here like the variables
# above. Confirm against the nix expression that instantiates this file.

# Apply the static manifests from $TOP/config and (re)create the
# cluster-ca TLS secret used by the ca-issuer.
apply_configs () {
  local d=$TOP/config
  local configs=(
    "$d/cluster-auth-rbac.yaml"
    "$d/kube-proxy.yaml"
    "$d/front-proxy-client.yaml"
    "$d/grafana-smtp-secret.yaml"
  )
  # only install the ldap secret when an ldap.toml was provided
  [ -n "$grafana_ldap_toml" ] && configs+=("$d/grafana-ldap-toml.yaml")

  # delete-then-create so repeated runs refresh the secret
  kubectl delete secret cluster-ca -n kube-system >/dev/null 2>&1
  kubectl create secret tls cluster-ca \
    --namespace=kube-system --cert="${ca}/ca.pem" --key="${ca}/ca-key.pem"

  local f
  for f in "${configs[@]}"; do
    kubectl apply -f "$f"
  done
}

# Install cert-manager: CRDs first, then the chart from the jetstack repo.
install_certmgr () {
  kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml
  helm repo add jetstack https://charts.jetstack.io
  helm install -n kube-system -f "$TOP/charts/cert-manager.yaml" \
    cert-manager jetstack/cert-manager
}

# helm_install <chart> [values-file]
# Installs stable/<chart> into kube-system. The values file defaults to
# the bundled $TOP/charts/<chart>.yaml. Previously a second argument was
# silently ignored, so the patched prometheus-operator values file passed
# by install_prometheus never took effect.
helm_install () {
  local chart=$1
  local values=${2:-$TOP/charts/$chart.yaml}
  echo "helm install $chart"
  helm install -n kube-system -f "$values" "$chart" "stable/$chart"
}

# Remove a previously installed release, if any.
helm_delete () {
  echo "helm delete existing $1"
  helm delete -n kube-system "$1"
}

# Install prometheus-operator from a patched copy of the bundled values.
install_prometheus () {
  helm_delete prometheus-operator
  local yaml
  yaml=$(mktemp /tmp/prometheus-operator.XXXXXX.yaml)
  cp "$TOP/charts/prometheus-operator.yaml" "$yaml"
  chmod 640 "$yaml"
  # disable ldap for grafana: drop the whole auth.ldap block and the
  # ldap:/existingSecret block. (The old expression deleted only the
  # first two lines of the auth.ldap block, leaving orphaned,
  # mis-indented keys behind.)
  [ -z "$grafana_ldap_toml" ] && \
    sed -i -e '/auth\.ldap:/,+3 d' -e '/^[[:space:]]*ldap:/,+1 d' "$yaml"
  # disable persistent storage when there is no fileserver
  [ -z "${fileserver:-}" ] && \
    sed -i '/prometheusSpec:/,+10d' "$yaml"
  # pass the patched values file explicitly (see helm_install)
  helm_install prometheus-operator "$yaml"
  rm -f -- "$yaml"
}

# Install the remaining charts; nfs-client-provisioner only when a
# fileserver is configured.
install_charts () {
  local charts=()
  [ -n "${fileserver:-}" ] && charts+=(nfs-client-provisioner)
  charts+=(nginx-ingress metrics-server kubernetes-dashboard)

  local c
  for c in "${charts[@]}"; do
    helm_install "$c"
    # crude settling delay between releases; TODO: replace with
    # `kubectl wait` on the created resources
    sleep 30
  done
}

# Apply the prometheus-operator CRDs before installing the chart.
install_prometheus_crds () {
  local url=https://raw.githubusercontent.com/helm/charts/master/stable/prometheus-operator/crds
  local crd
  for crd in alertmanager prometheus prometheusrules servicemonitor podmonitor; do
    kubectl apply -f "$url/crd-$crd.yaml"
  done
}

apply_configs
install_prometheus_crds
install_certmgr
install_charts
install_prometheus

# vim:ft=sh
Reference in New Issue
Block a user