Reorganize project
This commit is contained in:
24
kubernetes-config/charts/cert-manager.yaml
Normal file
24
kubernetes-config/charts/cert-manager.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
# Default values for cert-manager.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
prometheus:
|
||||
enabled: true
|
||||
servicemonitor:
|
||||
enabled: true
|
||||
prometheusInstance: default
|
||||
targetPort: 9402
|
||||
path: /metrics
|
||||
interval: 60s
|
||||
scrapeTimeout: 30s
|
||||
labels: {}
|
||||
|
||||
webhook:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
|
||||
cainjector:
|
||||
enabled: true
|
||||
|
||||
140
kubernetes-config/charts/coredns.yaml
Normal file
140
kubernetes-config/charts/coredns.yaml
Normal file
@@ -0,0 +1,140 @@
|
||||
# Default values for coredns.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
image:
|
||||
repository: coredns/coredns
|
||||
tag: "1.6.4"
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
|
||||
serviceType: "ClusterIP"
|
||||
|
||||
prometheus:
|
||||
monitor:
|
||||
enabled: true
|
||||
additionalLabels: {}
|
||||
namespace: ""
|
||||
|
||||
service:
|
||||
clusterIP: 10.0.0.254
|
||||
# loadBalancerIP: ""
|
||||
# externalTrafficPolicy: ""
|
||||
# annotations:
|
||||
# prometheus.io/scrape: "true"
|
||||
# prometheus.io/port: "9153"
|
||||
|
||||
serviceAccount:
|
||||
create: true
|
||||
# The name of the ServiceAccount to use
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name:
|
||||
|
||||
rbac:
|
||||
# If true, create & use RBAC resources
|
||||
create: true
|
||||
# If true, create and use PodSecurityPolicy
|
||||
pspEnable: false
|
||||
# The name of the ServiceAccount to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
# name:
|
||||
|
||||
# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app.
|
||||
isClusterService: true
|
||||
|
||||
# Optional priority class to be used for the coredns pods
|
||||
priorityClassName: ""
|
||||
|
||||
servers:
|
||||
- zones:
|
||||
- zone: .
|
||||
port: 53
|
||||
plugins:
|
||||
- name: cache
|
||||
parameters: 30
|
||||
- name: errors
|
||||
# Serves a /health endpoint on :8080, required for livenessProbe
|
||||
- name: health
|
||||
# Serves a /ready endpoint on :8181, required for readinessProbe
|
||||
- name: ready
|
||||
# Required to query kubernetes API for data
|
||||
- name: kubernetes
|
||||
parameters: @cluster@.local
|
||||
- name: loadbalance
|
||||
parameters: round_robin
|
||||
# Serves a /metrics endpoint on :9153, required for serviceMonitor
|
||||
- name: prometheus
|
||||
parameters: 0.0.0.0:9153
|
||||
- name: forward
|
||||
parameters: . /etc/resolv.conf
|
||||
|
||||
# Complete example with all the options:
|
||||
# - zones: # the `zones` block can be left out entirely, defaults to "."
|
||||
# - zone: hello.world. # optional, defaults to "."
|
||||
# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS)
|
||||
# - zone: foo.bar.
|
||||
# scheme: dns://
|
||||
# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol
|
||||
# # Note that this will not work if you are also exposing tls or grpc on the same server
|
||||
# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS)
|
||||
# plugins: # the plugins to use for this server block
|
||||
# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it!
|
||||
# parameters: foo bar # list of parameters after the plugin
|
||||
# configBlock: |- # if the plugin supports extra block style config, supply it here
|
||||
# hello world
|
||||
# foo bar
|
||||
|
||||
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
|
||||
# for example:
|
||||
# affinity:
|
||||
# nodeAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# nodeSelectorTerms:
|
||||
# - matchExpressions:
|
||||
# - key: foo.bar.com/role
|
||||
# operator: In
|
||||
# values:
|
||||
# - master
|
||||
affinity: {}
|
||||
|
||||
# Node labels for pod assignment
|
||||
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
|
||||
nodeSelector: {}
|
||||
|
||||
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
|
||||
# for example:
|
||||
# tolerations:
|
||||
# - key: foo.bar.com/role
|
||||
# operator: Equal
|
||||
# value: master
|
||||
# effect: NoSchedule
|
||||
tolerations: []
|
||||
|
||||
# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/
|
||||
zoneFiles: []
|
||||
# - filename: example.db
|
||||
# domain: example.com
|
||||
# contents: |
|
||||
# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600
|
||||
# example.com. IN NS b.iana-servers.net.
|
||||
# example.com. IN NS a.iana-servers.net.
|
||||
# example.com. IN A 192.168.99.102
|
||||
# *.example.com. IN A 192.168.99.102
|
||||
|
||||
# optional array of secrets to mount inside the coredns container
|
||||
# possible usecase: need for secure connection with etcd backend
|
||||
extraSecrets: []
|
||||
# - name: etcd-client-certs
|
||||
# mountPath: /etc/coredns/tls/etcd
|
||||
# - name: some-fancy-secret
|
||||
# mountPath: /etc/wherever
|
||||
|
||||
45
kubernetes-config/charts/kubernetes-dashboard.yaml
Normal file
45
kubernetes-config/charts/kubernetes-dashboard.yaml
Normal file
@@ -0,0 +1,45 @@
|
||||
# Default values for kubernetes-dashboard
|
||||
# This is a YAML-formatted file.
|
||||
# Declare name/value pairs to be passed into your templates.
|
||||
# name: value
|
||||
|
||||
## Enable possibility to skip login
|
||||
enableSkipLogin: false
|
||||
|
||||
## Serve application over HTTP without TLS
|
||||
enableInsecureLogin: false
|
||||
|
||||
## Additional container arguments
|
||||
extraArgs:
|
||||
- --token-ttl=0
|
||||
|
||||
rbac:
|
||||
clusterAdminRole: true
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
externalPort: 443
|
||||
|
||||
annotations:
|
||||
service.alpha.kubernetes.io/app-protocols: '{"https":"HTTPS"}'
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
cert-manager.io/cluster-issuer: ca-issuer
|
||||
nginx.org/ssl-services: kubernetes-dashboard
|
||||
nginx.ingress.kubernetes.io/secure-backends: "true"
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "true"
|
||||
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
|
||||
# nginx.ingress.kubernetes.io/ssl-passthrough: "false"
|
||||
paths:
|
||||
- /
|
||||
hosts:
|
||||
- dashboard.@cluster@.local
|
||||
tls:
|
||||
- hosts:
|
||||
- dashboard.@cluster@.local
|
||||
secretName: kubernetes-dashboard-tls-cert
|
||||
|
||||
11
kubernetes-config/charts/metrics-server.yaml
Normal file
11
kubernetes-config/charts/metrics-server.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
image:
  pullPolicy: Always
|
||||
|
||||
rbac:
|
||||
create: true
|
||||
|
||||
apiService:
|
||||
create: true
|
||||
|
||||
hostNetwork:
|
||||
enabled: true
|
||||
|
||||
46
kubernetes-config/charts/minio.yaml
Normal file
46
kubernetes-config/charts/minio.yaml
Normal file
@@ -0,0 +1,46 @@
|
||||
# helm repo add minio https://helm.min.io/
|
||||
# helm install --version 6.0.5 -f minio.yaml -n minio minio minio/minio
|
||||
|
||||
|
||||
accessKey: Mkd324ijlnfll23883
|
||||
secretKey: KJQfefrnflol93jpj31mrkjs3i88sj2L
|
||||
|
||||
# environment:
|
||||
# MINIO_ACCESS_KEY_OLD: YOURACCESSKEY
|
||||
# MINIO_SECRET_KEY_OLD: YOURSECRETKEY
|
||||
|
||||
defaultBucket:
|
||||
enabled: true
|
||||
name: default
|
||||
policy: none
|
||||
purge: false
|
||||
|
||||
buckets:
|
||||
- name: serit
|
||||
policy: none
|
||||
purge: false
|
||||
- name: gitlab
|
||||
policy: none
|
||||
purge: false
|
||||
|
||||
clusterDomain: kube2.local
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "true"
|
||||
cert-manager.io/cluster-issuer: ca-issuer
|
||||
nginx.ingress.kubernetes.io/whitelist-source-range: 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16
|
||||
hosts:
|
||||
- minio.k2.local
|
||||
tls:
|
||||
- hosts:
|
||||
- minio.k2.local
|
||||
secretName: minio-tls
|
||||
|
||||
persistence:
|
||||
enabled: true
|
||||
size: 100Gi
|
||||
storageClass: managed-nfs-storage
|
||||
|
||||
13
kubernetes-config/charts/nfs-client-provisioner.yaml
Normal file
13
kubernetes-config/charts/nfs-client-provisioner.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
image:
|
||||
tag: latest
|
||||
storageClass:
|
||||
name: managed-nfs-storage
|
||||
defaultClass: true
|
||||
reclaimPolicy: Delete
|
||||
archiveOnDelete: true
|
||||
nfs:
|
||||
server: @fileserver@
|
||||
path: /@cluster@
|
||||
mountOptions:
|
||||
- nfsvers=4.1
|
||||
|
||||
69
kubernetes-config/charts/nginx-ingress.yaml
Normal file
69
kubernetes-config/charts/nginx-ingress.yaml
Normal file
@@ -0,0 +1,69 @@
|
||||
## nginx configuration
|
||||
## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md
|
||||
##
|
||||
controller:
|
||||
kind: Deployment
|
||||
hostNetwork: true
|
||||
service:
|
||||
type: NodePort
|
||||
nodePorts:
|
||||
http: 30080
|
||||
https: 30443
|
||||
targetPorts:
|
||||
http: http
|
||||
https: https
|
||||
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Equal
|
||||
value: "true"
|
||||
effect: NoSchedule
|
||||
- key: unschedulable
|
||||
operator: Equal
|
||||
value: "true"
|
||||
effect: NoSchedule
|
||||
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values: [ @apiserver@ ]
|
||||
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- nginx-ingress
|
||||
- key: component
|
||||
operator: In
|
||||
values:
|
||||
- controller
|
||||
topologyKey: kubernetes.io/hostname
|
||||
namespaces: []
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
minAvailable: 1
|
||||
|
||||
metrics:
|
||||
enabled: true
|
||||
serviceMonitor:
  enabled: true
|
||||
|
||||
# TCP service key:value pairs
|
||||
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp
|
||||
##
|
||||
tcp: {}
|
||||
# 8080: "default/example-tcp-svc:9000"
|
||||
|
||||
# UDP service key:value pairs
|
||||
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp
|
||||
##
|
||||
udp: {}
|
||||
# 53: "kube-system/kube-dns:53"
|
||||
|
||||
169
kubernetes-config/charts/prometheus-operator.yaml
Normal file
169
kubernetes-config/charts/prometheus-operator.yaml
Normal file
@@ -0,0 +1,169 @@
|
||||
alertmanager:
|
||||
|
||||
## Deploy alertmanager
|
||||
##
|
||||
enabled: true
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
cert-manager.io/cluster-issuer: ca-issuer
|
||||
|
||||
## Hosts must be provided if Ingress is enabled.
|
||||
##
|
||||
hosts:
|
||||
- alertmanager.@cluster@.local
|
||||
|
||||
## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
|
||||
##
|
||||
paths: []
|
||||
# - /
|
||||
|
||||
## TLS configuration for Alertmanager Ingress
|
||||
## Secret must be manually created in the namespace
|
||||
##
|
||||
tls:
|
||||
- secretName: alertmanager-general-tls
|
||||
hosts:
|
||||
- alertmanager.@cluster@.local
|
||||
|
||||
grafana:
|
||||
enabled: true
|
||||
|
||||
defaultDashboardsEnabled: true
|
||||
|
||||
adminPassword: prom-operator
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
cert-manager.io/cluster-issuer: ca-issuer
|
||||
|
||||
hosts:
|
||||
- grafana.@cluster@.local
|
||||
path: /
|
||||
tls:
|
||||
- secretName: grafana-general-tls
|
||||
hosts:
|
||||
- grafana.@cluster@.local
|
||||
|
||||
grafana.ini:
|
||||
paths:
|
||||
data: /var/lib/grafana/data
|
||||
logs: /var/log/grafana
|
||||
plugins: /var/lib/grafana/plugins
|
||||
provisioning: /etc/grafana/provisioning
|
||||
analytics:
|
||||
check_for_updates: true
|
||||
log:
|
||||
mode: console
|
||||
grafana_net:
|
||||
url: https://grafana.net
|
||||
## LDAP Authentication can be enabled with the following values on grafana.ini
|
||||
## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
|
||||
auth.ldap:
|
||||
enabled: true
|
||||
allow_sign_up: true
|
||||
config_file: /etc/grafana/ldap.toml
|
||||
smtp:
|
||||
enabled: true
|
||||
host: smtpgw.itpartner.no
|
||||
port: 465
|
||||
user: utvikling
|
||||
skip_verify: true
|
||||
|
||||
## Grafana's LDAP configuration
|
||||
## Templated by the template in _helpers.tpl
|
||||
## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled
|
||||
## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
|
||||
## ref: http://docs.grafana.org/installation/ldap/#configuration
|
||||
ldap:
|
||||
existingSecret: grafana-ldap-toml
|
||||
|
||||
## Grafana's SMTP configuration
|
||||
## NOTE: To enable, grafana.ini must be configured with smtp.enabled
|
||||
## ref: http://docs.grafana.org/installation/configuration/#smtp
|
||||
smtp:
|
||||
# `existingSecret` is a reference to an existing secret containing the smtp configuration
|
||||
# for Grafana.
|
||||
existingSecret: grafana-smtp
|
||||
userKey: user
|
||||
passwordKey: password
|
||||
|
||||
kubeApiServer:
|
||||
enabled: true
|
||||
tlsConfig:
|
||||
insecureSkipVerify: true
|
||||
|
||||
kubelet:
|
||||
enabled: true
|
||||
namespace: kube-system
|
||||
|
||||
coreDns:
|
||||
enabled: true
|
||||
|
||||
kubeEtcd:
|
||||
enabled: true
|
||||
serviceMonitor:
|
||||
insecureSkipVerify: true
|
||||
endpoints:
|
||||
- @apiserverAddress@
|
||||
|
||||
kubeControllerManager:
|
||||
enabled: true
|
||||
serviceMonitor:
|
||||
insecureSkipVerify: true
|
||||
endpoints:
|
||||
- @apiserverAddress@
|
||||
|
||||
kubeScheduler:
|
||||
enabled: true
|
||||
serviceMonitor:
|
||||
insecureSkipVerify: true
|
||||
endpoints:
|
||||
- @apiserverAddress@
|
||||
|
||||
kubeProxy:
|
||||
enabled: false
|
||||
endpoints:
|
||||
- @apiserverAddress@
|
||||
@workers@
|
||||
|
||||
kubeStateMetrics:
|
||||
enabled: true
|
||||
|
||||
nodeExporter:
|
||||
enabled: true
|
||||
|
||||
prometheusOperator:
|
||||
enabled: true
|
||||
|
||||
prometheus:
|
||||
enabled: true
|
||||
ingress:
|
||||
enabled: true
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
cert-manager.io/cluster-issuer: ca-issuer
|
||||
hosts:
|
||||
- prometheus.@cluster@.local
|
||||
paths: []
|
||||
tls:
|
||||
- secretName: prometheus-general-tls
|
||||
hosts:
|
||||
- prometheus.@cluster@.local
|
||||
prometheusSpec:
|
||||
storageSpec:
|
||||
volumeClaimTemplate:
|
||||
spec:
|
||||
storageClassName: managed-nfs-storage
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
selector: {}
|
||||
|
||||
287
kubernetes-config/charts/sentry.yaml
Normal file
287
kubernetes-config/charts/sentry.yaml
Normal file
@@ -0,0 +1,287 @@
|
||||
# helm install --namespace kube-system --timeout 1000 -f sentry.yaml sentry stable/sentry
|
||||
# image:
|
||||
# repository: sentry
|
||||
# tag: 9
|
||||
# pullPolicy: IfNotPresent
|
||||
# # Add the secret name to pull from a private registry.
|
||||
# imagePullSecrets: []
|
||||
# # - name:
|
||||
|
||||
# How many web UI instances to run
|
||||
# web:
|
||||
# replicacount: 1
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: 500m
|
||||
# memory: 500Mi
|
||||
# requests:
|
||||
# cpu: 300m
|
||||
# memory: 300Mi
|
||||
# env:
|
||||
# - name: GITHUB_APP_ID
|
||||
# value:
|
||||
# - name: GITHUB_API_SECRET
|
||||
# value:
|
||||
# nodeSelector: {}
|
||||
# tolerations: []
|
||||
# affinity: {}
|
||||
# probeInitialDelaySeconds: 50
|
||||
# priorityClassName: ""
|
||||
## Use an alternate scheduler, e.g. "stork".
|
||||
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
||||
##
|
||||
# schedulerName:
|
||||
# Optional extra labels for pod, i.e. redis-client: "true"
|
||||
# podLabels: []
|
||||
|
||||
|
||||
# How many cron instances to run
|
||||
# cron:
|
||||
# replicacount: 1
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: 200m
|
||||
# memory: 200Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 100Mi
|
||||
# nodeSelector: {}
|
||||
# tolerations: []
|
||||
# affinity: {}
|
||||
# priorityClassName: ""
|
||||
# schedulerName:
|
||||
# Optional extra labels for pod, i.e. redis-client: "true"
|
||||
# podLabels: []
|
||||
|
||||
# How many worker instances to run
|
||||
# worker:
|
||||
# replicacount: 2
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: 300m
|
||||
# memory: 500Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 100Mi
|
||||
# nodeSelector: {}
|
||||
# tolerations: []
|
||||
# affinity: {}
|
||||
# priorityClassName: ""
|
||||
# schedulerName:
|
||||
# Optional extra labels for pod, i.e. redis-client: "true"
|
||||
# podLabels: []
|
||||
# concurrency:
|
||||
|
||||
# Admin user to create
|
||||
user:
|
||||
# Indicates whether to create the admin user.
|
||||
# Default is true as the initial installation.
|
||||
create: true
|
||||
email: admin
|
||||
|
||||
# BYO Email server
|
||||
# TODO: Add exim4 template
|
||||
# https://docs.sentry.io/server/installation/docker/#outbound-email
|
||||
email:
|
||||
from_address: sentry@sentry.itpartner.no
|
||||
host: smtpgw.itpartner.no
|
||||
port: 465
|
||||
use_tls: false
|
||||
user: utvikling
|
||||
password: S0m3rp0m@de#21!
|
||||
enable_replies: false
|
||||
|
||||
# Name of the service and what port to expose on the pod
|
||||
# Don't change these unless you know what you're doing
|
||||
service:
|
||||
name: sentry
|
||||
type: ClusterIP
|
||||
# externalPort: 9000
|
||||
# internalPort: 9000
|
||||
|
||||
# ## Service annotations
|
||||
# ##
|
||||
# annotations: {}
|
||||
|
||||
## External IP addresses of service
|
||||
## Default: nil
|
||||
##
|
||||
# externalIPs:
|
||||
# - 192.168.0.1
|
||||
|
||||
## Load Balancer allow-list
|
||||
# loadBalancerSourceRanges: []
|
||||
|
||||
# Configure the location of Sentry artifacts
|
||||
filestore:
|
||||
# Set to one of filesystem, gcs or s3 as supported by Sentry.
|
||||
backend: filesystem
|
||||
|
||||
filesystem:
|
||||
path: /var/lib/sentry/files
|
||||
|
||||
## Enable persistence using Persistent Volume Claims
|
||||
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
|
||||
##
|
||||
persistence:
|
||||
enabled: true
|
||||
## database data Persistent Volume Storage Class
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is
|
||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
||||
## GKE, AWS & OpenStack)
|
||||
##
|
||||
storageClass: managed-nfs-storage
|
||||
accessMode: ReadWriteOnce
|
||||
size: 10Gi
|
||||
|
||||
## Whether to mount the persistent volume to the Sentry worker and
|
||||
## cron deployments. This setting needs to be enabled for some advanced
|
||||
## Sentry features, such as private source maps. If you disable this
|
||||
## setting, the Sentry workers will not have access to artifacts you upload
|
||||
## through the web deployment.
|
||||
## Please note that you may need to change your accessMode to ReadWriteMany
|
||||
## if you plan on having the web, worker and cron deployments run on
|
||||
## different nodes.
|
||||
# persistentWorkers: false
|
||||
|
||||
## Point this at a pre-configured secret containing a service account. The resulting
|
||||
## secret will be mounted at /var/run/secrets/google
|
||||
# gcs:
|
||||
# credentialsFile: credentials.json
|
||||
# secretName:
|
||||
# bucketName:
|
||||
|
||||
## Currently unconfigured and changing this has no impact on the template configuration.
|
||||
# s3: {}
|
||||
# accessKey:
|
||||
# secretKey:
|
||||
# bucketName:
|
||||
|
||||
## Configure ingress resource that allow you to access the
|
||||
## Sentry installation. Set up the URL
|
||||
## ref: http://kubernetes.io/docs/user-guide/ingress/
|
||||
##
|
||||
ingress:
|
||||
enabled: true
|
||||
hostname: sentry.itpartner.no
|
||||
|
||||
## Ingress annotations
|
||||
##
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
cert-manager.io/cluster-issuer: ca-issuer
|
||||
nginx.ingress.kubernetes.io/backend-protocol: HTTP
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "true"
|
||||
# kubernetes.io/tls-acme: 'true'
|
||||
tls:
|
||||
- secretName: sentry-tls-cert
|
||||
hosts:
|
||||
- sentry.itpartner.no
|
||||
|
||||
# TODO: add support for plugins https://docs.sentry.io/server/plugins/
|
||||
|
||||
postgresql:
|
||||
enabled: true
|
||||
postgresqlDatabase: sentry
|
||||
postgresqlUsername: postgres
|
||||
postgresqlPassword: jdjiujh1212eo
|
||||
# # Only used when internal PG is disabled
|
||||
# # postgresHost: postgres
|
||||
# # postgresPassword: postgres
|
||||
# # postgresPort: 5432
|
||||
# imageTag: "9.6"
|
||||
# persistence:
|
||||
# enabled: true
|
||||
|
||||
redis:
|
||||
clusterDomain: kube2.local
|
||||
# enabled: true
|
||||
# Only used when internal redis is disabled
|
||||
# host: redis
|
||||
# Just omit the password field if your redis cluster doesn't use password
|
||||
# password: redis
|
||||
# port: 6379
|
||||
# master:
|
||||
# persistence:
|
||||
# enabled: true
|
||||
# If change pvc size redis.master.persistence.size: 20Gi
|
||||
|
||||
# config:
|
||||
# configYml: ""
|
||||
# sentryConfPy: ""
|
||||
|
||||
## Prometheus Exporter / Metrics
|
||||
##
|
||||
#metrics:
|
||||
# enabled: true
|
||||
|
||||
# ## Configure extra options for liveness and readiness probes
|
||||
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
|
||||
# livenessProbe:
|
||||
# enabled: true
|
||||
# initialDelaySeconds: 30
|
||||
# periodSeconds: 5
|
||||
# timeoutSeconds: 2
|
||||
# failureThreshold: 3
|
||||
# successThreshold: 1
|
||||
# readinessProbe:
|
||||
# enabled: true
|
||||
# initialDelaySeconds: 30
|
||||
# periodSeconds: 5
|
||||
# timeoutSeconds: 2
|
||||
# failureThreshold: 3
|
||||
# successThreshold: 1
|
||||
|
||||
# ## Metrics exporter resource requests and limits
|
||||
# ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||
# resources:
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 100Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 100Mi
|
||||
|
||||
# nodeSelector: {}
|
||||
# tolerations: []
|
||||
# affinity: {}
|
||||
# # schedulerName:
|
||||
# # Optional extra labels for pod, i.e. redis-client: "true"
|
||||
# # podLabels: []
|
||||
# service:
|
||||
# type: ClusterIP
|
||||
# labels: {}
|
||||
|
||||
# image:
|
||||
# repository: prom/statsd-exporter
|
||||
# tag: v0.10.5
|
||||
# pullPolicy: IfNotPresent
|
||||
|
||||
# # Enable this if you're using https://github.com/coreos/prometheus-operator
|
||||
# serviceMonitor:
|
||||
# enabled: true
|
||||
# ## Specify a namespace if needed
|
||||
# # namespace: kube-system
|
||||
# # fallback to the prometheus default unless specified
|
||||
# # interval: 10s
|
||||
# ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr)
|
||||
# ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1)
|
||||
# ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
|
||||
# # selector:
|
||||
# # app: prometheus
|
||||
# #prometheus: kube-prometheus
|
||||
|
||||
## Provide affinity for hooks if needed
|
||||
#hooks:
|
||||
# affinity: {}
|
||||
# dbInit:
|
||||
# resources:
|
||||
# # We set up 3000Mi for the memory limit because a Sentry instance needs at least 3Gb RAM to perform a migration process
|
||||
# # reference: https://github.com/helm/charts/issues/15296
|
||||
# limits:
|
||||
# memory: 3200Mi
|
||||
# requests:
|
||||
# memory: 3000Mi
|
||||
|
||||
588
kubernetes-config/charts/vault-values.yaml
Normal file
588
kubernetes-config/charts/vault-values.yaml
Normal file
@@ -0,0 +1,588 @@
|
||||
# Available parameters and their default values for the Vault chart.
|
||||
|
||||
global:
|
||||
# enabled is the master enabled switch. Setting this to true or false
|
||||
# will enable or disable all the components within this chart by default.
|
||||
enabled: true
|
||||
# Image pull secret to use for registry authentication.
|
||||
imagePullSecrets: []
|
||||
# imagePullSecrets:
|
||||
# - name: image-pull-secret
|
||||
# TLS for end-to-end encrypted transport
|
||||
tlsDisable: true
|
||||
# If deploying to OpenShift
|
||||
openshift: false
|
||||
# Create PodSecurityPolicy for pods
|
||||
psp:
|
||||
enable: false
|
||||
# Annotation for PodSecurityPolicy.
|
||||
# This is a multi-line templated string map, and can also be set as YAML.
|
||||
annotations: |
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default
|
||||
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
|
||||
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
|
||||
|
||||
injector:
|
||||
# True if you want to enable vault agent injection.
|
||||
enabled: true
|
||||
|
||||
# If true, will enable a node exporter metrics endpoint at /metrics.
|
||||
metrics:
|
||||
enabled: false
|
||||
|
||||
# External vault server address for the injector to use. Setting this will
|
||||
# disable deployment of a vault server along with the injector.
|
||||
externalVaultAddr: ""
|
||||
|
||||
# image sets the repo and tag of the vault-k8s image to use for the injector.
|
||||
image:
|
||||
repository: "hashicorp/vault-k8s"
|
||||
tag: "0.6.0"
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
# agentImage sets the repo and tag of the Vault image to use for the Vault Agent
|
||||
# containers. This should be set to the official Vault image. Vault 1.3.1+ is
|
||||
# required.
|
||||
agentImage:
|
||||
repository: "vault"
|
||||
tag: "1.5.4"
|
||||
|
||||
# Mount Path of the Vault Kubernetes Auth Method.
|
||||
authPath: "auth/kubernetes"
|
||||
|
||||
# Configures the log verbosity of the injector. Supported log levels: Trace, Debug, Error, Warn, Info
|
||||
logLevel: "info"
|
||||
|
||||
# Configures the log format of the injector. Supported log formats: "standard", "json".
|
||||
logFormat: "standard"
|
||||
|
||||
# Configures all Vault Agent sidecars to revoke their token when shutting down
|
||||
revokeOnShutdown: false
|
||||
|
||||
# namespaceSelector is the selector for restricting the webhook to only
|
||||
# specific namespaces.
|
||||
# See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector
|
||||
# for more details.
|
||||
# Example:
|
||||
# namespaceSelector:
|
||||
# matchLabels:
|
||||
# sidecar-injector: enabled
|
||||
namespaceSelector: {}
|
||||
|
||||
# Configures failurePolicy of the webhook. By default webhook failures are ignored.
|
||||
# To block pod creation while webhook is unavailable, set the policy to `Fail` below.
|
||||
# See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy
|
||||
#
|
||||
# failurePolicy: Fail
|
||||
|
||||
certs:
|
||||
# secretName is the name of the secret that has the TLS certificate and
|
||||
# private key to serve the injector webhook. If this is null, then the
|
||||
# injector will default to its automatic management mode that will assign
|
||||
# a service account to the injector to generate its own certificates.
|
||||
secretName: null
|
||||
|
||||
# caBundle is a base64-encoded PEM-encoded certificate bundle for the
|
||||
# CA that signed the TLS certificate that the webhook serves. This must
|
||||
# be set if secretName is non-null.
|
||||
caBundle: ""
|
||||
|
||||
# certName and keyName are the names of the files within the secret for
|
||||
# the TLS cert and private key, respectively. These have reasonable
|
||||
# defaults but can be customized if necessary.
|
||||
certName: tls.crt
|
||||
keyName: tls.key
|
||||
|
||||
resources: {}
|
||||
# resources:
|
||||
# requests:
|
||||
# memory: 256Mi
|
||||
# cpu: 250m
|
||||
# limits:
|
||||
# memory: 256Mi
|
||||
# cpu: 250m
|
||||
|
||||
# extraEnvironmentVars is a list of extra environment variables to set in the
|
||||
# injector deployment.
|
||||
extraEnvironmentVars: {}
|
||||
# KUBERNETES_SERVICE_HOST: kubernetes.default.svc
|
||||
|
||||
# Affinity Settings for injector pods
|
||||
# This should be a multi-line string matching the affinity section of a
|
||||
# PodSpec.
|
||||
affinity: null
|
||||
|
||||
# Toleration Settings for injector pods
|
||||
# This should be a multi-line string matching the Toleration array
|
||||
# in a PodSpec.
|
||||
tolerations: null
|
||||
|
||||
  # nodeSelector labels for injector pod assignment, formatted as a multi-line string.
|
||||
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
|
||||
# Example:
|
||||
# nodeSelector: |
|
||||
# beta.kubernetes.io/arch: amd64
|
||||
nodeSelector: null
|
||||
|
||||
# Priority class for injector pods
|
||||
priorityClassName: ""
|
||||
|
||||
# Extra annotations to attach to the injector pods
|
||||
# This can either be YAML or a YAML-formatted multi-line templated string map
|
||||
# of the annotations to apply to the injector pods
|
||||
annotations: {}
|
||||
|
||||
server:
|
||||
# Resource requests, limits, etc. for the server cluster placement. This
|
||||
# should map directly to the value of the resources field for a PodSpec.
|
||||
# By default no direct resource request is made.
|
||||
|
||||
image:
|
||||
repository: "vault"
|
||||
tag: "1.5.4"
|
||||
# Overrides the default Image Pull Policy
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
# Configure the Update Strategy Type for the StatefulSet
|
||||
# See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
|
||||
updateStrategyType: "OnDelete"
|
||||
|
||||
resources: {}
|
||||
# resources:
|
||||
# requests:
|
||||
# memory: 256Mi
|
||||
# cpu: 250m
|
||||
# limits:
|
||||
# memory: 256Mi
|
||||
# cpu: 250m
|
||||
|
||||
# Ingress allows ingress services to be created to allow external access
|
||||
# from Kubernetes to access Vault pods.
|
||||
# If deployment is on OpenShift, the following block is ignored.
|
||||
# In order to expose the service, use the route section below
|
||||
ingress:
|
||||
enabled: true
|
||||
labels: {}
|
||||
# traffic: external
|
||||
annotations:
|
||||
# |
|
||||
cert-manager.io/cluster-issuer: letsencrypt-production
|
||||
kubernetes.io/ingress.class: nginx
|
||||
kubernetes.io/tls-acme: "true"
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "true"
|
||||
hosts:
|
||||
- host: vault.k2.itpartner.no
|
||||
paths: [ / ]
|
||||
tls:
|
||||
- secretName: vault-tls
|
||||
hosts:
|
||||
- vault.k2.itpartner.no
|
||||
|
||||
# OpenShift only - create a route to expose the service
|
||||
# The created route will be of type passthrough
|
||||
route:
|
||||
enabled: false
|
||||
labels: {}
|
||||
annotations: {}
|
||||
host: chart-example.local
|
||||
|
||||
# authDelegator enables a cluster role binding to be attached to the service
|
||||
# account. This cluster role binding can be used to setup Kubernetes auth
|
||||
# method. https://www.vaultproject.io/docs/auth/kubernetes.html
|
||||
authDelegator:
|
||||
enabled: true
|
||||
|
||||
# extraInitContainers is a list of init containers. Specified as a YAML list.
|
||||
# This is useful if you need to run a script to provision TLS certificates or
|
||||
# write out configuration files in a dynamic way.
|
||||
extraInitContainers: null
|
||||
# # This example installs a plugin pulled from github into the /usr/local/libexec/vault/oauthapp folder,
|
||||
# # which is defined in the volumes value.
|
||||
# - name: oauthapp
|
||||
# image: "alpine"
|
||||
# command: [sh, -c]
|
||||
# args:
|
||||
# - cd /tmp &&
|
||||
# wget https://github.com/puppetlabs/vault-plugin-secrets-oauthapp/releases/download/v1.2.0/vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64.tar.xz -O oauthapp.xz &&
|
||||
# tar -xf oauthapp.xz &&
|
||||
# mv vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64 /usr/local/libexec/vault/oauthapp &&
|
||||
# chmod +x /usr/local/libexec/vault/oauthapp
|
||||
# volumeMounts:
|
||||
# - name: plugins
|
||||
# mountPath: /usr/local/libexec/vault
|
||||
|
||||
# extraContainers is a list of sidecar containers. Specified as a YAML list.
|
||||
extraContainers: null
|
||||
|
||||
# shareProcessNamespace enables process namespace sharing between Vault and the extraContainers
|
||||
# This is useful if Vault must be signaled, e.g. to send a SIGHUP for log rotation
|
||||
shareProcessNamespace: false
|
||||
|
||||
# extraArgs is a string containing additional Vault server arguments.
|
||||
extraArgs: ""
|
||||
|
||||
# Used to define custom readinessProbe settings
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
# If you need to use a http path instead of the default exec
|
||||
# path: /v1/sys/health?standbyok=true
|
||||
|
||||
# When a probe fails, Kubernetes will try failureThreshold times before giving up
|
||||
failureThreshold: 2
|
||||
# Number of seconds after the container has started before probe initiates
|
||||
initialDelaySeconds: 5
|
||||
# How often (in seconds) to perform the probe
|
||||
periodSeconds: 5
|
||||
# Minimum consecutive successes for the probe to be considered successful after having failed
|
||||
successThreshold: 1
|
||||
# Number of seconds after which the probe times out.
|
||||
timeoutSeconds: 3
|
||||
# Used to enable a livenessProbe for the pods
|
||||
livenessProbe:
|
||||
enabled: false
|
||||
path: "/v1/sys/health?standbyok=true"
|
||||
# When a probe fails, Kubernetes will try failureThreshold times before giving up
|
||||
failureThreshold: 2
|
||||
# Number of seconds after the container has started before probe initiates
|
||||
initialDelaySeconds: 60
|
||||
# How often (in seconds) to perform the probe
|
||||
periodSeconds: 5
|
||||
# Minimum consecutive successes for the probe to be considered successful after having failed
|
||||
successThreshold: 1
|
||||
# Number of seconds after which the probe times out.
|
||||
timeoutSeconds: 3
|
||||
|
||||
# Used to set the sleep time during the preStop step
|
||||
preStopSleepSeconds: 5
|
||||
|
||||
# Used to define commands to run after the pod is ready.
|
||||
# This can be used to automate processes such as initialization
|
||||
  # or bootstrapping auth methods.
|
||||
postStart: []
|
||||
# - /bin/sh
|
||||
# - -c
|
||||
# - /vault/userconfig/myscript/run.sh
|
||||
|
||||
# extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be
|
||||
# used to include variables required for auto-unseal.
|
||||
extraEnvironmentVars: {}
|
||||
# GOOGLE_REGION: global
|
||||
# GOOGLE_PROJECT: myproject
|
||||
# GOOGLE_APPLICATION_CREDENTIALS: /vault/userconfig/myproject/myproject-creds.json
|
||||
|
||||
# extraSecretEnvironmentVars is a list of extra environment variables to set with the stateful set.
|
||||
# These variables take value from existing Secret objects.
|
||||
extraSecretEnvironmentVars: []
|
||||
# - envName: AWS_SECRET_ACCESS_KEY
|
||||
# secretName: vault
|
||||
# secretKey: AWS_SECRET_ACCESS_KEY
|
||||
|
||||
# extraVolumes is a list of extra volumes to mount. These will be exposed
|
||||
# to Vault in the path `/vault/userconfig/<name>/`. The value below is
|
||||
# an array of objects, examples are shown below.
|
||||
extraVolumes: []
|
||||
# - type: secret (or "configMap")
|
||||
# name: my-secret
|
||||
# path: null # default is `/vault/userconfig`
|
||||
|
||||
# volumes is a list of volumes made available to all containers. These are rendered
|
||||
# via toYaml rather than pre-processed like the extraVolumes value.
|
||||
# The purpose is to make it easy to share volumes between containers.
|
||||
volumes: null
|
||||
# - name: plugins
|
||||
# emptyDir: {}
|
||||
|
||||
# volumeMounts is a list of volumeMounts for the main server container. These are rendered
|
||||
# via toYaml rather than pre-processed like the extraVolumes value.
|
||||
# The purpose is to make it easy to share volumes between containers.
|
||||
volumeMounts: null
|
||||
# - mountPath: /usr/local/libexec/vault
|
||||
# name: plugins
|
||||
# readOnly: true
|
||||
|
||||
|
||||
# Affinity Settings
|
||||
# Commenting out or setting as empty the affinity variable, will allow
|
||||
# deployment to single node services such as Minikube
|
||||
affinity: |
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ template "vault.name" . }}
|
||||
app.kubernetes.io/instance: "{{ .Release.Name }}"
|
||||
component: server
|
||||
topologyKey: kubernetes.io/hostname
|
||||
|
||||
# Toleration Settings for server pods
|
||||
# This should be a multi-line string matching the Toleration array
|
||||
# in a PodSpec.
|
||||
tolerations: null
|
||||
|
||||
  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
|
||||
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
|
||||
# Example:
|
||||
# nodeSelector: |
|
||||
# beta.kubernetes.io/arch: amd64
|
||||
nodeSelector: null
|
||||
|
||||
# Enables network policy for server pods
|
||||
networkPolicy:
|
||||
enabled: false
|
||||
|
||||
# Priority class for server pods
|
||||
priorityClassName: ""
|
||||
|
||||
# Extra labels to attach to the server pods
|
||||
# This should be a YAML map of the labels to apply to the server pods
|
||||
extraLabels: {}
|
||||
|
||||
# Extra annotations to attach to the server pods
|
||||
# This can either be YAML or a YAML-formatted multi-line templated string map
|
||||
# of the annotations to apply to the server pods
|
||||
annotations: {}
|
||||
|
||||
# Enables a headless service to be used by the Vault Statefulset
|
||||
service:
|
||||
enabled: true
|
||||
# clusterIP controls whether a Cluster IP address is attached to the
|
||||
# Vault service within Kubernetes. By default the Vault service will
|
||||
# be given a Cluster IP address, set to None to disable. When disabled
|
||||
# Kubernetes will create a "headless" service. Headless services can be
|
||||
# used to communicate with pods directly through DNS instead of a round robin
|
||||
# load balancer.
|
||||
# clusterIP: None
|
||||
|
||||
# Configures the service type for the main Vault service. Can be ClusterIP
|
||||
# or NodePort.
|
||||
#type: ClusterIP
|
||||
|
||||
# If type is set to "NodePort", a specific nodePort value can be configured,
|
||||
# will be random if left blank.
|
||||
#nodePort: 30000
|
||||
|
||||
# Port on which Vault server is listening
|
||||
port: 8200
|
||||
# Target port to which the service should be mapped to
|
||||
targetPort: 8200
|
||||
# Extra annotations for the service definition. This can either be YAML or a
|
||||
# YAML-formatted multi-line templated string map of the annotations to apply
|
||||
# to the service.
|
||||
annotations: {}
|
||||
|
||||
# This configures the Vault Statefulset to create a PVC for data
|
||||
# storage when using the file or raft backend storage engines.
|
||||
# See https://www.vaultproject.io/docs/configuration/storage/index.html to know more
|
||||
dataStorage:
|
||||
enabled: true
|
||||
# Size of the PVC created
|
||||
size: 10Gi
|
||||
# Location where the PVC will be mounted.
|
||||
mountPath: "/vault/data"
|
||||
# Name of the storage class to use. If null it will use the
|
||||
# configured default Storage Class.
|
||||
storageClass: null
|
||||
# Access Mode of the storage device being used for the PVC
|
||||
accessMode: ReadWriteOnce
|
||||
# Annotations to apply to the PVC
|
||||
annotations: {}
|
||||
|
||||
# This configures the Vault Statefulset to create a PVC for audit
|
||||
# logs. Once Vault is deployed, initialized and unseal, Vault must
|
||||
# be configured to use this for audit logs. This will be mounted to
|
||||
# /vault/audit
|
||||
# See https://www.vaultproject.io/docs/audit/index.html to know more
|
||||
auditStorage:
|
||||
enabled: false
|
||||
# Size of the PVC created
|
||||
size: 10Gi
|
||||
# Location where the PVC will be mounted.
|
||||
mountPath: "/vault/audit"
|
||||
# Name of the storage class to use. If null it will use the
|
||||
# configured default Storage Class.
|
||||
storageClass: null
|
||||
# Access Mode of the storage device being used for the PVC
|
||||
accessMode: ReadWriteOnce
|
||||
# Annotations to apply to the PVC
|
||||
annotations: {}
|
||||
|
||||
# Run Vault in "dev" mode. This requires no further setup, no state management,
|
||||
# and no initialization. This is useful for experimenting with Vault without
|
||||
# needing to unseal, store keys, et al. All data is lost on restart - do not
|
||||
# use dev mode for anything other than experimenting.
|
||||
# See https://www.vaultproject.io/docs/concepts/dev-server.html to know more
|
||||
dev:
|
||||
enabled: false
|
||||
|
||||
# Run Vault in "standalone" mode. This is the default mode that will deploy if
|
||||
# no arguments are given to helm. This requires a PVC for data storage to use
|
||||
# the "file" backend. This mode is not highly available and should not be scaled
|
||||
# past a single replica.
|
||||
standalone:
|
||||
enabled: "-"
|
||||
|
||||
# config is a raw string of default configuration when using a Stateful
|
||||
# deployment. Default is to use a PersistentVolumeClaim mounted at /vault/data
|
||||
# and store data there. This is only used when using a Replica count of 1, and
|
||||
# using a stateful set. This should be HCL.
|
||||
|
||||
# Note: Configuration files are stored in ConfigMaps so sensitive data
|
||||
# such as passwords should be either mounted through extraSecretEnvironmentVars
|
||||
# or through a Kube secret. For more information see:
|
||||
# https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
|
||||
config: |
|
||||
ui = true
|
||||
|
||||
listener "tcp" {
|
||||
tls_disable = 1
|
||||
address = "[::]:8200"
|
||||
cluster_address = "[::]:8201"
|
||||
}
|
||||
storage "file" {
|
||||
path = "/vault/data"
|
||||
}
|
||||
|
||||
# Example configuration for using auto-unseal, using Google Cloud KMS. The
|
||||
# GKMS keys must already exist, and the cluster must have a service account
|
||||
# that is authorized to access GCP KMS.
|
||||
#seal "gcpckms" {
|
||||
# project = "vault-helm-dev"
|
||||
# region = "global"
|
||||
# key_ring = "vault-helm-unseal-kr"
|
||||
# crypto_key = "vault-helm-unseal-key"
|
||||
#}
|
||||
|
||||
# Run Vault in "HA" mode. There are no storage requirements unless audit log
|
||||
# persistence is required. In HA mode Vault will configure itself to use Consul
|
||||
# for its storage backend. The default configuration provided will work with the Consul
|
||||
# Helm project by default. It is possible to manually configure Vault to use a
|
||||
# different HA backend.
|
||||
ha:
|
||||
enabled: false
|
||||
replicas: 3
|
||||
|
||||
# Set the api_addr configuration for Vault HA
|
||||
# See https://www.vaultproject.io/docs/configuration#api_addr
|
||||
# If set to null, this will be set to the Pod IP Address
|
||||
apiAddr: null
|
||||
|
||||
# Enables Vault's integrated Raft storage. Unlike the typical HA modes where
|
||||
# Vault's persistence is external (such as Consul), enabling Raft mode will create
|
||||
# persistent volumes for Vault to store data according to the configuration under server.dataStorage.
|
||||
# The Vault cluster will coordinate leader elections and failovers internally.
|
||||
raft:
|
||||
|
||||
# Enables Raft integrated storage
|
||||
enabled: false
|
||||
# Set the Node Raft ID to the name of the pod
|
||||
setNodeId: false
|
||||
|
||||
# Note: Configuration files are stored in ConfigMaps so sensitive data
|
||||
# such as passwords should be either mounted through extraSecretEnvironmentVars
|
||||
# or through a Kube secret. For more information see:
|
||||
# https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
|
||||
config: |
|
||||
ui = true
|
||||
|
||||
listener "tcp" {
|
||||
tls_disable = 1
|
||||
address = "[::]:8200"
|
||||
cluster_address = "[::]:8201"
|
||||
}
|
||||
|
||||
storage "raft" {
|
||||
path = "/vault/data"
|
||||
}
|
||||
|
||||
service_registration "kubernetes" {}
|
||||
|
||||
# config is a raw string of default configuration when using a Stateful
|
||||
# deployment. Default is to use a Consul for its HA storage backend.
|
||||
# This should be HCL.
|
||||
|
||||
# Note: Configuration files are stored in ConfigMaps so sensitive data
|
||||
# such as passwords should be either mounted through extraSecretEnvironmentVars
|
||||
# or through a Kube secret. For more information see:
|
||||
# https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
|
||||
config: |
|
||||
ui = true
|
||||
|
||||
listener "tcp" {
|
||||
tls_disable = 1
|
||||
address = "[::]:8200"
|
||||
cluster_address = "[::]:8201"
|
||||
}
|
||||
storage "consul" {
|
||||
path = "vault"
|
||||
address = "HOST_IP:8500"
|
||||
}
|
||||
|
||||
service_registration "kubernetes" {}
|
||||
|
||||
# Example configuration for using auto-unseal, using Google Cloud KMS. The
|
||||
# GKMS keys must already exist, and the cluster must have a service account
|
||||
# that is authorized to access GCP KMS.
|
||||
#seal "gcpckms" {
|
||||
# project = "vault-helm-dev-246514"
|
||||
# region = "global"
|
||||
# key_ring = "vault-helm-unseal-kr"
|
||||
# crypto_key = "vault-helm-unseal-key"
|
||||
#}
|
||||
|
||||
# A disruption budget limits the number of pods of a replicated application
|
||||
# that are down simultaneously from voluntary disruptions
|
||||
disruptionBudget:
|
||||
enabled: true
|
||||
|
||||
# maxUnavailable will default to (n/2)-1 where n is the number of
|
||||
# replicas. If you'd like a custom value, you can specify an override here.
|
||||
maxUnavailable: null
|
||||
|
||||
# Definition of the serviceAccount used to run Vault.
|
||||
# These options are also used when using an external Vault server to validate
|
||||
# Kubernetes tokens.
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
# Extra annotations for the serviceAccount definition. This can either be
|
||||
# YAML or a YAML-formatted multi-line templated string map of the
|
||||
# annotations to apply to the serviceAccount.
|
||||
annotations: {}
|
||||
|
||||
# Settings for the statefulSet used to run Vault.
|
||||
statefulSet:
|
||||
# Extra annotations for the statefulSet. This can either be YAML or a
|
||||
# YAML-formatted multi-line templated string map of the annotations to apply
|
||||
# to the statefulSet.
|
||||
annotations: {}
|
||||
|
||||
# Vault UI
|
||||
ui:
|
||||
# True if you want to create a Service entry for the Vault UI.
|
||||
#
|
||||
# serviceType can be used to control the type of service created. For
|
||||
# example, setting this to "LoadBalancer" will create an external load
|
||||
# balancer (for supported K8S installations) to access the UI.
|
||||
enabled: false
|
||||
publishNotReadyAddresses: true
|
||||
# The service should only contain selectors for active Vault pod
|
||||
activeVaultPodOnly: false
|
||||
serviceType: "ClusterIP"
|
||||
serviceNodePort: null
|
||||
externalPort: 8200
|
||||
|
||||
# loadBalancerSourceRanges:
|
||||
# - 10.0.0.0/16
|
||||
# - 1.78.23.3/32
|
||||
|
||||
# loadBalancerIP:
|
||||
|
||||
# Extra annotations to attach to the ui service
|
||||
# This can either be YAML or a YAML-formatted multi-line templated string map
|
||||
# of the annotations to apply to the ui service
|
||||
annotations: {}
|
||||
|
||||
Reference in New Issue
Block a user