Totally revamp cluster chart configs

This commit is contained in:
Jonas Juselius
2020-11-05 21:05:34 +01:00
parent 6fea8b3bc8
commit 3a69e7f1f1
55 changed files with 3921 additions and 531 deletions

charts/anchore/anchore.yaml Normal file

@@ -0,0 +1,439 @@
# Default values for anchore_engine chart.
# Anchore engine has a dependency on Postgresql, configure here
postgresql:
# To use an external DB or Google CloudSQL in GKE, uncomment & set 'enabled: false'
# externalEndpoint, postgresUser, postgresPassword & postgresDatabase are required values for external postgres
# enabled: false
postgresUser: anchoreengine
postgresPassword: KebabNinja2020
postgresDatabase: anchore
# Specify an external (already existing) postgres deployment for use.
# Set to the host and port. eg. mypostgres.myserver.io:5432
externalEndpoint: Null
# Configure size of the persistent volume used with helm managed chart.
# This should be commented out if using an external endpoint.
persistence:
storageClass: managed-nfs-storage
resourcePolicy: nil
size: 20Gi
ingress:
enabled: true
labels: {}
# Exposing the feeds API w/ ingress is for special cases only, uncomment feedsPath if external access to the feeds API is needed
# feedsPath: /v1/feeds/
apiPath: /v1/
uiPath: /
# Uncomment the following lines to bind on specific hostnames
# apiHosts:
# - anchore-api.example.com
# uiHosts:
# - anchore-ui.example.com
# feedsHosts:
# - anchore-feeds.example.com
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
nginx.ingress.kubernetes.io/ssl-redirect: "true"
tls:
- secretName: anchore-tls
hosts:
- anchore.k2.local
# Global configuration shared by all anchore-engine services.
anchoreGlobal:
# Image used for all anchore engine deployments (excluding enterprise components).
image: docker.io/anchore/anchore-engine:v0.8.1
imagePullPolicy: IfNotPresent
# Set image pull secret name if using an anchore-engine image from a private registry
imagePullSecretName:
# Set this value to True to setup the chart for OpenShift deployment compatibility.
openShiftDeployment: False
# Add additional labels to all kubernetes resources
labels: {}
# app.kubernetes.io/managed-by: Helm
# foo: bar
# Set extra environment variables. These will be set on all containers.
extraEnv: []
# - name: foo
# value: bar
# Specifies an existing secret to be used for admin and db passwords
existingSecret: Null
# The scratchVolume controls the mounting of an external volume for scratch space for image analysis. Generally speaking
# you need to provision 3x the size of the largest image (uncompressed) that you want to analyze for this space.
scratchVolume:
mountPath: /analysis_scratch
details:
# Specify volume configuration here
emptyDir: {}
# A secret must be created in the same namespace as anchore-engine is deployed, containing the certificates & public/private keys used for SSL, SAML & custom CAs.
# Certs and keys should be added using the file name the certificate is stored at. This secret will be mounted to /home/anchore/certs.
certStoreSecretName: Null
# Specify your pod securityContext here, by default the anchore images utilize the user/group 'anchore' using uid/gid 1000
# To disable this securityContext comment out `runAsUser` & `runAsGroup`
securityContext:
runAsUser: 1000
runAsGroup: 1000
###
# Start of General Anchore Engine Configurations (populates /config/config.yaml)
###
# Set where default configs are placed at startup. This must be a writable location for the pod.
serviceDir: /anchore_service
logLevel: INFO
cleanupImages: true
# Define timeout, in seconds, for image analysis
imageAnalyzeTimeoutSeconds: 36000
# If true, when a user adds an ECR registry with username = awsauto then the system will look for an instance profile to use for auth against the registry
allowECRUseIAMRole: false
# Enable prometheus metrics
enableMetrics: true
# Disable auth on prometheus metrics
metricsAuthDisabled: false
# Sets the password & email address for the default anchore-engine admin user.
defaultAdminPassword: KebabNinja2020
defaultAdminEmail: jonas.juselius@tromso.serit.no
saml:
# Locations for keys used for signing and encryption. Only one of 'secret' or 'public_key_path'/'private_key_path' needs to be set. If all are set then the keys take precedence over the secret value
# Secret is for a shared secret and if set, all components in anchore should have the exact same value in their configs.
secret: Null
privateKeyName: Null
publicKeyName: Null
oauthEnabled: false
oauthTokenExpirationSeconds: 3600
# Set this to True to enable storing user passwords only as secure hashes in the db. This can dramatically increase CPU usage if you
# don't also use oauth and tokens for internal communications (which requires keys/secret to be configured as well)
# WARNING: you should not change this after a system has been initialized as it may cause a mismatch in existing passwords
hashedPasswords: false
# Configure the database connection within anchore-engine & enterprise-ui. This may get split into 2 different configurations based on service utilized.
dbConfig:
timeout: 120
# Use ssl, but the default postgresql config in helm's stable repo does not support ssl on server side, so this should be set for external dbs only.
# All ssl dbConfig values are only utilized when ssl=true
ssl: false
sslMode: verify-full
# sslRootCertName is the name of the postgres root CA certificate stored in anchoreGlobal.certStoreSecretName
sslRootCertName: Null
connectionPoolSize: 30
connectionPoolMaxOverflow: 100
internalServicesSsl:
# Enable to force all anchore-engine services to communicate internally using SSL
enabled: false
# specify whether the cert is verified against the local certificate bundle (allow self-signed certs if set to false)
verifyCerts: false
certSecretKeyName: Null
certSecretCertName: Null
# To enable webhooks, set webhooksEnabled: true
webhooksEnabled: true
# Configure webhook outputs here. The service provides these webhooks for notifying external systems of updates
webhooks:
# User and password to be set (using HTTP basic auth) on all webhook calls if necessary
webhook_user: Null
webhook_pass: Null
ssl_verify: false
# Endpoint for general notification delivery. These events are image/tag updates etc. This is globally configured
# and updates for all users are sent to the same host but with a different path for each user.
# <notification_type>/<userId> are required as documented at end of URI - only hostname:port should be configured.
general:
url: http://busynix.default
# url: "http://somehost:9090/<notification_type>/<userId>"
# Allow configuration of Kubernetes probes
probes:
liveness:
initialDelaySeconds: 120
timeoutSeconds: 10
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
readiness:
timeoutSeconds: 10
periodSeconds: 10
failureThreshold: 3
successThreshold: 1
# Configuration for the analyzer pods that perform image analysis
# There may be many of these analyzers but best practice is to not have more than one per node since analysis
# is very IO intensive. Use of affinity/anti-affinity rules for scheduling the analyzers is future work.
anchoreAnalyzer:
replicaCount: 1
containerPort: 8084
# Set extra environment variables. These will be set only on analyzer containers.
extraEnv: []
# - name: foo
# value: bar
# The cycle timer is the interval between checks to the work queue for new jobs
cycleTimers:
image_analyzer: 5
# Controls the concurrency of the analyzer itself. It can be configured to process more than one task at a time, but analysis is IO bound, so this may not
# necessarily be faster depending on hardware. Test and balance this value against the number of analyzers for your cluster's performance.
concurrentTasksPerWorker: 1
# Image layer caching can be enabled to speed up image downloads before analysis.
# This chart sets up a scratch directory for all analyzer pods using the values found at anchoreGlobal.scratchVolume.
# When setting anchoreAnalyzer.layerCacheMaxGigabytes, ensure the scratch volume has sufficient storage space.
# For more info see - https://docs.anchore.com/current/docs/engine/engine_installation/storage/layer_caching/
# Enable image layer caching by setting a cache size > 0GB.
layerCacheMaxGigabytes: 0
# Enable the ability to read a user-supplied 'hints' file to allow users to override and/or augment the software artifacts that are discovered by anchore during its image analysis process.
# Once enabled, the analyzer services will look for a file with a specific name, location and format located within the container image - /anchore_hints.json
# For more info see - https://docs.anchore.com/current/docs/engine/engine_installation/configuration/content_hints
enableHints: false
configFile:
# Anchore analyzer config file
#
# WARNING - malforming this file can cause the analyzer to fail on all image analysis
#
# Options for any analyzer module(s) that takes customizable input
#
# example configuration for the 'retrieve_files' analyzer, if installed
retrieve_files:
file_list:
- '/etc/passwd'
# - '/etc/services'
# - '/etc/sudoers'
# example configuration for the 'secret_search' analyzer, if installed
secret_search:
match_params:
- MAXFILESIZE=10000
- STOREONMATCH=n
regexp_match:
- "AWS_ACCESS_KEY=(?i).*aws_access_key_id( *=+ *).*(?<![A-Z0-9])[A-Z0-9]{20}(?![A-Z0-9]).*"
- "AWS_SECRET_KEY=(?i).*aws_secret_access_key( *=+ *).*(?<![A-Za-z0-9/+=])[A-Za-z0-9/+=]{40}(?![A-Za-z0-9/+=]).*"
- "PRIV_KEY=(?i)-+BEGIN(.*)PRIVATE KEY-+"
- "DOCKER_AUTH=(?i).*\"auth\": *\".+\""
- "API_KEY=(?i).*api(-|_)key( *=+ *).*(?<![A-Z0-9])[A-Z0-9]{20,60}(?![A-Z0-9]).*"
# - "ALPINE_NULL_ROOT=^root:::0:::::$"
# content_search:
# match_params:
# - MAXFILESIZE=10000
# regexp_match:
# - "EXAMPLE_MATCH="
# Uncomment the 'malware' section to enable use of the open-source ClamAV malware scanner to detect malicious code embedded in container images.
# This scan occurs only at analysis time when the image content itself is available, and the scan results are available via the Engine API as well as
# for consumption in new policy gates to allow gating of images with malware findings.
# For more detailed configuration info see - https://docs.anchore.com/current/docs/engine/general/concepts/images/analysis/malware_scanning
#
malware:
clamav:
enabled: true
db_update_enabled: true
# resources:
# limits:
# cpu: 1
# memory: 4G
# requests:
# cpu: 1
# memory: 1G
labels: {}
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Pod configuration for the anchore engine api service.
anchoreApi:
replicaCount: 1
# Set extra environment variables. These will be set on all api containers.
extraEnv: []
# - name: foo
# value: bar
# kubernetes service configuration for anchore external API
service:
type: ClusterIP
port: 8228
annotations: {}
label: {}
# (Optional) Overrides for constructing API URLs. All values are optional.
# external:
# use_tls: true
# hostname: anchore-api.example.com
# port: 8443
# resources:
# limits:
# cpu: 1
# memory: 4G
# requests:
# cpu: 100m
# memory: 1G
labels: {}
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
anchoreCatalog:
replicaCount: 1
# Set extra environment variables. These will be set on all catalog containers.
extraEnv: []
# - name: foo
# value: bar
# Intervals to run specific events on (seconds)
cycleTimers:
# Interval to check for an update to a tag
image_watcher: 3600
# Interval to run a policy evaluation on images with the policy_eval subscription activated.
policy_eval: 3600
# Interval to run a vulnerability scan on images with the vuln_update subscription activated.
vulnerability_scan: 14400
# Interval at which the catalog looks for new work to put on the image analysis queue.
analyzer_queue: 1
# Interval notifications will be processed for state changes
notifications: 30
# Intervals service state updates are polled for the system status
service_watcher: 15
# Interval between checks to repo for new tags
repo_watcher: 60
# Event log configuration for webhooks
events:
notification:
enabled: false
# Send notifications for events with severity level that matches items in this list
level:
- error
# - info
archive:
compression:
enabled: true
min_size_kbytes: 100
storage_driver:
# Valid storage driver names: 'db', 's3', 'swift'
name: s3
config:
url: https://minio.staging.itpartner.no
bucket: anchore
access_key: Mkd324ijlnfll23883
secret_key: KJQfefrnflol93jpj31mrkjs3i88sj2L
create_bucket: true
# kubernetes service configuration for anchore catalog api
service:
type: ClusterIP
port: 8082
annotations: {}
labels: {}
# resources:
# limits:
# cpu: 1
# memory: 2G
# requests:
# cpu: 100m
# memory: 500M
labels: {}
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Pod configuration for the anchore engine policy service.
anchorePolicyEngine:
replicaCount: 1
# Set extra environment variables. These will be set on all policy engine containers.
extraEnv: []
# - name: foo
# value: bar
# Intervals to run specific events on (seconds)
cycleTimers:
# Interval to run a feed sync to get latest cve data
feed_sync: 14400
# Interval between checks to see if there needs to be a task queued
feed_sync_checker: 3600
# kubernetes service configuration for anchore policy engine api
service:
type: ClusterIP
port: 8087
annotations: {}
labels: {}
# resources:
# limits:
# cpu: 1
# memory: 4G
# requests:
# cpu: 100m
# memory: 1G
labels: {}
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Pod configuration for the anchore engine simplequeue service.
anchoreSimpleQueue:
replicaCount: 1
# Set extra environment variables. These will be set on all simplequeue containers.
extraEnv: []
# - name: foo
# value: bar
# kubernetes service configuration for anchore simplequeue api
service:
type: ClusterIP
port: 8083
annotations: {}
labels: {}
# resources:
# limits:
# cpu: 1
# memory: 1G
# requests:
# cpu: 100m
# memory: 256M
labels: {}
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
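No deploy script for this chart is visible in the diff; following the pattern of the other deploy.sh scripts in this commit, a deployment sketch might look like the following (the namespace, repo alias and the upstream anchore-engine chart at https://charts.anchore.io are assumptions):

#!/usr/bin/env bash
# hypothetical charts/anchore/deploy.sh, mirroring the other deploy scripts
namespace=anchore
charts=()
. ../config.sh
kubectl create ns $namespace
kubectl_apply $namespace "${charts[@]}"
helm repo add anchore https://charts.anchore.io
helm template \
-n $namespace \
-f anchore.yaml \
anchore anchore/anchore-engine \
| make_substitutions \
| kubectl apply -n $namespace -f -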


@@ -0,0 +1,29 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: busynix
labels:
app: busynix
spec:
replicas: 1
selector:
matchLabels:
app: busynix
template:
metadata:
labels:
app: busynix
spec:
dnsPolicy: Default
containers:
- image: registry.gitlab.com/serit/k8s/busynix:1.1
name: busynix
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- "-c"
- "while true; do echo ping | nc -l -p 8000; done"
ports:
- containerPort: 8000
imagePullSecrets:
- name: gitlab-pull-secret
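The container answers every TCP connection on port 8000 with "ping"; the anchore webhook earlier in this commit targets it as http://busynix.default, presumably via a Service that is not part of this diff. A minimal connectivity check, assuming the app=busynix label above:

pod_ip=$(kubectl get pod -l app=busynix -o jsonpath='{.items[0].status.podIP}')
kubectl run nc-test --rm -it --image=busybox --restart=Never -- nc $pod_ip 8000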

charts/busynix/deploy.sh Executable file

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
namespace=default
charts=(
busynix.yaml
)
. ../config.sh
kubectl_apply $namespace "${charts[@]}"


@@ -0,0 +1,9 @@
apiVersion: v1
data:
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURhakNDQWxLZ0F3SUJBZ0lVWXVkVEZ2VEpEYW1QUHRSNFh6dzJTMGROMzZjd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUtFd1ZPYVhoUFV6RW5NQ1VHQTFVRQpDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWak1CNFhEVEU1TVRBeE5UQTVNakV3Ck1Gb1hEVEkwTVRBeE16QTVNakV3TUZvd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUsKRXdWT2FYaFBVekVuTUNVR0ExVUVDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWagpNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXVrV0lONVhwRkI2NTJuazdzUGx1CklqNTZTY05sakpOV0JZZXNWaDE4MjhNUE9VVHpuZTgxeUhTMHg3WFVtcGN5VDdDNmRQdlVWckdZUUZCMnZqN0oKcm9Td04xWGtZeW10YXVQVFpqV2J6ZVdSbG1nZ0dpajFhUUZOZy9LelkzZTBkcGdVUEk3TXZQcjFJU001c3JuWgp0YlRRckZmMkVsc3dvVENPMWNYRGMyNTJnblphRW56ZlJEN0o2eXoyVXYvMUZSei9aY29DZVViSnJkNWJjTkk5CmdKYU95MDE0MEdHRzY3WWRtZXBjQWJXeTFOYkFNWlJCamFUUStmZUVWa0p4UGNRNWZqMUhrQ0RuTHJjeEpmdzEKSWhvZFZlNFdLTkhyaUFGR3JTS3JIM1VLb045RFVtU1RGOVVEUWtOQS9zNTRkaEJyMWJFa25lMW1EbGwwYWZhWApqd0lEQVFBQm8wSXdRREFPQmdOVkhROEJBZjhFQkFNQ0FRWXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WCkhRNEVGZ1FVUFQ2THhyWkNVcFEyakRtQ01DRGQ2aVVMbXdnd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLaXMKaFc2bGRBVjZjbnZlM3dhem5aRTVzT1B5T1hRWFdEc2x0a3RzU3B6WU9ocjU5WTZ3Q2oxL05vbGRRaXY4UUJ4TwpQYVF3QUJSL3Evc0p1bUhEODJwSzRwbDZXOThWTDFPdW1wOEoxZjFGV0ZqKzJpMitObG1UUzlHU3FhcGRMdTJoCk9YTUJ6M0JDZFZWSVQ4RGhzZVRBL21WeXlRV1h3LzlsUTYxT1N1c1BubmUwcDBwbkJGTU1TVUhNWFZWeGsrK2UKYzdNTGVVcXFObHliM1JDeGNVQkVTa1h3d1dIaFhhdVR0OTlGQjB5SFNKeTMxd0FNL2pHeUJhdlphb3VMRGticQowNXdwQ3dxQzl6SEFlZElUcThXOUhOQVA5Q1FjejdsV3lDRHhkZ2orN2hoeEdrUUoyYmpFMGxlWlA1bXphRXUyCjdPYUlDVkR0cGE5T2FXY3FpSUE9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdWtXSU41WHBGQjY1Mm5rN3NQbHVJajU2U2NObGpKTldCWWVzVmgxODI4TVBPVVR6Cm5lODF5SFMweDdYVW1wY3lUN0M2ZFB2VVZyR1lRRkIydmo3SnJvU3dOMVhrWXltdGF1UFRaaldiemVXUmxtZ2cKR2lqMWFRRk5nL0t6WTNlMGRwZ1VQSTdNdlByMUlTTTVzcm5adGJUUXJGZjJFbHN3b1RDTzFjWERjMjUyZ25aYQpFbnpmUkQ3SjZ5ejJVdi8xRlJ6L1pjb0NlVWJKcmQ1YmNOSTlnSmFPeTAxNDBHR0c2N1lkbWVwY0FiV3kxTmJBCk1aUkJqYVRRK2ZlRVZrSnhQY1E1ZmoxSGtDRG5McmN4SmZ3MUlob2RWZTRXS05IcmlBRkdyU0tySDNVS29OOUQKVW1TVEY5VURRa05BL3M1NGRoQnIxYkVrbmUxbURsbDBhZmFYandJREFRQUJBb0lCQURlY2RyU1Jyd3B3dWUvOQo3ZEpDUlowM3BlN0x4VStZMVQvRlo3QTJFWWJTejRLN2tUZi9xTUQyQnRydzhFNVBBWmhGSFFXOEFZTDRkb29NCis4YUVuRlhjQzNnWnpwdkVYNS9mOE1jOURnMEVrdGlpc2FMZHc1YmI2cmFRQ1BxVFRHOXpxK2NBZ3lPUWtRcVoKcDZveVRrc3ZkblZkdFh0NWVpVkZoLzlPa2JHdUw3Y0RFNm03c1dDOVIwemdQQmFtTThXNDBJaGJqcVBod25mSgpYMmd0TkFzMjhpNE05aHIweVlKbTk4VHh5Tk9laVlxdFRrRkFROGVRRzRvck9mSDVvTTdONmcxc295VHpYOFlhCkorZHdHQnpuMFFQV25UVGpMUy9uSm9Ld3BEc3AySngvbTluSUFGanl4WERkT0xHVERQMW1hbFZsVHAyM1VMV0gKNE8vd1hJRUNnWUVBOVBTZ1BmSzNCZjBaeEpHNVFKWGNIdzRUM1pWSFRVV2FzNFpoc1hCRWd0SnQ5NHdIamh2egpuUTV0LzJlMlNNSXU1Q1g5MnRVYjBGQTRqRW8ybDdBNTlXY2NscWRQS0pQR2QveENvbXFQcTZsK0NWckh4NkhsCncvRTJpdHRrVWpicDF2ZjlFWTBsRGwwdkJ1QzVzQlY5OEZET2lFaThCSC92VnowR3QvZm53cThDZ1lFQXdxdU4KZkRwZUtEOHVWZ3lOQUdrSVlXWDl1RFkvL2pENHdQVXJGVktIalB4Y2ZWUDAra2FYVzNnM0cwcnBaU2xLME1LeQprdk9PaW1wb0svUk5wMUJ4bHZnSkdaL0NZeHNsWWZhSUtEenR1OGRnTlFKbktjU0s4S3krQ1c5cWQ2SEk3MGRUCnRManN6TnlGWnR6YmxQVDlJS1ZVb09kUjNUZ0JEOFpaNW1VZE1TRUNnWUVBMTBWZ3lmQlRMbDBuVnh2bDNUK1cKU21oNXh1TVkzV3pOTHZaclJwL3VaYlgrMUcxb2VsaFFPUGRrYmUrOFA2Yi94SnNxeERWREUwaGMyZFdpc3A0NQo0VlB6eU9aU1o4dXV3T1dkdmRTK1hGTkJJNEYzVHhjVnNLUjhsWDJIWmNWQ3Jod1VlR2M5YUtrMTJlcUc1WnVOCnFUT3F2aFNGdjhLYkdXVFZVYm41SUpzQ2dZQXNiQTlXT00rQnFhRStPNUQxZGdKV2ozcU5UYnZlbW5nNU4va0QKM1poWEFNTnVTZmxPSDB1c3NNc0tiRmJFSWQ4VHNrb2pwSFp6ZVlLWi9SNjNQbkVkUFM1K2JXbGpTaStHb2taQgp3RjJUaXhTV0pCMDhkOEFvMlpKbi9zZXgwdlpTTzltTEZPUGNmN25sVmlLNVpBcGJKNzhmRklvbXkvL2FCSzVCCkYvUElZUUtCZ0J1dVR1MVFhVVBTczE2Zi81MTVPSjJaQXYvOE82bXRmWjJnQ0RJSWYvZG04VFFOQTE5UnVBeXUKYU1WMGxPMXphSC9tR0ZFSkRyYUNFVkFSd1MzNkY0eWFyNUI4WDdtaTVsdGZUTm5hZEsyZDJuaWRldjFMWlZETgo2K3pHaUpIb1BTaEJXRjdYanh5aEdwcmdGczZhdE5Fc28zTFpHMEdZSHBHOFNGakljb1VmCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
kind: Secret
metadata:
name: cluster-ca
namespace: cert-manager
type: kubernetes.io/tls


@@ -0,0 +1,37 @@
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
name: letsencrypt-production
namespace: kube-system
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: @acme_email@
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-production
solvers:
- http01:
ingress:
class: nginx
---
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
name: ca-issuer
namespace: kube-system
spec:
ca:
secretName: cluster-ca
---
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
name: selfsigning-issuer
namespace: kube-system
spec:
selfSigned: {}
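The ca-issuer is consumed elsewhere in this commit through the cert-manager.io/cluster-issuer ingress annotation, but a certificate can also be requested from it explicitly. An illustrative request with hypothetical names:

kubectl apply -f - <<EOF
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
  name: demo-tls
  namespace: default
spec:
  secretName: demo-tls
  dnsNames:
  - demo.k2.local
  issuerRef:
    name: ca-issuer
    kind: ClusterIssuer
EOF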


@@ -0,0 +1,21 @@
#!/usr/bin/env bash
version="v1.0.4"
namespace=kube-system
charts=(
cluster-ca.yaml
cluster-issuer.yaml
front-proxy-client.yaml
)
. ../config.sh
kubectl_apply $namespace "${charts[@]}"
kubectl apply \
-n $namespace \
--validate=false \
-f https://github.com/jetstack/cert-manager/releases/download/$version/cert-manager.yaml


@@ -0,0 +1,35 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: front-proxy-client
subjects:
- kind: User
name: front-proxy-client
apiGroup: rbac.authorization.k8s.io
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: front-proxy-client
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: front-proxy-client
rules:
- apiGroups:
- "webhook.cert-manager.io"
resources:
- mutations
- validations
verbs: [ "*" ]
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch


@@ -0,0 +1,24 @@
# Default values for cert-manager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
prometheus:
enabled: true
servicemonitor:
enabled: true
prometheusInstance: default
targetPort: 9402
path: /metrics
interval: 60s
scrapeTimeout: 30s
labels: {}
webhook:
enabled: true
replicaCount: 1
cainjector:
enabled: true

charts/config.sh Executable file

@@ -0,0 +1,34 @@
#!/usr/bin/env bash
vars=(
initca="@initca@"
apiserver="@apiserver@"
cluster="@cluster@"
ingress_nodes="@ingress_nodes@"
fileserver="@fileserver@"
acme_email="@acme_email@"
grafana_ldap_toml="@grafana_ldap_toml@"
grafana_smtp_user="@grafana_smtp_user@"
grafana_smtp_password="@grafana_smtp_password@"
)
# Make the @key@ placeholders available as shell variables as well,
# so deploy scripts can reference e.g. $initca and $fileserver directly.
for i in "${vars[@]}"; do
    declare "$i"
done
# Replace every @key@ placeholder read from stdin with its value.
make_substitutions () {
    sed_args=()
    for i in "${vars[@]}"; do
        k=${i%%=*}
        v=${i#*=}
        sed_args+=(-e "s|@$k@|$v|g")
    done
    sed "${sed_args[@]}"
}
# Substitute placeholders in each chart file and apply it to the given namespace.
kubectl_apply () {
    ns=$1; shift
    charts=("$@")
    for i in "${charts[@]}"; do
        make_substitutions < "$i" | kubectl apply -n "$ns" -f -
    done
}
# vim:ft=sh
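Once sourced, any manifest or rendered helm output can be piped through make_substitutions to expand the @key@ placeholders; a small illustration with hypothetical input:

. ../config.sh
echo 'nfs server: @fileserver@ in cluster @cluster@' | make_substitutions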

charts/ingress-nginx/deploy.sh Executable file

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
namespace=kube-system
charts=()
. ../config.sh
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
ingress \
ingress-nginx/ingress-nginx \
| make_substitutions \
| kubectl apply -n $namespace -f -


@@ -0,0 +1,688 @@
## nginx configuration
## Ref: https://github.com/kubernetes/ingress-nginx/blob/master/controllers/nginx/configuration.md
##
controller:
image:
repository: k8s.gcr.io/ingress-nginx/controller
tag: "v0.40.2"
digest: sha256:46ba23c3fbaafd9e5bd01ea85b2f921d9f2217be082580edc22e6c704a83f02f
pullPolicy: IfNotPresent
# www-data -> uid 101
runAsUser: 101
allowPrivilegeEscalation: true
# Configures the ports the nginx-controller listens on
containerPort:
http: 80
https: 443
# Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
config: {}
## Annotations to be added to the controller config configuration configmap
##
configAnnotations: {}
# Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers
proxySetHeaders: {}
# Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
addHeaders: {}
# Optionally customize the pod dnsConfig.
dnsConfig: {}
# Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
# By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
# to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
dnsPolicy: ClusterFirst
# Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
# When using the host network there is no Service exposing the NGINX Ingress controller, so the ingress status would otherwise be blank; the default --publish-service flag used in standard cloud setups does not apply
reportNodeInternalIp: false
# Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
# since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
# is merged
hostNetwork: true
## Use host ports 80 and 443
## Disabled by default
##
hostPort:
enabled: false
ports:
http: 80
https: 443
## Election ID to use for status update
##
electionID: ingress-controller-leader
## Name of the ingress class to route through this controller
##
ingressClass: nginx
# labels to add to the pod container metadata
podLabels: {}
# key: value
## Security Context policies for controller pods
##
podSecurityContext: {}
## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
## notes on enabling and using sysctls
###
sysctls: {}
# sysctls:
# "net.core.somaxconn": "8192"
## Allows customization of the source of the IP address or FQDN to report
## in the ingress status field. By default, it reads the information provided
## by the service. If disabled, the status field reports the IP address of the
## node or nodes where an ingress controller pod is running.
publishService:
enabled: true
## Allows overriding of the publish service to bind to
## Must be <namespace>/<service_name>
##
pathOverride: ""
## Limit the scope of the controller
##
scope:
enabled: false
namespace: "" # defaults to .Release.Namespace
## Allows customization of the configmap / nginx-configmap namespace
##
configMapNamespace: "" # defaults to .Release.Namespace
## Allows customization of the tcp-services-configmap
##
tcp:
configMapNamespace: "" # defaults to .Release.Namespace
## Annotations to be added to the tcp config configmap
annotations: {}
## Allows customization of the udp-services-configmap
##
udp:
configMapNamespace: "" # defaults to .Release.Namespace
## Annotations to be added to the udp config configmap
annotations: {}
## Additional command line arguments to pass to nginx-ingress-controller
## E.g. to specify the default SSL certificate you can use
## extraArgs:
## default-ssl-certificate: "<namespace>/<secret_name>"
extraArgs: {}
## Additional environment variables to set
extraEnvs: []
# extraEnvs:
# - name: FOO
# valueFrom:
# secretKeyRef:
# key: FOO
# name: secret-resource
## DaemonSet or Deployment
##
kind: Deployment
## Annotations to be added to the controller Deployment or DaemonSet
##
annotations: {}
# keel.sh/pollSchedule: "@every 60m"
## Labels to be added to the controller Deployment or DaemonSet
##
labels: {}
# keel.sh/policy: patch
# keel.sh/trigger: poll
# The update strategy to apply to the Deployment or DaemonSet
##
updateStrategy: {}
# rollingUpdate:
# maxUnavailable: 1
# type: RollingUpdate
# minReadySeconds to avoid killing pods before we are ready
##
minReadySeconds: 0
## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Equal
value: "true"
- effect: NoSchedule
key: unschedulable
operator: Equal
value: "true"
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
## Affinity and anti-affinity
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values: [ @ingress_nodes@ ]
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- ingress-nginx
- key: app.kubernetes.io/instance
operator: In
values:
- ingress-nginx
- key: app.kubernetes.io/component
operator: In
values:
- controller
topologyKey: "kubernetes.io/hostname"
# # An example of preferred pod anti-affinity, weight is in the range 1-100
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 100
# podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/instance
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/component
# operator: In
# values:
# - controller
# topologyKey: kubernetes.io/hostname
## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
##
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: failure-domain.beta.kubernetes.io/zone
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: ingress-nginx-internal
## terminationGracePeriodSeconds
## wait up to five minutes for the drain of connections
##
terminationGracePeriodSeconds: 300
## Node labels for controller pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector:
kubernetes.io/os: linux
## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
port: 10254
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
port: 10254
# Path of the health check endpoint. All requests received on the port defined by
# the healthz-port parameter are forwarded internally to this path.
healthCheckPath: "/healthz"
## Annotations to be added to controller pods
##
podAnnotations: {}
replicaCount: @ingress_replicas@
minAvailable: 3
# Define requests resources to avoid probe issues due to CPU utilization in busy nodes
# ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903
# Ideally, there should be no limits.
# https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
resources:
# limits:
# cpu: 100m
# memory: 90Mi
requests:
cpu: 100m
memory: 90Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 11
targetCPUUtilizationPercentage: 50
targetMemoryUtilizationPercentage: 50
autoscalingTemplate: []
# Custom or additional autoscaling metrics
# ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
# - type: Pods
# pods:
# metric:
# name: nginx_ingress_controller_nginx_process_requests_total
# target:
# type: AverageValue
# averageValue: 10000m
## Enable mimalloc as a drop-in replacement for malloc.
## ref: https://github.com/microsoft/mimalloc
##
enableMimalloc: true
## Override NGINX template
customTemplate:
configMapName: ""
configMapKey: ""
service:
enabled: true
annotations: {}
labels: {}
# clusterIP: ""
## List of IP addresses at which the controller services are available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
# loadBalancerIP: ""
loadBalancerSourceRanges: []
enableHttp: true
enableHttps: true
## Set external traffic policy to: "Local" to preserve source IP on
## providers supporting it
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
# externalTrafficPolicy: ""
# Must be either "None" or "ClientIP" if set. Kubernetes will default to "None".
# Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
# sessionAffinity: ""
# specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn't specified,
# the service controller allocates a port from your cluster's NodePort range.
# Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
# healthCheckNodePort: 0
ports:
http: 80
https: 443
targetPorts:
http: http
https: https
type: ClusterIP
# type: NodePort
# nodePorts:
# http: 32080
# https: 32443
# tcp:
# 8080: 32808
nodePorts:
http: 30080
https: 30443
tcp: {}
udp: {}
## Enables an additional internal load balancer (besides the external one).
## Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
internal:
enabled: false
annotations: {}
## Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0.
loadBalancerSourceRanges: []
## Set external traffic policy to: "Local" to preserve source IP on
## providers supporting it
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
# externalTrafficPolicy: ""
extraContainers: []
## Additional containers to be added to the controller pod.
## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
# - name: my-sidecar
# image: nginx:latest
# - name: lemonldap-ng-controller
# image: lemonldapng/lemonldap-ng-controller:0.2.0
# args:
# - /lemonldap-ng-controller
# - --alsologtostderr
# - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
# env:
# - name: POD_NAME
# valueFrom:
# fieldRef:
# fieldPath: metadata.name
# - name: POD_NAMESPACE
# valueFrom:
# fieldRef:
# fieldPath: metadata.namespace
# volumeMounts:
# - name: copy-portal-skins
# mountPath: /srv/var/lib/lemonldap-ng/portal/skins
extraVolumeMounts: []
## Additional volumeMounts to the controller main container.
# - name: copy-portal-skins
# mountPath: /var/lib/lemonldap-ng/portal/skins
extraVolumes: []
## Additional volumes to the controller pod.
# - name: copy-portal-skins
# emptyDir: {}
extraInitContainers: []
## Containers, which are run before the app containers are started.
# - name: init-myservice
# image: busybox
# command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
admissionWebhooks:
annotations: {}
enabled: true
failurePolicy: Fail
# timeoutSeconds: 10
port: 8443
certificate: "/usr/local/certificates/cert"
key: "/usr/local/certificates/key"
namespaceSelector: {}
objectSelector: {}
service:
annotations: {}
# clusterIP: ""
externalIPs: []
# loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 443
type: ClusterIP
patch:
enabled: true
image:
repository: docker.io/jettech/kube-webhook-certgen
tag: v1.5.0
pullPolicy: IfNotPresent
## Provide a priority class name to the webhook patching job
##
priorityClassName: ""
podAnnotations: {}
nodeSelector: {}
tolerations: []
runAsUser: 2000
metrics:
port: 10254
# if this port is changed, change healthz-port: in extraArgs: accordingly
enabled: true
service:
annotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "10254"
# clusterIP: ""
## List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
# loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 9913
type: ClusterIP
# externalTrafficPolicy: ""
# nodePort: ""
serviceMonitor:
enabled: true
additionalLabels: {}
namespace: ""
namespaceSelector: {}
# Default: scrape .Release.Namespace only
# To scrape all, use the following:
# namespaceSelector:
# any: true
scrapeInterval: 30s
# honorLabels: true
targetLabels: []
metricRelabelings: []
prometheusRule:
enabled: false
additionalLabels: {}
# namespace: ""
rules: []
# # These are just examples rules, please adapt them to your needs
# - alert: NGINXConfigFailed
# expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
# for: 1s
# labels:
# severity: critical
# annotations:
# description: bad ingress config - nginx config test failed
# summary: uninstall the latest ingress changes to allow config reloads to resume
# - alert: NGINXCertificateExpiry
# expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
# for: 1s
# labels:
# severity: critical
# annotations:
# description: ssl certificate(s) will expire in less than a week
# summary: renew expiring certificates to avoid downtime
# - alert: NGINXTooMany500s
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
# for: 1m
# labels:
# severity: warning
# annotations:
# description: Too many 5XXs
# summary: More than 5% of all requests returned 5XX, this requires your attention
# - alert: NGINXTooMany400s
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
# for: 1m
# labels:
# severity: warning
# annotations:
# description: Too many 4XXs
# summary: More than 5% of all requests returned 4XX, this requires your attention
## Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
## With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
## to 300, allowing the draining of connections up to five minutes.
## If the active connections end before that, the pod will terminate gracefully at that time.
## To take full advantage of this feature, the ConfigMap option
## worker-shutdown-timeout is raised from its 10s default to 240s.
##
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
priorityClassName: ""
## Rollback limit
##
revisionHistoryLimit: 10
# Maxmind license key to download GeoLite2 Databases
# https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
maxmindLicenseKey: ""
## Default 404 backend
##
defaultBackend:
##
enabled: true
image:
repository: k8s.gcr.io/defaultbackend-amd64
tag: "1.5"
pullPolicy: IfNotPresent
# nobody user -> uid 65534
runAsUser: 65534
extraArgs: {}
serviceAccount:
create: true
name:
## Additional environment variables to set for defaultBackend pods
extraEnvs: []
port: 8080
## Readiness and liveness probes for default backend
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
failureThreshold: 6
initialDelaySeconds: 0
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
affinity: {}
## Security Context policies for controller pods
## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
## notes on enabling and using sysctls
##
podSecurityContext: {}
# labels to add to the pod container metadata
podLabels: {}
# key: value
## Node labels for default backend pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Annotations to be added to default backend pods
##
podAnnotations: {}
replicaCount: 1
minAvailable: 1
resources: {}
# limits:
# cpu: 10m
# memory: 20Mi
# requests:
# cpu: 10m
# memory: 20Mi
service:
annotations: {}
# clusterIP: ""
## List of IP addresses at which the default backend service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
# loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 80
type: ClusterIP
priorityClassName: ""
## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266
rbac:
create: true
scope: false
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
serviceAccount:
create: true
name:
## Optional array of imagePullSecrets containing private registry credentials
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# - name: secretName
# TCP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp
##
tcp: {}
# 8080: "default/example-tcp-svc:9000"
# UDP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp
##
udp: {}
# 53: "kube-system/kube-dns:53"


@@ -0,0 +1,44 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: cluster-admin
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cluster-admin
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
namespace: kube-system
name: cluster-admin
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system-default
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
namespace: kube-system
name: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubernetes

charts/kube-system/deploy.sh Executable file

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
namespace=kube-system
charts=(
cluster-auth-rbac.yaml
kube-proxy.yaml
)
. ../config.sh
kubectl_apply $namespace "${charts[@]}"
kubectl delete secret cluster-ca -n $namespace
kubectl create secret tls cluster-ca \
--namespace=$namespace --cert=$initca/ca.pem --key=$initca/ca-key.pem
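After this runs, the recreated cluster-ca secret can be sanity-checked without decoding it by hand:

kubectl -n kube-system get secret cluster-ca -o jsonpath='{.data.tls\.crt}' \
| base64 -d | openssl x509 -noout -subject -enddate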


@@ -0,0 +1,37 @@
# This cluster role binding grants the kube-proxy user the permissions defined in the kube-proxy cluster role.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kube-proxy
subjects:
- kind: User
name: kube-proxy
apiGroup: rbac.authorization.k8s.io
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-proxy
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kube-proxy
rules:
- apiGroups:
- ""
resources:
- endpoints
- events
- services
- nodes
verbs: ["get", "watch", "list"]
- nonResourceURLs: ["*"]
verbs: ["get", "watch", "list"]
- apiGroups:
- ""
resources:
- events
verbs: ["*"]
- nonResourceURLs: ["*"]
verbs: ["*"]


@@ -0,0 +1,24 @@
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: kubernetes-dashboard
namespace: kubernetes-dashboard
annotations:
kubernetes.io/ingress.class: "nginx"
cert-manager.io/cluster-issuer: letsencrypt-production
nginx.ingress.kubernetes.io/whitelist-source-range: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
rules:
- host: dashboard.@cluster@.itpartner.no
http:
paths:
- backend:
serviceName: kubernetes-dashboard
servicePort: 443
tls:
- hosts:
- dashboard.@cluster@.itpartner.no
secretName: kubernetes-dashboard-tls


@@ -0,0 +1,17 @@
#!/usr/bin/env bash
version="v2.0.4"
namespace=kubernetes-dashboard
charts=(
dashboard-ingress.yaml
)
. ../config.sh
kubectl create ns $namespace
kubectl_apply $namespace "${charts[@]}"
kubectl apply \
-n $namespace \
-f https://raw.githubusercontent.com/kubernetes/dashboard/$version/aio/deploy/recommended.yaml
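recommended.yaml creates a kubernetes-dashboard service account; one way to obtain a bearer token for the login screen (note this account only has the limited rights granted by the upstream manifest) is:

kubectl -n kubernetes-dashboard get secret \
$(kubectl -n kubernetes-dashboard get sa kubernetes-dashboard -o jsonpath='{.secrets[0].name}') \
-o jsonpath='{.data.token}' | base64 -d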

charts/metrics-server/deploy.sh Executable file

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
namespace=kube-system
charts=()
. ../config.sh
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
metrics-server \
stable/metrics-server \
| make_substitutions \
| sed 's/8443/6443/g' \
| kubectl apply -f -


@@ -0,0 +1,11 @@
image:
pullPolicy: Always
rbac:
create: true
apiService:
create: true
hostNetwork:
enabled: true
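Once the chart is deployed with the deploy.sh above, the aggregated metrics API should report Available after a short delay; a quick check:

kubectl get apiservice v1beta1.metrics.k8s.io
kubectl top nodes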

charts/minio/deploy.sh Normal file

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
namespace=minio
charts=()
. ../config.sh
kubectl create ns $namespace
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
minio minio/minio \
| make_substitutions \
| kubectl apply -n $namespace -f -

charts/minio/values.yaml Normal file

@@ -0,0 +1,46 @@
# helm repo add minio https://helm.min.io/
# helm install --version 6.0.5 -f minio.yaml -n minio minio minio/minio
accessKey: Mkd324ijlnfll23883
secretKey: KJQfefrnflol93jpj31mrkjs3i88sj2L
# environment:
# MINIO_ACCESS_KEY_OLD: YOURACCESSKEY
# MINIO_SECRET_KEY_OLD: YOURSECRETKEY
defaultBucket:
enabled: true
name: default
policy: none
purge: false
buckets:
- name: serit
policy: none
purge: false
- name: gitlab
policy: none
purge: false
clusterDomain: kube2.local
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/ssl-redirect: "true"
cert-manager.io/cluster-issuer: ca-issuer
nginx.ingress.kubernetes.io/whitelist-source-range: 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16
hosts:
- minio.k2.local
tls:
- hosts:
- minio.k2.local
secretName: minio-tls
persistence:
enabled: true
size: 100Gi
storageClass: managed-nfs-storage
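These are the same access/secret keys used by the anchore catalog's s3 archive driver earlier in this commit. With the MinIO client and the ingress host above, a quick bucket listing looks roughly like this (mc alias set is the newer syntax; older releases use mc config host add):

mc alias set k2 https://minio.k2.local Mkd324ijlnfll23883 KJQfefrnflol93jpj31mrkjs3i88sj2L
mc ls k2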


@@ -0,0 +1,18 @@
#!/usr/bin/env bash
namespace=kube-system
charts=()
. ../config.sh
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
nfs-client-provisioner \
stable/nfs-client-provisioner \
| make_substitutions \
| kubectl apply -n $namespace -f -


@@ -0,0 +1,13 @@
image:
tag: latest
storageClass:
name: managed-nfs-storage
defaultClass: true
reclaimPolicy: Delete
archiveOnDelete: true
nfs:
server: @fileserver@
path: /@cluster@
mountOptions:
- nfsvers=4.1
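Since managed-nfs-storage is installed as the default StorageClass, the PVCs requested by the anchore postgresql and minio values in this commit bind against it. A throwaway claim to verify dynamic provisioning (name and size are hypothetical):

kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-test
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: managed-nfs-storage
EOF
kubectl get pvc nfs-test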


@@ -0,0 +1,19 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
app: app-monitor
release: prometheus-operator # required by Prometheus CRD
name: app-monitor
namespace: kube-system
spec:
endpoints:
- interval: 30s
port: http
path: /metrics
jobLabel: app.kubernetes.io/instance
namespaceSelector:
any: true
selector:
matchLabels:
prometheus.io/monitor: http
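Any Service carrying the prometheus.io/monitor: http label whose port named http serves /metrics is picked up by this monitor in every namespace. Opting an existing service in is then a one-liner (service name and namespace are hypothetical):

kubectl -n myapp label service myapp prometheus.io/monitor=http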


@@ -0,0 +1,57 @@
#!/usr/bin/env bash
crd_version="v0.42.0"
namespace=prometheus
charts=(
etcd-cert-secret.yaml
app-servicemonitor.yaml
grafana-ldap-toml.yaml
grafana-smtp-secret.yaml
prometheus-etcd-cert.yaml
gitlab-redis-servicemonitor.yaml
gitlab-servicemonitor.yaml
ingress-nginx-servicemonitor.yaml
)
. ../config.sh
install_prometheus_crds () {
crd=(
monitoring.coreos.com_alertmanagers.yaml
monitoring.coreos.com_podmonitors.yaml
monitoring.coreos.com_probes.yaml
monitoring.coreos.com_prometheuses.yaml
monitoring.coreos.com_prometheusrules.yaml
monitoring.coreos.com_servicemonitors.yaml
monitoring.coreos.com_thanosrulers.yaml
)
url=https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/$crd_version/example/prometheus-operator-crd
for i in ${crd[@]}; do
kubectl apply -f $url/$i
done
}
disable_unset () {
    yaml=$(cat)
    # disable ldap for grafana when no ldap toml is provided
    if [ -z "$grafana_ldap_toml" ]; then
        yaml=$(echo "$yaml" | sed '/auth\.ldap:/,+1 s/true/false/; /ldap:/,+1 d')
    fi
    # disable persistent storage when no fileserver is configured
    if [ -z "$fileserver" ]; then
        yaml=$(echo "$yaml" | sed '/prometheusSpec:/,+10d')
    fi
    echo "$yaml"
}
kubectl create ns $namespace
install_prometheus_crds
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
prometheus \
prometheus-community/kube-prometheus-stack \
| make_substitutions \
| disable_unset \
| kubectl apply -n $namespace -f -


@@ -0,0 +1,12 @@
apiVersion: v1
kind: Secret
metadata:
labels:
app: prometheus-operator
name: etcd-cert
namespace: prometheus
data:
ca.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURhakNDQWxLZ0F3SUJBZ0lVWXVkVEZ2VEpEYW1QUHRSNFh6dzJTMGROMzZjd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUtFd1ZPYVhoUFV6RW5NQ1VHQTFVRQpDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWak1CNFhEVEU1TVRBeE5UQTVNakV3Ck1Gb1hEVEkwTVRBeE16QTVNakV3TUZvd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUsKRXdWT2FYaFBVekVuTUNVR0ExVUVDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWagpNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXVrV0lONVhwRkI2NTJuazdzUGx1CklqNTZTY05sakpOV0JZZXNWaDE4MjhNUE9VVHpuZTgxeUhTMHg3WFVtcGN5VDdDNmRQdlVWckdZUUZCMnZqN0oKcm9Td04xWGtZeW10YXVQVFpqV2J6ZVdSbG1nZ0dpajFhUUZOZy9LelkzZTBkcGdVUEk3TXZQcjFJU001c3JuWgp0YlRRckZmMkVsc3dvVENPMWNYRGMyNTJnblphRW56ZlJEN0o2eXoyVXYvMUZSei9aY29DZVViSnJkNWJjTkk5CmdKYU95MDE0MEdHRzY3WWRtZXBjQWJXeTFOYkFNWlJCamFUUStmZUVWa0p4UGNRNWZqMUhrQ0RuTHJjeEpmdzEKSWhvZFZlNFdLTkhyaUFGR3JTS3JIM1VLb045RFVtU1RGOVVEUWtOQS9zNTRkaEJyMWJFa25lMW1EbGwwYWZhWApqd0lEQVFBQm8wSXdRREFPQmdOVkhROEJBZjhFQkFNQ0FRWXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WCkhRNEVGZ1FVUFQ2THhyWkNVcFEyakRtQ01DRGQ2aVVMbXdnd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLaXMKaFc2bGRBVjZjbnZlM3dhem5aRTVzT1B5T1hRWFdEc2x0a3RzU3B6WU9ocjU5WTZ3Q2oxL05vbGRRaXY4UUJ4TwpQYVF3QUJSL3Evc0p1bUhEODJwSzRwbDZXOThWTDFPdW1wOEoxZjFGV0ZqKzJpMitObG1UUzlHU3FhcGRMdTJoCk9YTUJ6M0JDZFZWSVQ4RGhzZVRBL21WeXlRV1h3LzlsUTYxT1N1c1BubmUwcDBwbkJGTU1TVUhNWFZWeGsrK2UKYzdNTGVVcXFObHliM1JDeGNVQkVTa1h3d1dIaFhhdVR0OTlGQjB5SFNKeTMxd0FNL2pHeUJhdlphb3VMRGticQowNXdwQ3dxQzl6SEFlZElUcThXOUhOQVA5Q1FjejdsV3lDRHhkZ2orN2hoeEdrUUoyYmpFMGxlWlA1bXphRXUyCjdPYUlDVkR0cGE5T2FXY3FpSUE9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0=
etcd-key.pem: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeVNscjhaYVcwUVptMzlDMElNSVV2V0pnek5DQlJvRlRMMUdaZDZqVldSKzRhMWNTCjg0ZVpuNXhEY2xLL2d3Y0JJcWs3dm9kVXIrenRMVWgzVjIxQnA2ZHljVXpzT041T2lna2l3YTBneStRaHpYZVcKdCtxbStpZFQrbTdzSWMzZVFkd1QxRkt0elRjaGEwMmhNRFNOL2RBTkVOZ0hzUDZTdGZGbyt4UDQ5VS9MRURnUQpZYVM1VlM1L2ZsV2NWejYxMlF4UEtZL2hJejMwdEJYNkQxcHFVL3VyRzVtWmtOUnFFYmVlYWpvNEVNVW5BY3NyCkxhS0txYjlxa2NqcVVwS2FzMTJDNmhvYUFRTkxsL3dYMmkvOVhnSTZxZ2Z6QTNhM0xvSkt3VW8xdVhBY1Jyc3AKQTdkTjlwR3dTakhudXMvaUZpdTBaUTM1ZWhHVjNkYVZ3V1NFYVFJREFRQUJBb0lCQVFDdlU0cmhaWDdOYitiNQo2ODBUUUJVVGUwc2pPQmQwZkFNa210cEhrTDlpam1NT0t6WTVTMVo1NXBoaWEveS9GcnZHbWZtWWRoczV1aVo5CjhVc1N5QWNST01CbVA4VWpTNTdvY0UzNndBcDFxc0JMZEkvSWZKeE1LenJtYXdjajcycG52SWtMNVlQVitOY0gKendGT0lvQWZWOVlMZUQ0NzVhVzVMazl4aGxiV2Rzak9VOW9sTThDNUQvTktadFhOUUZod1N4bHBGNTBPUDEvWgpLQU43WWNYYzJWTGVpV0h0OHBqRGFLekxrbGQ5UFZrWGFiU1h0M251cFdSK0szU3NNRmhyMVNMS3RLTytzN0lvCnQyZUxwMEF2YXFDbjk4MkxMNWNHQXNSQXZmd0UyTWU5clE5Sk1EZTlJU2ZzMDZmRVQvcHdkbVQ5ME0zMlBhQ08KUWpFNmpZSUJBb0dCQVBQR1RxMUhQSVhHS0FVTGM3aWVEb00yaSttWEo2Z3VLYUlzcjFjQmMxKzlwYURIM2hneQpPVkdQeXRjOURIeDV5S055U3hSVEJQcTgyMDhLOGJqUncwWEFKeDFoOU5rWDB2RHhSM29EcGtmWThTY3I5TmhECi9Zb2w2NXRMVDdFZW5KMk9JNVpXR2xYMHY0aHpaQmFaVTN3dnNIVGJBbk04VklaZTczWUUzb0c5QW9HQkFOTkEKQitXazRzdjBOQWVZeFZrOFVOOFhYdS9oRVNQOS9MMGZHZXg0QWYrTTM4dENpZ1kyZ0JwcXIxS2hmbmFKUkxJNAp1R1lEbENrMkNYUWtoem0zSklCSGV5dmc5Wk1BbXJHNHU3YnNJS3lyZEQrNW9Hcm5wSjhZMHBaOWtkWjd3VGwrClJYcnFJelFLQnczUlg3azNVb2FZOWVyVkdSaWpnMDM0OEU0VEp6b2RBb0dBZjR6c2o4ZnRTQVdsM1BYZ0IrSlQKcjU0ejJQWnBIcHVBMTBWUVBWTVovaWRwNlZXS3hOaEttMzJFWWRpa2x1RFY5WWVROTBKMUlZajlxejhMdVd1dwpJK2ZsejM3NHNUckErYWNWZ2ljMHN2VTFRUXpENFFDNlFiV1RzTDdZSk5IaW1xSEx4eGFvVXY5cjFFYWtRUnJhCnp1alpDRnVyellYc3FCNDJaMmNtMFhVQ2dZRUFyclR0MHdZaWF2cHpiVFR6UVhHWWtHZ0xvK2RHL2JVNEtGQm8KYUNCM3hwa0RIRjdUTjRBclFhMnBnWFQ5MlpwOHJjZ3ErSE5OUFNmcmVab2NHNmRURUtRTlFhU3ljL2l6OXZjSQpoNmVRL2p4dHo2WDgvV3pGd0s0T3UxYnVIYXdMaVRqY3pXS0Y2cXZBV2JVbXJVOExlVFZYYS9jQTRsZVlhQXZRCjhVRDEyQ0VDZ1lBUHUwdVlZdnZEUkNtMkNrZUw5U0s2T2o3MEtpb3RucmFSbWdTZVVIRHFLL211NExWejM0Q3cKcGJoTUN4a0I3UlljdXRwcFREdnZVa2lRbEpuek5xNk84Zkp0cEh1MTh0b0RyMWFIcGJhY0QxVFhpRFVjY2kwWQo1enpOOXBMUzc3UkFNWVR5MHJxdUs4L1ZBVWNEb0JVQW5yVVVZT0FPTUIzRlVsbzhtdEJFclE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQ==
etcd.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnVENDQW1tZ0F3SUJBZ0lVSWNFZ2FyYTlXdVI3U0l3MkRyTXhoRDFsUno4d0RRWUpLb1pJaHZjTkFRRUwKQlFBd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUtFd1ZPYVhoUFV6RW5NQ1VHQTFVRQpDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWak1CNFhEVEl3TURreU9ERTJOVEV3Ck1Gb1hEVEl3TVRBeU9ERTJOVEV3TUZvd0R6RU5NQXNHQTFVRUF4TUVhekl0TURDQ0FTSXdEUVlKS29aSWh2Y04KQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1rcGEvR1dsdEVHWnQvUXRDRENGTDFpWU16UWdVYUJVeTlSbVhlbwoxVmtmdUd0WEV2T0htWitjUTNKU3Y0TUhBU0twTzc2SFZLL3M3UzFJZDFkdFFhZW5jbkZNN0RqZVRvb0pJc0d0CklNdmtJYzEzbHJmcXB2b25VL3B1N0NITjNrSGNFOVJTcmMwM0lXdE5vVEEwamYzUURSRFlCN0Qra3JYeGFQc1QKK1BWUHl4QTRFR0drdVZVdWYzNVZuRmMrdGRrTVR5bVA0U005OUxRVitnOWFhbFA3cXh1Wm1aRFVhaEczbm1vNgpPQkRGSndITEt5MmlpcW0vYXBISTZsS1Ntck5kZ3VvYUdnRURTNWY4Rjlvdi9WNENPcW9IOHdOMnR5NkNTc0ZLCk5ibHdIRWE3S1FPM1RmYVJzRW94NTdyUDRoWXJ0R1VOK1hvUmxkM1dsY0ZraEdrQ0F3RUFBYU9CbGpDQmt6QU8KQmdOVkhROEJBZjhFQkFNQ0I0QXdEQVlEVlIwVEFRSC9CQUl3QURBZEJnTlZIUTRFRmdRVTFtNmdXQjZJaDNzdgpTTUdrVHhBZFVTNy9WS013SHdZRFZSMGpCQmd3Rm9BVVBUNkx4clpDVXBRMmpEbUNNQ0RkNmlVTG13Z3dNd1lEClZSMFJCQ3d3S29JS1pYUmpaQzVzYjJOaGJJSVFaWFJqWkM1cmRXSmxNaTVzYjJOaGJJSUVhekl0TUljRUN2MFMKY2pBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQURqR3lSbnRndW9QaUFZeGpRbXBPM2dHSWxjTjNCSVBpVEVEZApEUGsxcGJyakt3Z3FGa0JpU3A1ZmxDbFpCS1lXM3pRRWVKVDEyTCtkczhCMWp5WHVyZ1ZtU1RRWURYYkpiTmNoCmY1WjFyMnQzRXBsOEdTTk5Ec2ZETGo5aUhacml3TUlGRk9XOHNpRnk0ek1SQm4wRC9oeS9LUnVRREQxNHIySG0KWVM3Ty9hUTdaNDBiWThvZ0xVd2oyUHE0M1IxWmhGb0JNR1dFNW5jMW9TVkJHS2NQaWxiby9GSHBJTk1tYmdzbwpNK1FGNTkzWTE2S0o2K1FUKzhUZ1MyMVl6dTQ1RTAwOXMvc1piQkZuL0l1WkJxWHFkZEFZclI4Rm44SytBdGZFCnh6aTFLTnZJWTEzcXRrV21LN3hUTVl6TSsxTEVhOStidkxoNG1ybHFlWTVmVnlBOWF3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
type: Opaque


@@ -0,0 +1,20 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
app: gitlab-redis-monitor
release: prometheus-operator # required by Prometheus CRD
name: gitlab-monitor
namespace: gitlab
spec:
endpoints:
- interval: 30s
port: metrics
path: /metrics
jobLabel: app
namespaceSelector:
matchNames:
- gitlab
selector:
matchLabels:
app: redis


@@ -0,0 +1,21 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
annotations:
labels:
app: gitlab-exporter-monitor
release: prometheus-operator
name: gitlab-exporter-monitor
namespace: gitlab
spec:
endpoints:
- interval: 30s
path: /metrics
port: gitlab-exporter
jobLabel: app
namespaceSelector:
matchNames:
- gitlab
selector:
matchLabels:
app: gitlab-exporter


@@ -0,0 +1,14 @@
apiVersion: v1
kind: List
metadata: {}
items:
- apiVersion: v1
kind: Secret
type: Opaque
metadata:
labels:
app: grafana
name: grafana-ldap-toml
namespace: prometheus
data:
ldap-toml: @grafana_ldap_toml@


@@ -0,0 +1,15 @@
apiVersion: v1
kind: List
metadata: {}
items:
- apiVersion: v1
kind: Secret
type: Opaque
metadata:
labels:
app: grafana
name: grafana-smtp
namespace: prometheus
data:
user: @grafana_smtp_user@
password: @grafana_smtp_password@
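Both grafana secrets expect base64-encoded values for their @grafana_*@ placeholders, since the substituted strings land directly in Secret data fields. Illustrative encoding of hypothetical inputs:

base64 -w0 ldap.toml                      # value for @grafana_ldap_toml@
echo -n 'grafana@example.com' | base64    # value for @grafana_smtp_user@
echo -n 's3cr3t' | base64                 # value for @grafana_smtp_password@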


@@ -0,0 +1,23 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
app: nginx-ingress
release: prometheus-operator # required by Prometheus CRD
name: nginx-ingress
namespace: kube-system
spec:
endpoints:
- interval: 15s
port: metrics
jobLabel: app
selector:
matchExpressions:
- key: app
operator: In
values:
- nginx-ingress
- key: component
operator: In
values:
- controller


@@ -0,0 +1,81 @@
apiVersion: v1
kind: Secret
metadata:
labels:
app: prometheus-operator
name: etcd-cert
namespace: kube-system
type: Opaque
stringData:
etcd.pem: |-
-----BEGIN CERTIFICATE-----
MIIDgTCCAmmgAwIBAgIUfVvzugELXCci7r1kRjPUOaXt2S4wDQYJKoZIhvcNAQEL
BQAwTTESMBAGA1UEBxMJZ2VuZXJhdGVkMQ4wDAYDVQQKEwVOaXhPUzEnMCUGA1UE
CxMec2VydmljZXMua3ViZXJuZXRlcy5wa2kuY2FTcGVjMB4XDTIwMDEwMjE2MDcw
MFoXDTIwMDIwMTE2MDcwMFowDzENMAsGA1UEAxMEazItMDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAMkpa/GWltEGZt/QtCDCFL1iYMzQgUaBUy9RmXeo
1VkfuGtXEvOHmZ+cQ3JSv4MHASKpO76HVK/s7S1Id1dtQaencnFM7DjeTooJIsGt
IMvkIc13lrfqpvonU/pu7CHN3kHcE9RSrc03IWtNoTA0jf3QDRDYB7D+krXxaPsT
+PVPyxA4EGGkuVUuf35VnFc+tdkMTymP4SM99LQV+g9aalP7qxuZmZDUahG3nmo6
OBDFJwHLKy2iiqm/apHI6lKSmrNdguoaGgEDS5f8F9ov/V4COqoH8wN2ty6CSsFK
NblwHEa7KQO3TfaRsEox57rP4hYrtGUN+XoRld3WlcFkhGkCAwEAAaOBljCBkzAO
BgNVHQ8BAf8EBAMCB4AwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU1m6gWB6Ih3sv
SMGkTxAdUS7/VKMwHwYDVR0jBBgwFoAUPT6LxrZCUpQ2jDmCMCDd6iULmwgwMwYD
VR0RBCwwKoIKZXRjZC5sb2NhbIIQZXRjZC5rdWJlMi5sb2NhbIIEazItMIcECv0S
cjANBgkqhkiG9w0BAQsFAAOCAQEAUfDYNj/Yc4HqMzcez7NGBKOyJsgyuhJ+tnwv
aurgfIVMcePdFLz24guKsGfJotP28M0NCZm7v/2OSPzobXhis1yzZh2rv1KWuEkf
uQheXUZ5u65f9Sc+klO/deubbckBP+0vWg4Ru4v9t+vCXZQt4u4OGqwLsG6KxxtG
yXFSPuOOBLbyZfyGNCkOv64OF0qY648cLRH9mfZ1WOlcRdexLi+mtwQlWlCD+02f
iTZYIYvNHpKb1oa6J7/QguouRTue7ZkQuNG0p7FJiLHs5nt750HKOTsSjxfM5+SA
+rohNvUwao+K7rsLj2k3WSOU/Ju6uSqbtGEFgfh/oUBdkYwKJQ==
-----END CERTIFICATE-----
etcd-key.pem: |-
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAySlr8ZaW0QZm39C0IMIUvWJgzNCBRoFTL1GZd6jVWR+4a1cS
84eZn5xDclK/gwcBIqk7vodUr+ztLUh3V21Bp6dycUzsON5Oigkiwa0gy+QhzXeW
t+qm+idT+m7sIc3eQdwT1FKtzTcha02hMDSN/dANENgHsP6StfFo+xP49U/LEDgQ
YaS5VS5/flWcVz612QxPKY/hIz30tBX6D1pqU/urG5mZkNRqEbeeajo4EMUnAcsr
LaKKqb9qkcjqUpKas12C6hoaAQNLl/wX2i/9XgI6qgfzA3a3LoJKwUo1uXAcRrsp
A7dN9pGwSjHnus/iFiu0ZQ35ehGV3daVwWSEaQIDAQABAoIBAQCvU4rhZX7Nb+b5
680TQBUTe0sjOBd0fAMkmtpHkL9ijmMOKzY5S1Z55phia/y/FrvGmfmYdhs5uiZ9
8UsSyAcROMBmP8UjS57ocE36wAp1qsBLdI/IfJxMKzrmawcj72pnvIkL5YPV+NcH
zwFOIoAfV9YLeD475aW5Lk9xhlbWdsjOU9olM8C5D/NKZtXNQFhwSxlpF50OP1/Z
KAN7YcXc2VLeiWHt8pjDaKzLkld9PVkXabSXt3nupWR+K3SsMFhr1SLKtKO+s7Io
t2eLp0AvaqCn982LL5cGAsRAvfwE2Me9rQ9JMDe9ISfs06fET/pwdmT90M32PaCO
QjE6jYIBAoGBAPPGTq1HPIXGKAULc7ieDoM2i+mXJ6guKaIsr1cBc1+9paDH3hgy
OVGPytc9DHx5yKNySxRTBPq8208K8bjRw0XAJx1h9NkX0vDxR3oDpkfY8Scr9NhD
/Yol65tLT7EenJ2OI5ZWGlX0v4hzZBaZU3wvsHTbAnM8VIZe73YE3oG9AoGBANNA
B+Wk4sv0NAeYxVk8UN8XXu/hESP9/L0fGex4Af+M38tCigY2gBpqr1KhfnaJRLI4
uGYDlCk2CXQkhzm3JIBHeyvg9ZMAmrG4u7bsIKyrdD+5oGrnpJ8Y0pZ9kdZ7wTl+
RXrqIzQKBw3RX7k3UoaY9erVGRijg0348E4TJzodAoGAf4zsj8ftSAWl3PXgB+JT
r54z2PZpHpuA10VQPVMZ/idp6VWKxNhKm32EYdikluDV9YeQ90J1IYj9qz8LuWuw
I+flz374sTrA+acVgic0svU1QQzD4QC6QbWTsL7YJNHimqHLxxaoUv9r1EakQRra
zujZCFurzYXsqB42Z2cm0XUCgYEArrTt0wYiavpzbTTzQXGYkGgLo+dG/bU4KFBo
aCB3xpkDHF7TN4ArQa2pgXT92Zp8rcgq+HNNPSfreZocG6dTEKQNQaSyc/iz9vcI
h6eQ/jxtz6X8/WzFwK4Ou1buHawLiTjczWKF6qvAWbUmrU8LeTVXa/cA4leYaAvQ
8UD12CECgYAPu0uYYvvDRCm2CkeL9SK6Oj70KiotnraRmgSeUHDqK/mu4LVz34Cw
pbhMCxkB7RYcutppTDvvUkiQlJnzNq6O8fJtpHu18toDr1aHpbacD1TXiDUcci0Y
5zzN9pLS77RAMYTy0rquK8/VAUcDoBUAnrUUYOAOMB3FUlo8mtBErQ==
-----END RSA PRIVATE KEY-----
ca.pem: |-
-----BEGIN CERTIFICATE-----
MIIDajCCAlKgAwIBAgIUYudTFvTJDamPPtR4Xzw2S0dN36cwDQYJKoZIhvcNAQEL
BQAwTTESMBAGA1UEBxMJZ2VuZXJhdGVkMQ4wDAYDVQQKEwVOaXhPUzEnMCUGA1UE
CxMec2VydmljZXMua3ViZXJuZXRlcy5wa2kuY2FTcGVjMB4XDTE5MTAxNTA5MjEw
MFoXDTI0MTAxMzA5MjEwMFowTTESMBAGA1UEBxMJZ2VuZXJhdGVkMQ4wDAYDVQQK
EwVOaXhPUzEnMCUGA1UECxMec2VydmljZXMua3ViZXJuZXRlcy5wa2kuY2FTcGVj
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAukWIN5XpFB652nk7sPlu
Ij56ScNljJNWBYesVh1828MPOUTzne81yHS0x7XUmpcyT7C6dPvUVrGYQFB2vj7J
roSwN1XkYymtauPTZjWbzeWRlmggGij1aQFNg/KzY3e0dpgUPI7MvPr1ISM5srnZ
tbTQrFf2ElswoTCO1cXDc252gnZaEnzfRD7J6yz2Uv/1FRz/ZcoCeUbJrd5bcNI9
gJaOy0140GGG67YdmepcAbWy1NbAMZRBjaTQ+feEVkJxPcQ5fj1HkCDnLrcxJfw1
IhodVe4WKNHriAFGrSKrH3UKoN9DUmSTF9UDQkNA/s54dhBr1bEkne1mDll0afaX
jwIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
HQ4EFgQUPT6LxrZCUpQ2jDmCMCDd6iULmwgwDQYJKoZIhvcNAQELBQADggEBAKis
hW6ldAV6cnve3waznZE5sOPyOXQXWDsltktsSpzYOhr59Y6wCj1/NoldQiv8QBxO
PaQwABR/q/sJumHD82pK4pl6W98VL1Oump8J1f1FWFj+2i2+NlmTS9GSqapdLu2h
OXMBz3BCdVVIT8DhseTA/mVyyQWXw/9lQ61OSusPnne0p0pnBFMMSUHMXVVxk++e
c7MLeUqqNlyb3RCxcUBESkXwwWHhXauTt99FB0yHSJy31wAM/jGyBavZaouLDkbq
05wpCwqC9zHAedITq8W9HNAP9CQcz7lWyCDxdgj+7hhxGkQJ2bjE0leZP5mzaEu2
7OaICVDtpa9OaWcqiIA=
-----END CERTIFICATE-----
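# How this secret is typically consumed (an assumption, since the prometheus-operator
# values diff is suppressed below): the prometheus-operator chart can mount it via
# prometheus.prometheusSpec.secrets and point the kubeEtcd scrape config at the files,
# e.g. (illustrative values snippet):
#   prometheus:
#     prometheusSpec:
#       secrets:
#         - etcd-cert
#   kubeEtcd:
#     serviceMonitor:
#       caFile: /etc/prometheus/secrets/etcd-cert/ca.pem
#       certFile: /etc/prometheus/secrets/etcd-cert/etcd.pem
#       keyFile: /etc/prometheus/secrets/etcd-cert/etcd-key.pem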

File diff suppressed because it is too large

18
charts/sentry/deploy.sh Normal file
View File

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
namespace=sentry
charts=()
. ../config.sh
kubectl create ns $namespace
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
sentry stable/sentry \
| make_substitutions \
| kubectl apply -n $namespace -f -

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Secret
metadata:
labels:
app: sentry-postgresql
chart: postgresql-6.5.0
heritage: Helm
release: sentry
name: sentry-sentry-postgresql
namespace: kube-system
type: Opaque
data:
postgresql-password: a1pyWlBCazVzSQ==

287
charts/sentry/values.yaml Normal file
View File

@@ -0,0 +1,287 @@
# helm install --namespace kube-system --timeout 1000 -f sentry.yaml sentry stable/sentry
# image:
# repository: sentry
# tag: 9
# pullPolicy: IfNotPresent
# # Add the secret name to pull from a private registry.
# imagePullSecrets: []
# # - name:
# How many web UI instances to run
# web:
# replicacount: 1
# resources:
# limits:
# cpu: 500m
# memory: 500Mi
# requests:
# cpu: 300m
# memory: 300Mi
# env:
# - name: GITHUB_APP_ID
# value:
# - name: GITHUB_API_SECRET
# value:
# nodeSelector: {}
# tolerations: []
# affinity: {}
# probeInitialDelaySeconds: 50
# priorityClassName: ""
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
# Optional extra labels for pod, e.g. redis-client: "true"
# podLabels: []
# How many cron instances to run
# cron:
# replicacount: 1
# resources:
# limits:
# cpu: 200m
# memory: 200Mi
# requests:
# cpu: 100m
# memory: 100Mi
# nodeSelector: {}
# tolerations: []
# affinity: {}
# priorityClassName: ""
# schedulerName:
# Optional extra labels for pod, e.g. redis-client: "true"
# podLabels: []
# How many worker instances to run
# worker:
# replicacount: 2
# resources:
# limits:
# cpu: 300m
# memory: 500Mi
# requests:
# cpu: 100m
# memory: 100Mi
# nodeSelector: {}
# tolerations: []
# affinity: {}
# priorityClassName: ""
# schedulerName:
# Optional extra labels for pod, e.g. redis-client: "true"
# podLabels: []
# concurrency:
# Admin user to create
user:
  # Indicates whether to create the admin user.
  # Defaults to true for the initial installation.
create: true
email: admin
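  # The upstream stable/sentry chart also accepts an initial admin password in this
  # block; shown for illustration only (value is a placeholder, not from this repo):
  # password: <initial-admin-password>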
# BYO Email server
# TODO: Add exim4 template
# https://docs.sentry.io/server/installation/docker/#outbound-email
email:
from_address: sentry@sentry.itpartner.no
host: smtpgw.itpartner.no
port: 465
use_tls: false
user: utvikling
password: S0m3rp0m@de#21!
enable_replies: false
# Name of the service and what port to expose on the pod
# Don't change these unless you know what you're doing
service:
name: sentry
type: ClusterIP
# externalPort: 9000
# internalPort: 9000
# ## Service annotations
# ##
# annotations: {}
## External IP addresses of service
## Default: nil
##
# externalIPs:
# - 192.168.0.1
## Load Balancer allow-list
# loadBalancerSourceRanges: []
# Configure the location of Sentry artifacts
filestore:
# Set to one of filesystem, gcs or s3 as supported by Sentry.
backend: filesystem
filesystem:
path: /var/lib/sentry/files
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
enabled: true
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: managed-nfs-storage
accessMode: ReadWriteOnce
size: 10Gi
## Whether to mount the persistent volume to the Sentry worker and
## cron deployments. This setting needs to be enabled for some advanced
## Sentry features, such as private source maps. If you disable this
## setting, the Sentry workers will not have access to artifacts you upload
## through the web deployment.
## Please note that you may need to change your accessMode to ReadWriteMany
## if you plan on having the web, worker and cron deployments run on
## different nodes.
# persistentWorkers: false
## Point this at a pre-configured secret containing a service account. The resulting
## secret will be mounted at /var/run/secrets/google
# gcs:
# credentialsFile: credentials.json
# secretName:
# bucketName:
## Currently unconfigured and changing this has no impact on the template configuration.
# s3: {}
# accessKey:
# secretKey:
# bucketName:
## Configure ingress resource that allow you to access the
## Sentry installation. Set up the URL
## ref: http://kubernetes.io/docs/user-guide/ingress/
##
ingress:
enabled: true
hostname: sentry.itpartner.no
## Ingress annotations
##
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
nginx.ingress.kubernetes.io/backend-protocol: HTTP
nginx.ingress.kubernetes.io/ssl-redirect: "true"
# kubernetes.io/tls-acme: 'true'
tls:
- secretName: sentry-tls-cert
hosts:
- sentry.itpartner.no
# TODO: add support for plugins https://docs.sentry.io/server/plugins/
postgresql:
enabled: true
postgresqlDatabase: sentry
postgresqlUsername: postgres
postgresqlPassword: jdjiujh1212eo
# # Only used when internal PG is disabled
# # postgresHost: postgres
# # postgresPassword: postgres
# # postgresPort: 5432
# imageTag: "9.6"
# persistence:
# enabled: true
redis:
clusterDomain: kube2.local
# enabled: true
# Only used when internal redis is disabled
# host: redis
# Just omit the password field if your redis cluster doesn't use password
# password: redis
# port: 6379
# master:
# persistence:
# enabled: true
  # To change the PVC size, set redis.master.persistence.size: 20Gi
# config:
# configYml: ""
# sentryConfPy: ""
## Prometheus Exporter / Metrics
##
#metrics:
# enabled: true
# ## Configure extra options for liveness and readiness probes
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
# livenessProbe:
# enabled: true
# initialDelaySeconds: 30
# periodSeconds: 5
# timeoutSeconds: 2
# failureThreshold: 3
# successThreshold: 1
# readinessProbe:
# enabled: true
# initialDelaySeconds: 30
# periodSeconds: 5
# timeoutSeconds: 2
# failureThreshold: 3
# successThreshold: 1
# ## Metrics exporter resource requests and limits
# ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
# resources:
# limits:
# cpu: 100m
# memory: 100Mi
# requests:
# cpu: 100m
# memory: 100Mi
# nodeSelector: {}
# tolerations: []
# affinity: {}
# # schedulerName:
# # Optional extra labels for pod, e.g. redis-client: "true"
# # podLabels: []
# service:
# type: ClusterIP
# labels: {}
# image:
# repository: prom/statsd-exporter
# tag: v0.10.5
# pullPolicy: IfNotPresent
# # Enable this if you're using https://github.com/coreos/prometheus-operator
# serviceMonitor:
# enabled: true
# ## Specify a namespace if needed
# # namespace: kube-system
# # fallback to the prometheus default unless specified
# # interval: 10s
# ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/helm/charts/tree/master/stable/prometheus-operator#tldr)
# ## [Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#prometheus-operator-1)
# ## [Kube Prometheus Selector Label](https://github.com/helm/charts/tree/master/stable/prometheus-operator#exporters)
# # selector:
# # app: prometheus
# #prometheus: kube-prometheus
## Provide affinity for hooks if needed
#hooks:
# affinity: {}
# dbInit:
# resources:
# # Memory is sized around 3000Mi because a Sentry instance needs at least 3 GB of RAM to run the migration process
# # reference: https://github.com/helm/charts/issues/15296
# limits:
# memory: 3200Mi
# requests:
# memory: 3000Mi

18
charts/seq/deploy.sh Normal file
View File

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
namespace=seq
charts=()
. ../config.sh
kubectl create ns $namespace
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
seq stable/seq \
| make_substitutions \
| kubectl apply -n $namespace -f -

154
charts/seq/values.yaml Normal file
View File

@@ -0,0 +1,154 @@
# Default values for Seq.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: datalust/seq
tag: 2020
pullPolicy: IfNotPresent
# By passing the value Y in the ACCEPT_EULA environment variable,
# you are expressing that you have read and accepted the terms in
# the Seq End User License Agreement applicable to the Seq Docker image
# that you intend to use.
acceptEULA: "Y"
# Set this URL if you enable ingress and/or AAD authentication.
# Without this URL set to include HTTPS, Seq will try to set a login redirect
# URL with HTTP instead of HTTPS and AAD's registration requires HTTPS.
# The result is that you'll get an error during login:
# AADSTS50011: The reply url specified in the request does not match the reply urls configured for the application
# baseURI: https://my.public.url/
# The complete Seq API and UI.
# This API can accept events and serve API requests.
ui:
service:
port: 80
ingress:
enabled: true
path: /
hosts:
- seq.k2.local
# The ingestion-only API.
# This API is a subset of ui that can only ingest events.
ingestion:
service:
port: 5341
ingress:
enabled: false
path: /
hosts: []
# Accept events in the GELF format and forward them to Seq.
gelf:
enabled: false
image:
repository: datalust/sqelf
tag: 2
pullPolicy: IfNotPresent
service:
port: 12201
# GELF can be ingested through either TCP or UDP
protocol: TCP
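  # If GELF ingestion is enabled, log shippers can target the gelf service on port
  # 12201. Illustrative example only (the service address depends on the release name,
  # and the Docker gelf driver example below assumes protocol is switched to UDP):
  #   docker run --log-driver gelf \
  #     --log-opt gelf-address=udp://<gelf-service-address>:12201 my-image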
service:
type: ClusterIP
ingress:
annotations:
cert-manager.io/cluster-issuer: ca-issuer
kubernetes.io/ingress.class: nginx
tls:
- secretName: seq-tls
hosts:
- seq.k2.local
resources: {}
# We recommend uncommenting these and specifying an explicit memory limit that
# suits your workload.
# limits:
# memory: 256Mi
# requests:
# memory: 256Mi
cache:
  # The fraction of RAM that the cache should try to fit within. Specifying a larger
# value may allow more events in RAM at the expense of potential instability.
# Setting it to `0` will disable the cache completely.
# 70% (`0.7`) is a good starting point for machines with up to ~8GB of RAM.
targetSize: 0.7
nodeSelector: {}
tolerations: []
affinity: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
persistence:
enabled: true
## The path the volume will be mounted at
path: /data
  ## The subdirectory of the volume to mount to; useful in dev environments or when sharing one PV across multiple services.
subPath: ""
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
## Seq data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: managed-nfs-storage
accessMode: ReadWriteOnce
size: 8Gi
serviceAccount:
create: false
name:
## Enable RBAC
rbac:
create: false
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
create: false
securityContext:
privileged: true
## Configure probe values
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
startupProbe:
enabled: true
failureThreshold: 30
periodSeconds: 10

17
charts/vault/deploy.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
namespace=vault
charts=()
. ../config.sh
kubectl create ns $namespace
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
  -f values.yaml \
vault hashicorp/vault \
| make_substitutions \
| kubectl apply -n $namespace -f -

588
charts/vault/values.yaml Normal file
View File

@@ -0,0 +1,588 @@
# Available parameters and their default values for the Vault chart.
global:
# enabled is the master enabled switch. Setting this to true or false
# will enable or disable all the components within this chart by default.
enabled: true
# Image pull secret to use for registry authentication.
imagePullSecrets: []
# imagePullSecrets:
# - name: image-pull-secret
# TLS for end-to-end encrypted transport
tlsDisable: true
# If deploying to OpenShift
openshift: false
# Create PodSecurityPolicy for pods
psp:
enable: false
# Annotation for PodSecurityPolicy.
# This is a multi-line templated string map, and can also be set as YAML.
annotations: |
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
injector:
# True if you want to enable vault agent injection.
enabled: true
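  # When injection is enabled, workloads opt in through pod annotations, e.g.
  # (illustrative role and secret path, not defined in this repo):
  #   vault.hashicorp.com/agent-inject: "true"
  #   vault.hashicorp.com/role: "myapp"
  #   vault.hashicorp.com/agent-inject-secret-config: "secret/data/myapp/config"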
# If true, will enable a node exporter metrics endpoint at /metrics.
metrics:
enabled: false
# External vault server address for the injector to use. Setting this will
# disable deployment of a vault server along with the injector.
externalVaultAddr: ""
# image sets the repo and tag of the vault-k8s image to use for the injector.
image:
repository: "hashicorp/vault-k8s"
tag: "0.6.0"
pullPolicy: IfNotPresent
# agentImage sets the repo and tag of the Vault image to use for the Vault Agent
# containers. This should be set to the official Vault image. Vault 1.3.1+ is
# required.
agentImage:
repository: "vault"
tag: "1.5.4"
# Mount Path of the Vault Kubernetes Auth Method.
authPath: "auth/kubernetes"
# Configures the log verbosity of the injector. Supported log levels: Trace, Debug, Error, Warn, Info
logLevel: "info"
# Configures the log format of the injector. Supported log formats: "standard", "json".
logFormat: "standard"
# Configures all Vault Agent sidecars to revoke their token when shutting down
revokeOnShutdown: false
# namespaceSelector is the selector for restricting the webhook to only
# specific namespaces.
# See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector
# for more details.
# Example:
# namespaceSelector:
# matchLabels:
# sidecar-injector: enabled
namespaceSelector: {}
# Configures failurePolicy of the webhook. By default webhook failures are ignored.
# To block pod creation while webhook is unavailable, set the policy to `Fail` below.
# See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy
#
  # failurePolicy: Fail
certs:
# secretName is the name of the secret that has the TLS certificate and
# private key to serve the injector webhook. If this is null, then the
# injector will default to its automatic management mode that will assign
# a service account to the injector to generate its own certificates.
secretName: null
# caBundle is a base64-encoded PEM-encoded certificate bundle for the
# CA that signed the TLS certificate that the webhook serves. This must
# be set if secretName is non-null.
caBundle: ""
# certName and keyName are the names of the files within the secret for
# the TLS cert and private key, respectively. These have reasonable
# defaults but can be customized if necessary.
certName: tls.crt
keyName: tls.key
resources: {}
# resources:
# requests:
# memory: 256Mi
# cpu: 250m
# limits:
# memory: 256Mi
# cpu: 250m
# extraEnvironmentVars is a list of extra environment variables to set in the
# injector deployment.
extraEnvironmentVars: {}
# KUBERNETES_SERVICE_HOST: kubernetes.default.svc
# Affinity Settings for injector pods
# This should be a multi-line string matching the affinity section of a
# PodSpec.
affinity: null
# Toleration Settings for injector pods
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
tolerations: null
  # nodeSelector labels for injector pod assignment, formatted as a multi-line string.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# Example:
# nodeSelector: |
# beta.kubernetes.io/arch: amd64
nodeSelector: null
# Priority class for injector pods
priorityClassName: ""
# Extra annotations to attach to the injector pods
# This can either be YAML or a YAML-formatted multi-line templated string map
# of the annotations to apply to the injector pods
annotations: {}
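  # Either form is accepted, e.g. (illustrative annotation keys):
  # annotations:
  #   example.com/team: platform
  # or, as a templated multi-line string:
  # annotations: |
  #   example.com/release: {{ .Release.Name }}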
server:
# Resource requests, limits, etc. for the server cluster placement. This
# should map directly to the value of the resources field for a PodSpec.
# By default no direct resource request is made.
image:
repository: "vault"
tag: "1.5.4"
# Overrides the default Image Pull Policy
pullPolicy: IfNotPresent
# Configure the Update Strategy Type for the StatefulSet
# See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategyType: "OnDelete"
resources: {}
# resources:
# requests:
# memory: 256Mi
# cpu: 250m
# limits:
# memory: 256Mi
# cpu: 250m
# Ingress allows ingress services to be created to allow external access
# from Kubernetes to access Vault pods.
# If deployment is on OpenShift, the following block is ignored.
# In order to expose the service, use the route section below
ingress:
enabled: true
labels: {}
# traffic: external
annotations:
# |
cert-manager.io/cluster-issuer: letsencrypt-production
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
hosts:
- host: vault.k2.itpartner.no
paths: [ / ]
tls:
- secretName: vault-tls
hosts:
- vault.k2.itpartner.no
# OpenShift only - create a route to expose the service
# The created route will be of type passthrough
route:
enabled: false
labels: {}
annotations: {}
host: chart-example.local
# authDelegator enables a cluster role binding to be attached to the service
# account. This cluster role binding can be used to setup Kubernetes auth
# method. https://www.vaultproject.io/docs/auth/kubernetes.html
authDelegator:
enabled: true
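    # With the binding in place, the Kubernetes auth method can be set up roughly like
    # this (illustrative commands, run against the deployed Vault):
    #   vault auth enable kubernetes
    #   vault write auth/kubernetes/config \
    #     kubernetes_host="https://$KUBERNETES_SERVICE_HOST:443" \
    #     token_reviewer_jwt=@/var/run/secrets/kubernetes.io/serviceaccount/token \
    #     kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt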
# extraInitContainers is a list of init containers. Specified as a YAML list.
# This is useful if you need to run a script to provision TLS certificates or
# write out configuration files in a dynamic way.
extraInitContainers: null
# # This example installs a plugin pulled from github into the /usr/local/libexec/vault/oauthapp folder,
# # which is defined in the volumes value.
# - name: oauthapp
# image: "alpine"
# command: [sh, -c]
# args:
# - cd /tmp &&
# wget https://github.com/puppetlabs/vault-plugin-secrets-oauthapp/releases/download/v1.2.0/vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64.tar.xz -O oauthapp.xz &&
# tar -xf oauthapp.xz &&
# mv vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64 /usr/local/libexec/vault/oauthapp &&
# chmod +x /usr/local/libexec/vault/oauthapp
# volumeMounts:
# - name: plugins
# mountPath: /usr/local/libexec/vault
# extraContainers is a list of sidecar containers. Specified as a YAML list.
extraContainers: null
# shareProcessNamespace enables process namespace sharing between Vault and the extraContainers
# This is useful if Vault must be signaled, e.g. to send a SIGHUP for log rotation
shareProcessNamespace: false
# extraArgs is a string containing additional Vault server arguments.
extraArgs: ""
# Used to define custom readinessProbe settings
readinessProbe:
enabled: true
# If you need to use a http path instead of the default exec
# path: /v1/sys/health?standbyok=true
# When a probe fails, Kubernetes will try failureThreshold times before giving up
failureThreshold: 2
# Number of seconds after the container has started before probe initiates
initialDelaySeconds: 5
# How often (in seconds) to perform the probe
periodSeconds: 5
# Minimum consecutive successes for the probe to be considered successful after having failed
successThreshold: 1
# Number of seconds after which the probe times out.
timeoutSeconds: 3
# Used to enable a livenessProbe for the pods
livenessProbe:
enabled: false
path: "/v1/sys/health?standbyok=true"
# When a probe fails, Kubernetes will try failureThreshold times before giving up
failureThreshold: 2
# Number of seconds after the container has started before probe initiates
initialDelaySeconds: 60
# How often (in seconds) to perform the probe
periodSeconds: 5
# Minimum consecutive successes for the probe to be considered successful after having failed
successThreshold: 1
# Number of seconds after which the probe times out.
timeoutSeconds: 3
# Used to set the sleep time during the preStop step
preStopSleepSeconds: 5
# Used to define commands to run after the pod is ready.
# This can be used to automate processes such as initialization
  # or bootstrapping auth methods.
postStart: []
# - /bin/sh
# - -c
# - /vault/userconfig/myscript/run.sh
# extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be
# used to include variables required for auto-unseal.
extraEnvironmentVars: {}
# GOOGLE_REGION: global
# GOOGLE_PROJECT: myproject
# GOOGLE_APPLICATION_CREDENTIALS: /vault/userconfig/myproject/myproject-creds.json
# extraSecretEnvironmentVars is a list of extra environment variables to set with the stateful set.
# These variables take value from existing Secret objects.
extraSecretEnvironmentVars: []
# - envName: AWS_SECRET_ACCESS_KEY
# secretName: vault
# secretKey: AWS_SECRET_ACCESS_KEY
# extraVolumes is a list of extra volumes to mount. These will be exposed
# to Vault in the path `/vault/userconfig/<name>/`. The value below is
# an array of objects, examples are shown below.
extraVolumes: []
# - type: secret (or "configMap")
# name: my-secret
# path: null # default is `/vault/userconfig`
# volumes is a list of volumes made available to all containers. These are rendered
# via toYaml rather than pre-processed like the extraVolumes value.
# The purpose is to make it easy to share volumes between containers.
volumes: null
# - name: plugins
# emptyDir: {}
# volumeMounts is a list of volumeMounts for the main server container. These are rendered
# via toYaml rather than pre-processed like the extraVolumes value.
# The purpose is to make it easy to share volumes between containers.
volumeMounts: null
# - mountPath: /usr/local/libexec/vault
# name: plugins
# readOnly: true
# Affinity Settings
  # Commenting out the affinity variable or setting it to an empty value will allow
  # deployment to single-node environments such as Minikube
affinity: |
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: {{ template "vault.name" . }}
app.kubernetes.io/instance: "{{ .Release.Name }}"
component: server
topologyKey: kubernetes.io/hostname
# Toleration Settings for server pods
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
tolerations: null
  # nodeSelector labels for server pod assignment, formatted as a multi-line string.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
# Example:
# nodeSelector: |
# beta.kubernetes.io/arch: amd64
nodeSelector: null
# Enables network policy for server pods
networkPolicy:
enabled: false
# Priority class for server pods
priorityClassName: ""
# Extra labels to attach to the server pods
# This should be a YAML map of the labels to apply to the server pods
extraLabels: {}
# Extra annotations to attach to the server pods
# This can either be YAML or a YAML-formatted multi-line templated string map
# of the annotations to apply to the server pods
annotations: {}
# Enables a headless service to be used by the Vault Statefulset
service:
enabled: true
# clusterIP controls whether a Cluster IP address is attached to the
# Vault service within Kubernetes. By default the Vault service will
# be given a Cluster IP address, set to None to disable. When disabled
# Kubernetes will create a "headless" service. Headless services can be
# used to communicate with pods directly through DNS instead of a round robin
# load balancer.
# clusterIP: None
# Configures the service type for the main Vault service. Can be ClusterIP
# or NodePort.
#type: ClusterIP
# If type is set to "NodePort", a specific nodePort value can be configured,
# will be random if left blank.
#nodePort: 30000
# Port on which Vault server is listening
port: 8200
# Target port to which the service should be mapped to
targetPort: 8200
# Extra annotations for the service definition. This can either be YAML or a
# YAML-formatted multi-line templated string map of the annotations to apply
# to the service.
annotations: {}
# This configures the Vault Statefulset to create a PVC for data
# storage when using the file or raft backend storage engines.
# See https://www.vaultproject.io/docs/configuration/storage/index.html to know more
dataStorage:
enabled: true
# Size of the PVC created
size: 10Gi
# Location where the PVC will be mounted.
mountPath: "/vault/data"
# Name of the storage class to use. If null it will use the
# configured default Storage Class.
storageClass: null
# Access Mode of the storage device being used for the PVC
accessMode: ReadWriteOnce
# Annotations to apply to the PVC
annotations: {}
# This configures the Vault Statefulset to create a PVC for audit
  # logs. Once Vault is deployed, initialized and unsealed, Vault must
# be configured to use this for audit logs. This will be mounted to
# /vault/audit
# See https://www.vaultproject.io/docs/audit/index.html to know more
auditStorage:
enabled: false
# Size of the PVC created
size: 10Gi
# Location where the PVC will be mounted.
mountPath: "/vault/audit"
# Name of the storage class to use. If null it will use the
# configured default Storage Class.
storageClass: null
# Access Mode of the storage device being used for the PVC
accessMode: ReadWriteOnce
# Annotations to apply to the PVC
annotations: {}
# Run Vault in "dev" mode. This requires no further setup, no state management,
# and no initialization. This is useful for experimenting with Vault without
  # needing to unseal, store keys, and so on. All data is lost on restart - do not
# use dev mode for anything other than experimenting.
# See https://www.vaultproject.io/docs/concepts/dev-server.html to know more
dev:
enabled: false
# Run Vault in "standalone" mode. This is the default mode that will deploy if
# no arguments are given to helm. This requires a PVC for data storage to use
# the "file" backend. This mode is not highly available and should not be scaled
# past a single replica.
standalone:
enabled: "-"
# config is a raw string of default configuration when using a Stateful
# deployment. Default is to use a PersistentVolumeClaim mounted at /vault/data
# and store data there. This is only used when using a Replica count of 1, and
# using a stateful set. This should be HCL.
# Note: Configuration files are stored in ConfigMaps so sensitive data
# such as passwords should be either mounted through extraSecretEnvironmentVars
# or through a Kube secret. For more information see:
# https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
config: |
ui = true
listener "tcp" {
tls_disable = 1
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "file" {
path = "/vault/data"
}
# Example configuration for using auto-unseal, using Google Cloud KMS. The
# GKMS keys must already exist, and the cluster must have a service account
# that is authorized to access GCP KMS.
#seal "gcpckms" {
# project = "vault-helm-dev"
# region = "global"
# key_ring = "vault-helm-unseal-kr"
# crypto_key = "vault-helm-unseal-key"
#}
# Run Vault in "HA" mode. There are no storage requirements unless audit log
# persistence is required. In HA mode Vault will configure itself to use Consul
  # for its storage backend. The default configuration provided will work with the Consul
  # Helm project by default. It is possible to manually configure Vault to use a
# different HA backend.
ha:
enabled: false
replicas: 3
# Set the api_addr configuration for Vault HA
# See https://www.vaultproject.io/docs/configuration#api_addr
# If set to null, this will be set to the Pod IP Address
apiAddr: null
# Enables Vault's integrated Raft storage. Unlike the typical HA modes where
# Vault's persistence is external (such as Consul), enabling Raft mode will create
# persistent volumes for Vault to store data according to the configuration under server.dataStorage.
# The Vault cluster will coordinate leader elections and failovers internally.
raft:
# Enables Raft integrated storage
enabled: false
# Set the Node Raft ID to the name of the pod
setNodeId: false
# Note: Configuration files are stored in ConfigMaps so sensitive data
# such as passwords should be either mounted through extraSecretEnvironmentVars
# or through a Kube secret. For more information see:
# https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
config: |
ui = true
listener "tcp" {
tls_disable = 1
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "raft" {
path = "/vault/data"
}
service_registration "kubernetes" {}
# config is a raw string of default configuration when using a Stateful
    # deployment. Default is to use Consul as the HA storage backend.
# This should be HCL.
# Note: Configuration files are stored in ConfigMaps so sensitive data
# such as passwords should be either mounted through extraSecretEnvironmentVars
# or through a Kube secret. For more information see:
# https://www.vaultproject.io/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations
config: |
ui = true
listener "tcp" {
tls_disable = 1
address = "[::]:8200"
cluster_address = "[::]:8201"
}
storage "consul" {
path = "vault"
address = "HOST_IP:8500"
}
service_registration "kubernetes" {}
# Example configuration for using auto-unseal, using Google Cloud KMS. The
# GKMS keys must already exist, and the cluster must have a service account
# that is authorized to access GCP KMS.
#seal "gcpckms" {
# project = "vault-helm-dev-246514"
# region = "global"
# key_ring = "vault-helm-unseal-kr"
# crypto_key = "vault-helm-unseal-key"
#}
# A disruption budget limits the number of pods of a replicated application
# that are down simultaneously from voluntary disruptions
disruptionBudget:
enabled: true
# maxUnavailable will default to (n/2)-1 where n is the number of
# replicas. If you'd like a custom value, you can specify an override here.
maxUnavailable: null
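    # For example, with 5 replicas the default works out to (5/2)-1 = 1 (integer
    # division), i.e. at most one Vault pod may be taken down by a voluntary disruption.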
# Definition of the serviceAccount used to run Vault.
# These options are also used when using an external Vault server to validate
# Kubernetes tokens.
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# Extra annotations for the serviceAccount definition. This can either be
# YAML or a YAML-formatted multi-line templated string map of the
# annotations to apply to the serviceAccount.
annotations: {}
# Settings for the statefulSet used to run Vault.
statefulSet:
# Extra annotations for the statefulSet. This can either be YAML or a
# YAML-formatted multi-line templated string map of the annotations to apply
# to the statefulSet.
annotations: {}
# Vault UI
ui:
# True if you want to create a Service entry for the Vault UI.
#
# serviceType can be used to control the type of service created. For
# example, setting this to "LoadBalancer" will create an external load
# balancer (for supported K8S installations) to access the UI.
enabled: false
publishNotReadyAddresses: true
# The service should only contain selectors for active Vault pod
activeVaultPodOnly: false
serviceType: "ClusterIP"
serviceNodePort: null
externalPort: 8200
# loadBalancerSourceRanges:
# - 10.0.0.0/16
# - 1.78.23.3/32
# loadBalancerIP:
# Extra annotations to attach to the ui service
# This can either be YAML or a YAML-formatted multi-line templated string map
# of the annotations to apply to the ui service
annotations: {}