Totally revamp cluster chart configs

This commit is contained in:
Jonas Juselius
2020-11-05 21:05:34 +01:00
parent 6fea8b3bc8
commit 3a69e7f1f1
55 changed files with 3921 additions and 531 deletions

439 charts/anchore/anchore.yaml Normal file

@@ -0,0 +1,439 @@
# Default values for anchore_engine chart.
# Anchore engine has a dependency on Postgresql, configure here
postgresql:
# To use an external DB or Google CloudSQL in GKE, uncomment & set 'enabled: false'
# externalEndpoint, postgresUser, postgresPassword & postgresDatabase are required values for external postgres
# enabled: false
postgresUser: anchoreengine
postgresPassword: KebabNinja2020
postgresDatabase: anchore
# Specify an external (already existing) postgres deployment for use.
# Set to the host and port. eg. mypostgres.myserver.io:5432
externalEndpoint: Null
# Configure size of the persistent volume used with helm managed chart.
# This should be commented out if using an external endpoint.
persistence:
storageClass: managed-nfs-storage
resourcePolicy: nil
size: 20Gi
ingress:
enabled: true
labels: {}
# Exposing the feeds API w/ ingress is for special cases only, uncomment feedsPath if external access to the feeds API is needed
# feedsPath: /v1/feeds/
apiPath: /v1/
uiPath: /
# Uncomment the following lines to bind on specific hostnames
# apiHosts:
# - anchore-api.example.com
# uiHosts:
# - anchore-ui.example.com
# feedsHosts:
# - anchore-feeds.example.com
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
nginx.ingress.kubernetes.io/ssl-redirect: "true"
tls:
- secretName: anchore-tls
hosts:
- anchore.k2.local
# Global configuration shared by all anchore-engine services.
anchoreGlobal:
# Image used for all anchore engine deployments (excluding enterprise components).
image: docker.io/anchore/anchore-engine:v0.8.1
imagePullPolicy: IfNotPresent
# Set image pull secret name if using an anchore-engine image from a private registry
imagePullSecretName:
# Set this value to True to setup the chart for OpenShift deployment compatibility.
openShiftDeployment: False
# Add additional labels to all kubernetes resources
labels: {}
# app.kubernetes.io/managed-by: Helm
# foo: bar
# Set extra environment variables. These will be set on all containers.
extraEnv: []
# - name: foo
# value: bar
# Specifies an existing secret to be used for admin and db passwords
existingSecret: Null
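# A minimal sketch of using an existing secret instead of the plaintext passwords in this
# file (assumption: the chart reads the keys ANCHORE_ADMIN_PASSWORD and ANCHORE_DB_PASSWORD
# from the referenced secret; verify against the chart templates before relying on these names):
#   kubectl create secret generic anchore-secrets \
#     --from-literal=ANCHORE_ADMIN_PASSWORD=... --from-literal=ANCHORE_DB_PASSWORD=...
# existingSecret: anchore-secrets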
# The scratchVolume controls the mounting of an external volume for scratch space for image analysis. Generally speaking
# you need to provision 3x the size of the largest image (uncompressed) that you want to analyze for this space.
scratchVolume:
mountPath: /analysis_scratch
details:
# Specify volume configuration here
emptyDir: {}
# A secret must be created in the same namespace as anchore-engine is deployed, containing the certificates & public/private keys used for SSL, SAML & custom CAs.
# Certs and keys should be added using the file name the certificate is stored at. This secret will be mounted to /home/anchore/certs.
certStoreSecretName: Null
# Specify your pod securityContext here, by default the anchore images utilize the user/group 'anchore' using uid/gid 1000
# To disable this securityContext comment out `runAsUser` & `runAsGroup`
securityContext:
runAsUser: 1000
runAsGroup: 1000
###
# Start of General Anchore Engine Configurations (populates /config/config.yaml)
###
# Set where default configs are placed at startup. This must be a writable location for the pod.
serviceDir: /anchore_service
logLevel: INFO
cleanupImages: true
# Define timeout, in seconds, for image analysis
imageAnalyzeTimeoutSeconds: 36000
# If true, when a user adds an ECR registry with username = awsauto then the system will look for an instance profile to use for auth against the registry
allowECRUseIAMRole: false
# Enable prometheus metrics
enableMetrics: true
# Disable auth on prometheus metrics
metricsAuthDisabled: false
# Sets the password & email address for the default anchore-engine admin user.
defaultAdminPassword: KebabNinja2020
defaultAdminEmail: jonas.juselius@tromso.serit.no
saml:
# Locations for keys used for signing and encryption. Only one of 'secret' or 'public_key_path'/'private_key_path' needs to be set. If all are set then the keys take precedence over the secret value
# Secret is for a shared secret and if set, all components in anchore should have the exact same value in their configs.
secret: Null
privateKeyName: Null
publicKeyName: Null
oauthEnabled: false
oauthTokenExpirationSeconds: 3600
# Set this to True to enable storing user passwords only as secure hashes in the db. This can dramatically increase CPU usage if you
# don't also use oauth and tokens for internal communications (which requires keys/secret to be configured as well)
# WARNING: you should not change this after a system has been initialized as it may cause a mismatch in existing passwords
hashedPasswords: false
# Configure the database connection within anchore-engine & enterprise-ui. This may get split into 2 different configurations based on service utilized.
dbConfig:
timeout: 120
# Use ssl, but the default postgresql config in helm's stable repo does not support ssl on server side, so this should be set for external dbs only.
# All ssl dbConfig values are only utilized when ssl=true
ssl: false
sslMode: verify-full
# sslRootCertName is the name of the postgres root CA certificate stored in anchoreGlobal.certStoreSecretName
sslRootCertName: Null
connectionPoolSize: 30
connectionPoolMaxOverflow: 100
internalServicesSsl:
# Enable to force all anchore-engine services to communicate internally using SSL
enabled: false
# specify whether the cert is verified against the local certificate bundle (set to false to allow self-signed certs)
verifyCerts: false
certSecretKeyName: Null
certSecretCertName: Null
# To enable webhooks, set webhooksEnabled: true
webhooksEnabled: true
# Configure webhook outputs here. The service provides these webhooks for notifying external systems of updates
webhooks:
# User and password to be set (using HTTP basic auth) on all webhook calls if necessary
webhook_user: Null
webhook_pass: Null
ssl_verify: false
# Endpoint for general notification delivery. These events are image/tag updates etc. This is globally configured
# and updates for all users are sent to the same host but with a different path for each user.
# <notification_type>/<userId> are required as documented at end of URI - only hostname:port should be configured.
general:
url: http://busynix.default
# url: "http://somehost:9090/<notification_type>/<userId>"
# Allow configuration of Kubernetes probes
probes:
liveness:
initialDelaySeconds: 120
timeoutSeconds: 10
periodSeconds: 10
failureThreshold: 6
successThreshold: 1
readiness:
timeoutSeconds: 10
periodSeconds: 10
failureThreshold: 3
successThreshold: 1
# Configuration for the analyzer pods that perform image analysis
# There may be many of these analyzers but best practice is to not have more than one per node since analysis
# is very IO intensive. Use of affinity/anti-affinity rules for scheduling the analyzers is future work.
anchoreAnalyzer:
replicaCount: 1
containerPort: 8084
# Set extra environment variables. These will be set only on analyzer containers.
extraEnv: []
# - name: foo
# value: bar
# The cycle timer is the interval between checks to the work queue for new jobs
cycleTimers:
image_analyzer: 5
# Controls the concurrency of the analyzer itself. It can be configured to process more than one task at a time, but analysis is IO bound, so more tasks may not
# necessarily be faster depending on hardware. Test and balance this value against the number of analyzers for your cluster's performance.
concurrentTasksPerWorker: 1
# Image layer caching can be enabled to speed up image downloads before analysis.
# This chart sets up a scratch directory for all analyzer pods using the values found at anchoreGlobal.scratchVolume.
# When setting anchoreAnalyzer.layerCacheMaxGigabytes, ensure the scratch volume has sufficient storage space.
# For more info see - https://docs.anchore.com/current/docs/engine/engine_installation/storage/layer_caching/
# Enable image layer caching by setting a cache size > 0GB.
layerCacheMaxGigabytes: 0
# Enable the ability to read a user-supplied 'hints' file to allow users to override and/or augment the software artifacts that are discovered by anchore during its image analysis process.
# Once enabled, the analyzer services will look for a file with a specific name, location and format located within the container image - /anchore_hints.json
# For more info see - https://docs.anchore.com/current/docs/engine/engine_installation/configuration/content_hints
enableHints: false
configFile:
# Anchore analyzer config file
#
# WARNING - malforming this file can cause the analyzer to fail on all image analysis
#
# Options for any analyzer module(s) that takes customizable input
#
# example configuration for the 'retrieve_files' analyzer, if installed
retrieve_files:
file_list:
- '/etc/passwd'
# - '/etc/services'
# - '/etc/sudoers'
# example configuration for the 'secret_search' analyzer, if installed
secret_search:
match_params:
- MAXFILESIZE=10000
- STOREONMATCH=n
regexp_match:
- "AWS_ACCESS_KEY=(?i).*aws_access_key_id( *=+ *).*(?<![A-Z0-9])[A-Z0-9]{20}(?![A-Z0-9]).*"
- "AWS_SECRET_KEY=(?i).*aws_secret_access_key( *=+ *).*(?<![A-Za-z0-9/+=])[A-Za-z0-9/+=]{40}(?![A-Za-z0-9/+=]).*"
- "PRIV_KEY=(?i)-+BEGIN(.*)PRIVATE KEY-+"
- "DOCKER_AUTH=(?i).*\"auth\": *\".+\""
- "API_KEY=(?i).*api(-|_)key( *=+ *).*(?<![A-Z0-9])[A-Z0-9]{20,60}(?![A-Z0-9]).*"
# - "ALPINE_NULL_ROOT=^root:::0:::::$"
# content_search:
# match_params:
# - MAXFILESIZE=10000
# regexp_match:
# - "EXAMPLE_MATCH="
# Uncomment the 'malware' section to enable use of the open-source ClamAV malware scanner to detect malicious code embedded in container images.
# This scan occurs only at analysis time when the image content itself is available, and the scan results are available via the Engine API as well as
# for consumption in new policy gates to allow gating of images with malware findings.
# For more detailed configuration info see - https://docs.anchore.com/current/docs/engine/general/concepts/images/analysis/malware_scanning
#
malware:
clamav:
enabled: true
db_update_enabled: true
# resources:
# limits:
# cpu: 1
# memory: 4G
# requests:
# cpu: 1
# memory: 1G
labels: {}
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Pod configuration for the anchore engine api service.
anchoreApi:
replicaCount: 1
# Set extra environment variables. These will be set on all api containers.
extraEnv: []
# - name: foo
# value: bar
# kubernetes service configuration for anchore external API
service:
type: ClusterIP
port: 8228
annotations: {}
label: {}
# (Optional) Overrides for constructing API URLs. All values are optional.
# external:
# use_tls: true
# hostname: anchore-api.example.com
# port: 8443
# resources:
# limits:
# cpu: 1
# memory: 4G
# requests:
# cpu: 100m
# memory: 1G
labels: {}
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
anchoreCatalog:
replicaCount: 1
# Set extra environment variables. These will be set on all catalog containers.
extraEnv: []
# - name: foo
# value: bar
# Intervals to run specific events on (seconds)
cycleTimers:
# Interval to check for an update to a tag
image_watcher: 3600
# Interval to run a policy evaluation on images with the policy_eval subscription activated.
policy_eval: 3600
# Interval to run a vulnerability scan on images with the vuln_update subscription activated.
vulnerability_scan: 14400
# Interval at which the catalog looks for new work to put on the image analysis queue.
analyzer_queue: 1
# Interval notifications will be processed for state changes
notifications: 30
# Interval at which service state updates are polled for the system status
service_watcher: 15
# Interval between checks to repo for new tags
repo_watcher: 60
# Event log configuration for webhooks
events:
notification:
enabled: false
# Send notifications for events with severity level that matches items in this list
level:
- error
# - info
archive:
compression:
enabled: true
min_size_kbytes: 100
storage_driver:
# Valid storage driver names: 'db', 's3', 'swift'
name: s3
config:
url: https://minio.staging.itpartner.no
bucket: anchore
access_key: Mkd324ijlnfll23883
secret_key: KJQfefrnflol93jpj31mrkjs3i88sj2L
create_bucket: true
# kubernetes service configuration for anchore catalog api
service:
type: ClusterIP
port: 8082
annotations: {}
labels: {}
# resources:
# limits:
# cpu: 1
# memory: 2G
# requests:
# cpu: 100m
# memory: 500M
labels: {}
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Pod configuration for the anchore engine policy service.
anchorePolicyEngine:
replicaCount: 1
# Set extra environment variables. These will be set on all policy engine containers.
extraEnv: []
# - name: foo
# value: bar
# Intervals to run specific events on (seconds)
cycleTimers:
# Interval to run a feed sync to get latest cve data
feed_sync: 14400
# Interval between checks to see if there needs to be a task queued
feed_sync_checker: 3600
# kubernetes service configuration for anchore policy engine api
service:
type: ClusterIP
port: 8087
annotations: {}
labels: {}
# resources:
# limits:
# cpu: 1
# memory: 4G
# requests:
# cpu: 100m
# memory: 1G
labels: {}
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}
# Pod configuration for the anchore engine simplequeue service.
anchoreSimpleQueue:
replicaCount: 1
# Set extra environment variables. These will be set on all simplequeue containers.
extraEnv: []
# - name: foo
# value: bar
# kubernetes service configuration for anchore simplequeue api
service:
type: ClusterIP
port: 8083
annotations: {}
labels: {}
# resources:
# limits:
# cpu: 1
# memory: 1G
# requests:
# cpu: 100m
# memory: 256M
labels: {}
annotations: {}
nodeSelector: {}
tolerations: []
affinity: {}


@@ -0,0 +1,29 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: busynix
labels:
app: busynix
spec:
replicas: 1
selector:
matchLabels:
app: busynix
template:
metadata:
labels:
app: busynix
spec:
dnsPolicy: Default
containers:
- image: registry.gitlab.com/serit/k8s/busynix:1.1
name: busynix
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- "-c"
- "while true; do echo ping | nc -l -p 8000; done"
ports:
- containerPort: 8000
imagePullSecrets:
- name: gitlab-pull-secret
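The anchore webhook URL above (http://busynix.default) implies a Service named busynix in the default namespace, which does not appear in this diff. A minimal sketch, assuming port 80 should forward to the container's port 8000:

apiVersion: v1
kind: Service
metadata:
  name: busynix
  namespace: default
spec:
  selector:
    app: busynix
  ports:
    - name: http
      port: 80
      targetPort: 8000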

12 charts/busynix/deploy.sh Executable file

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
namespace=default
charts=(
busynix.yaml
)
. ../config.sh
kubectl_apply $namespace "${charts[@]}"


@@ -0,0 +1,9 @@
apiVersion: v1
data:
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURhakNDQWxLZ0F3SUJBZ0lVWXVkVEZ2VEpEYW1QUHRSNFh6dzJTMGROMzZjd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUtFd1ZPYVhoUFV6RW5NQ1VHQTFVRQpDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWak1CNFhEVEU1TVRBeE5UQTVNakV3Ck1Gb1hEVEkwTVRBeE16QTVNakV3TUZvd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUsKRXdWT2FYaFBVekVuTUNVR0ExVUVDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWagpNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXVrV0lONVhwRkI2NTJuazdzUGx1CklqNTZTY05sakpOV0JZZXNWaDE4MjhNUE9VVHpuZTgxeUhTMHg3WFVtcGN5VDdDNmRQdlVWckdZUUZCMnZqN0oKcm9Td04xWGtZeW10YXVQVFpqV2J6ZVdSbG1nZ0dpajFhUUZOZy9LelkzZTBkcGdVUEk3TXZQcjFJU001c3JuWgp0YlRRckZmMkVsc3dvVENPMWNYRGMyNTJnblphRW56ZlJEN0o2eXoyVXYvMUZSei9aY29DZVViSnJkNWJjTkk5CmdKYU95MDE0MEdHRzY3WWRtZXBjQWJXeTFOYkFNWlJCamFUUStmZUVWa0p4UGNRNWZqMUhrQ0RuTHJjeEpmdzEKSWhvZFZlNFdLTkhyaUFGR3JTS3JIM1VLb045RFVtU1RGOVVEUWtOQS9zNTRkaEJyMWJFa25lMW1EbGwwYWZhWApqd0lEQVFBQm8wSXdRREFPQmdOVkhROEJBZjhFQkFNQ0FRWXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WCkhRNEVGZ1FVUFQ2THhyWkNVcFEyakRtQ01DRGQ2aVVMbXdnd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLaXMKaFc2bGRBVjZjbnZlM3dhem5aRTVzT1B5T1hRWFdEc2x0a3RzU3B6WU9ocjU5WTZ3Q2oxL05vbGRRaXY4UUJ4TwpQYVF3QUJSL3Evc0p1bUhEODJwSzRwbDZXOThWTDFPdW1wOEoxZjFGV0ZqKzJpMitObG1UUzlHU3FhcGRMdTJoCk9YTUJ6M0JDZFZWSVQ4RGhzZVRBL21WeXlRV1h3LzlsUTYxT1N1c1BubmUwcDBwbkJGTU1TVUhNWFZWeGsrK2UKYzdNTGVVcXFObHliM1JDeGNVQkVTa1h3d1dIaFhhdVR0OTlGQjB5SFNKeTMxd0FNL2pHeUJhdlphb3VMRGticQowNXdwQ3dxQzl6SEFlZElUcThXOUhOQVA5Q1FjejdsV3lDRHhkZ2orN2hoeEdrUUoyYmpFMGxlWlA1bXphRXUyCjdPYUlDVkR0cGE5T2FXY3FpSUE9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdWtXSU41WHBGQjY1Mm5rN3NQbHVJajU2U2NObGpKTldCWWVzVmgxODI4TVBPVVR6Cm5lODF5SFMweDdYVW1wY3lUN0M2ZFB2VVZyR1lRRkIydmo3SnJvU3dOMVhrWXltdGF1UFRaaldiemVXUmxtZ2cKR2lqMWFRRk5nL0t6WTNlMGRwZ1VQSTdNdlByMUlTTTVzcm5adGJUUXJGZjJFbHN3b1RDTzFjWERjMjUyZ25aYQpFbnpmUkQ3SjZ5ejJVdi8xRlJ6L1pjb0NlVWJKcmQ1YmNOSTlnSmFPeTAxNDBHR0c2N1lkbWVwY0FiV3kxTmJBCk1aUkJqYVRRK2ZlRVZrSnhQY1E1ZmoxSGtDRG5McmN4SmZ3MUlob2RWZTRXS05IcmlBRkdyU0tySDNVS29OOUQKVW1TVEY5VURRa05BL3M1NGRoQnIxYkVrbmUxbURsbDBhZmFYandJREFRQUJBb0lCQURlY2RyU1Jyd3B3dWUvOQo3ZEpDUlowM3BlN0x4VStZMVQvRlo3QTJFWWJTejRLN2tUZi9xTUQyQnRydzhFNVBBWmhGSFFXOEFZTDRkb29NCis4YUVuRlhjQzNnWnpwdkVYNS9mOE1jOURnMEVrdGlpc2FMZHc1YmI2cmFRQ1BxVFRHOXpxK2NBZ3lPUWtRcVoKcDZveVRrc3ZkblZkdFh0NWVpVkZoLzlPa2JHdUw3Y0RFNm03c1dDOVIwemdQQmFtTThXNDBJaGJqcVBod25mSgpYMmd0TkFzMjhpNE05aHIweVlKbTk4VHh5Tk9laVlxdFRrRkFROGVRRzRvck9mSDVvTTdONmcxc295VHpYOFlhCkorZHdHQnpuMFFQV25UVGpMUy9uSm9Ld3BEc3AySngvbTluSUFGanl4WERkT0xHVERQMW1hbFZsVHAyM1VMV0gKNE8vd1hJRUNnWUVBOVBTZ1BmSzNCZjBaeEpHNVFKWGNIdzRUM1pWSFRVV2FzNFpoc1hCRWd0SnQ5NHdIamh2egpuUTV0LzJlMlNNSXU1Q1g5MnRVYjBGQTRqRW8ybDdBNTlXY2NscWRQS0pQR2QveENvbXFQcTZsK0NWckh4NkhsCncvRTJpdHRrVWpicDF2ZjlFWTBsRGwwdkJ1QzVzQlY5OEZET2lFaThCSC92VnowR3QvZm53cThDZ1lFQXdxdU4KZkRwZUtEOHVWZ3lOQUdrSVlXWDl1RFkvL2pENHdQVXJGVktIalB4Y2ZWUDAra2FYVzNnM0cwcnBaU2xLME1LeQprdk9PaW1wb0svUk5wMUJ4bHZnSkdaL0NZeHNsWWZhSUtEenR1OGRnTlFKbktjU0s4S3krQ1c5cWQ2SEk3MGRUCnRManN6TnlGWnR6YmxQVDlJS1ZVb09kUjNUZ0JEOFpaNW1VZE1TRUNnWUVBMTBWZ3lmQlRMbDBuVnh2bDNUK1cKU21oNXh1TVkzV3pOTHZaclJwL3VaYlgrMUcxb2VsaFFPUGRrYmUrOFA2Yi94SnNxeERWREUwaGMyZFdpc3A0NQo0VlB6eU9aU1o4dXV3T1dkdmRTK1hGTkJJNEYzVHhjVnNLUjhsWDJIWmNWQ3Jod1VlR2M5YUtrMTJlcUc1WnVOCnFUT3F2aFNGdjhLYkdXVFZVYm41SUpzQ2dZQXNiQTlXT00rQnFhRStPNUQxZGdKV2ozcU5UYnZlbW5nNU4va0QKM1poWEFNTnVTZmxPSDB1c3NNc0tiRmJFSWQ4VHNrb2pwSFp6ZVlLWi9SNjNQbkVkUFM1K2JXbGpTaStHb2taQgp3RjJUaXhTV0pCMDhkOEFvMlpKbi9zZXgwdlpTTzltTEZPUGNmN25sVmlLNVpBcGJKNzhmRklvbXkvL2FCSzVCCkYvUElZUUtCZ0J1dVR1MVFhVVBTczE2Zi81MTVPSjJaQXYvOE82bXRmWjJnQ0RJSWYvZG04VFFOQTE5UnVBeXUKYU1WMGxPMXphSC9tR0ZFSkRyYUNFVkFSd1MzNkY0eWFyNUI4WDdtaTVsdGZUTm5hZEsyZDJuaWRldjFMWlZETgo2K3pHaUpIb1BTaEJXRjdYanh5aEdwcmdGczZhdE5Fc28zTFpHMEdZSHBHOFNGakljb1VmCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
kind: Secret
metadata:
name: cluster-ca
namespace: cert-manager
type: kubernetes.io/tls


@@ -0,0 +1,21 @@
#!/usr/bin/env bash
version="v1.0.4"
namespace=kube-system
charts=(
cluster-ca.yaml
cluster-issuer.yaml
front-proxy-client.yaml
)
. ../config.sh
kubectl_apply $namespace "${charts[@]}"
kubectl apply \
-n $namespace \
--validate=false \
-f https://github.com/jetstack/cert-manager/releases/download/$version/cert-manager.yaml
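The cluster-issuer.yaml applied above is not part of this diff. For cert-manager v1.0.4, a CA issuer named ca-issuer (as referenced by the ingress annotations elsewhere in this commit) backed by the cluster-ca secret would look roughly like this sketch:

apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: ca-issuer
spec:
  ca:
    secretName: cluster-ca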

34 charts/config.sh Executable file

@@ -0,0 +1,34 @@
#!/usr/bin/env bash
# The @...@ placeholders below are filled in at build time.
vars=(
  initca="@initca@"
  apiserver="@apiserver@"
  cluster="@cluster@"
  ingress_nodes="@ingress_nodes@"
  fileserver="@fileserver@"
  acme_email="@acme_email@"
  grafana_ldap_toml="@grafana_ldap_toml@"
  grafana_smtp_user="@grafana_smtp_user@"
  grafana_smtp_password="@grafana_smtp_password@"
)
# Make each name=value pair above available as a plain shell variable for the deploy scripts.
for _kv in "${vars[@]}"; do declare "$_kv"; done
# Read a manifest on stdin and replace every @name@ placeholder with its value.
make_substitutions () {
  local x i k v
  x=$(cat)
  for i in "${vars[@]}"; do
    k=${i%%=*}
    v=${i#*=}
    x=$(printf '%s\n' "$x" | sed "s|@$k@|$v|g")
  done
  printf '%s\n' "$x"
}
# Substitute placeholders in each chart file and apply it to the given namespace.
kubectl_apply () {
  local ns=$1; shift
  local i
  for i in "$@"; do
    make_substitutions < "$i" | kubectl apply -n "$ns" -f -
  done
}
# vim:ft=sh

19 charts/ingress-nginx/deploy.sh Executable file

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
namespace=kube-system
charts=()
. ../config.sh
kubectl_apply "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
ingress \
ingress-nginx/ingress-nginx \
| make_substitutions \
| kubectl apply -n $namespace -f -


@@ -0,0 +1,688 @@
## nginx configuration
## Ref: https://github.com/kubernetes/ingress-nginx/blob/master/controllers/nginx/configuration.md
##
controller:
image:
repository: k8s.gcr.io/ingress-nginx/controller
tag: "v0.40.2"
digest: sha256:46ba23c3fbaafd9e5bd01ea85b2f921d9f2217be082580edc22e6c704a83f02f
pullPolicy: IfNotPresent
# www-data -> uid 101
runAsUser: 101
allowPrivilegeEscalation: true
# Configures the ports the nginx-controller listens on
containerPort:
http: 80
https: 443
# Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
config: {}
## Annotations to be added to the controller configuration configmap
##
configAnnotations: {}
# Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers
proxySetHeaders: {}
# Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers
addHeaders: {}
# Optionally customize the pod dnsConfig.
dnsConfig: {}
# Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.
# By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller
# to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.
dnsPolicy: ClusterFirst
# Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network
# When using the host network there is no Service exposing the NGINX Ingress controller, so the ingress status field stays blank; the default --publish-service flag used in standard cloud setups does not apply
reportNodeInternalIp: false
# Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),
# since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920
# is merged
hostNetwork: true
## Use host ports 80 and 443
## Disabled by default
##
hostPort:
enabled: false
ports:
http: 80
https: 443
## Election ID to use for status update
##
electionID: ingress-controller-leader
## Name of the ingress class to route through this controller
##
ingressClass: nginx
# labels to add to the pod container metadata
podLabels: {}
# key: value
## Security Context policies for controller pods
##
podSecurityContext: {}
## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
## notes on enabling and using sysctls
###
sysctls: {}
# sysctls:
# "net.core.somaxconn": "8192"
## Allows customization of the source of the IP address or FQDN to report
## in the ingress status field. By default, it reads the information provided
## by the service. If disabled, the status field reports the IP address of the
## node or nodes where an ingress controller pod is running.
publishService:
enabled: true
## Allows overriding of the publish service to bind to
## Must be <namespace>/<service_name>
##
pathOverride: ""
## Limit the scope of the controller
##
scope:
enabled: false
namespace: "" # defaults to .Release.Namespace
## Allows customization of the configmap / nginx-configmap namespace
##
configMapNamespace: "" # defaults to .Release.Namespace
## Allows customization of the tcp-services-configmap
##
tcp:
configMapNamespace: "" # defaults to .Release.Namespace
## Annotations to be added to the tcp config configmap
annotations: {}
## Allows customization of the udp-services-configmap
##
udp:
configMapNamespace: "" # defaults to .Release.Namespace
## Annotations to be added to the udp config configmap
annotations: {}
## Additional command line arguments to pass to nginx-ingress-controller
## E.g. to specify the default SSL certificate you can use
## extraArgs:
## default-ssl-certificate: "<namespace>/<secret_name>"
extraArgs: {}
## Additional environment variables to set
extraEnvs: []
# extraEnvs:
# - name: FOO
# valueFrom:
# secretKeyRef:
# key: FOO
# name: secret-resource
## DaemonSet or Deployment
##
kind: Deployment
## Annotations to be added to the controller Deployment or DaemonSet
##
annotations: {}
# keel.sh/pollSchedule: "@every 60m"
## Labels to be added to the controller Deployment or DaemonSet
##
labels: {}
# keel.sh/policy: patch
# keel.sh/trigger: poll
# The update strategy to apply to the Deployment or DaemonSet
##
updateStrategy: {}
# rollingUpdate:
# maxUnavailable: 1
# type: RollingUpdate
# minReadySeconds to avoid killing pods before we are ready
##
minReadySeconds: 0
## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Equal
value: "true"
- effect: NoSchedule
key: unschedulable
operator: Equal
value: "true"
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
## Affinity and anti-affinity
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values: [ @ingress_nodes@ ]
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- ingress-nginx
- key: app.kubernetes.io/instance
operator: In
values:
- ingress-nginx
- key: app.kubernetes.io/component
operator: In
values:
- controller
topologyKey: "kubernetes.io/hostname"
# # An example of preferred pod anti-affinity, weight is in the range 1-100
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 100
# podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/instance
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/component
# operator: In
# values:
# - controller
# topologyKey: kubernetes.io/hostname
## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
##
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: failure-domain.beta.kubernetes.io/zone
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: ingress-nginx-internal
## terminationGracePeriodSeconds
## wait up to five minutes for the drain of connections
##
terminationGracePeriodSeconds: 300
## Node labels for controller pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector:
kubernetes.io/os: linux
## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
failureThreshold: 5
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
port: 10254
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
port: 10254
# Path of the health check endpoint. All requests received on the port defined by
# the healthz-port parameter are forwarded internally to this path.
healthCheckPath: "/healthz"
## Annotations to be added to controller pods
##
podAnnotations: {}
replicaCount: @ingress_replicas@
minAvailable: 3
# Define requests resources to avoid probe issues due to CPU utilization in busy nodes
# ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903
# Ideally, there should be no limits.
# https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/
resources:
# limits:
# cpu: 100m
# memory: 90Mi
requests:
cpu: 100m
memory: 90Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 11
targetCPUUtilizationPercentage: 50
targetMemoryUtilizationPercentage: 50
autoscalingTemplate: []
# Custom or additional autoscaling metrics
# ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics
# - type: Pods
# pods:
# metric:
# name: nginx_ingress_controller_nginx_process_requests_total
# target:
# type: AverageValue
# averageValue: 10000m
## Enable mimalloc as a drop-in replacement for malloc.
## ref: https://github.com/microsoft/mimalloc
##
enableMimalloc: true
## Override NGINX template
customTemplate:
configMapName: ""
configMapKey: ""
service:
enabled: true
annotations: {}
labels: {}
# clusterIP: ""
## List of IP addresses at which the controller services are available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
# loadBalancerIP: ""
loadBalancerSourceRanges: []
enableHttp: true
enableHttps: true
## Set external traffic policy to: "Local" to preserve source IP on
## providers supporting it
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
# externalTrafficPolicy: ""
# Must be either "None" or "ClientIP" if set. Kubernetes will default to "None".
# Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
# sessionAffinity: ""
# specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn't specified,
# the service controller allocates a port from your cluster's NodePort range.
# Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
# healthCheckNodePort: 0
ports:
http: 80
https: 443
targetPorts:
http: http
https: https
type: ClusterIP
# type: NodePort
# nodePorts:
# http: 32080
# https: 32443
# tcp:
# 8080: 32808
nodePorts:
http: 30080
https: 30443
tcp: {}
udp: {}
## Enables an additional internal load balancer (besides the external one).
## Annotations are mandatory for the load balancer to come up. Varies with the cloud service.
internal:
enabled: false
annotations: {}
## Restrict access for the LoadBalancer service. Defaults to 0.0.0.0/0.
loadBalancerSourceRanges: []
## Set external traffic policy to: "Local" to preserve source IP on
## providers supporting it
## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer
# externalTrafficPolicy: ""
extraContainers: []
## Additional containers to be added to the controller pod.
## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
# - name: my-sidecar
# image: nginx:latest
# - name: lemonldap-ng-controller
# image: lemonldapng/lemonldap-ng-controller:0.2.0
# args:
# - /lemonldap-ng-controller
# - --alsologtostderr
# - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration
# env:
# - name: POD_NAME
# valueFrom:
# fieldRef:
# fieldPath: metadata.name
# - name: POD_NAMESPACE
# valueFrom:
# fieldRef:
# fieldPath: metadata.namespace
# volumeMounts:
# - name: copy-portal-skins
# mountPath: /srv/var/lib/lemonldap-ng/portal/skins
extraVolumeMounts: []
## Additional volumeMounts to the controller main container.
# - name: copy-portal-skins
# mountPath: /var/lib/lemonldap-ng/portal/skins
extraVolumes: []
## Additional volumes to the controller pod.
# - name: copy-portal-skins
# emptyDir: {}
extraInitContainers: []
## Containers, which are run before the app containers are started.
# - name: init-myservice
# image: busybox
# command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;']
admissionWebhooks:
annotations: {}
enabled: true
failurePolicy: Fail
# timeoutSeconds: 10
port: 8443
certificate: "/usr/local/certificates/cert"
key: "/usr/local/certificates/key"
namespaceSelector: {}
objectSelector: {}
service:
annotations: {}
# clusterIP: ""
externalIPs: []
# loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 443
type: ClusterIP
patch:
enabled: true
image:
repository: docker.io/jettech/kube-webhook-certgen
tag: v1.5.0
pullPolicy: IfNotPresent
## Provide a priority class name to the webhook patching job
##
priorityClassName: ""
podAnnotations: {}
nodeSelector: {}
tolerations: []
runAsUser: 2000
metrics:
port: 10254
# if this port is changed, change healthz-port: in extraArgs: accordingly
enabled: true
service:
annotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "10254"
# clusterIP: ""
## List of IP addresses at which the stats-exporter service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
# loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 9913
type: ClusterIP
# externalTrafficPolicy: ""
# nodePort: ""
serviceMonitor:
enabled: true
additionalLabels: {}
namespace: ""
namespaceSelector: {}
# Default: scrape .Release.Namespace only
# To scrape all, use the following:
# namespaceSelector:
# any: true
scrapeInterval: 30s
# honorLabels: true
targetLabels: []
metricRelabelings: []
prometheusRule:
enabled: false
additionalLabels: {}
# namespace: ""
rules: []
# # These are just example rules; please adapt them to your needs
# - alert: NGINXConfigFailed
# expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0
# for: 1s
# labels:
# severity: critical
# annotations:
# description: bad ingress config - nginx config test failed
# summary: uninstall the latest ingress changes to allow config reloads to resume
# - alert: NGINXCertificateExpiry
# expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800
# for: 1s
# labels:
# severity: critical
# annotations:
# description: ssl certificate(s) will expire in less than a week
# summary: renew expiring certificates to avoid downtime
# - alert: NGINXTooMany500s
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
# for: 1m
# labels:
# severity: warning
# annotations:
# description: Too many 5XXs
# summary: More than 5% of all requests returned 5XX, this requires your attention
# - alert: NGINXTooMany400s
# expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
# for: 1m
# labels:
# severity: warning
# annotations:
# description: Too many 4XXs
# summary: More than 5% of all requests returned 4XX, this requires your attention
## Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
## With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
## to 300, allowing the draining of connections up to five minutes.
## If the active connections end before that, the pod will terminate gracefully at that time.
## To take full advantage of this feature, the ConfigMap option
## worker-shutdown-timeout is raised to 240s from its default of 10s.
##
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
priorityClassName: ""
## Rollback limit
##
revisionHistoryLimit: 10
# Maxmind license key to download GeoLite2 Databases
# https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases
maxmindLicenseKey: ""
## Default 404 backend
##
defaultBackend:
##
enabled: true
image:
repository: k8s.gcr.io/defaultbackend-amd64
tag: "1.5"
pullPolicy: IfNotPresent
# nobody user -> uid 65534
runAsUser: 65534
extraArgs: {}
serviceAccount:
create: true
name:
## Additional environment variables to set for defaultBackend pods
extraEnvs: []
port: 8080
## Readiness and liveness probes for default backend
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
readinessProbe:
failureThreshold: 6
initialDelaySeconds: 0
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
## Node tolerations for server scheduling to nodes with taints
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
##
tolerations: []
# - key: "key"
# operator: "Equal|Exists"
# value: "value"
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
affinity: {}
## Security Context policies for controller pods
## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for
## notes on enabling and using sysctls
##
podSecurityContext: {}
# labels to add to the pod container metadata
podLabels: {}
# key: value
## Node labels for default backend pod assignment
## Ref: https://kubernetes.io/docs/user-guide/node-selection/
##
nodeSelector: {}
## Annotations to be added to default backend pods
##
podAnnotations: {}
replicaCount: 1
minAvailable: 1
resources: {}
# limits:
# cpu: 10m
# memory: 20Mi
# requests:
# cpu: 10m
# memory: 20Mi
service:
annotations: {}
# clusterIP: ""
## List of IP addresses at which the default backend service is available
## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
##
externalIPs: []
# loadBalancerIP: ""
loadBalancerSourceRanges: []
servicePort: 80
type: ClusterIP
priorityClassName: ""
## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266
rbac:
create: true
scope: false
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
enabled: false
serviceAccount:
create: true
name:
## Optional array of imagePullSecrets containing private registry credentials
## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# - name: secretName
# TCP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp
##
tcp: {}
# 8080: "default/example-tcp-svc:9000"
# UDP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp
##
udp: {}
# 53: "kube-system/kube-dns:53"

16 charts/kube-system/deploy.sh Executable file

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
namespace=kube-system
charts=(
cluster-auth-rbac.yaml
kube-proxy.yaml
)
. ../config.sh
kubectl_apply $namespace "${charts[@]}"
kubectl delete secret cluster-ca -n $namespace
kubectl create secret tls cluster-ca \
--namespace=$namespace --cert=$initca/ca.pem --key=$initca/ca-key.pem


@@ -17,8 +17,7 @@ kind: ClusterRole
metadata:
name: kube-proxy-role
rules:
-
apiGroups:
- apiGroups:
- ""
resources:
- endpoints
@@ -28,9 +27,7 @@ rules:
verbs: ["get", "watch", "list"]
- nonResourceURLs: ["*"]
verbs: ["get", "watch", "list"]
-
apiGroups:
- apiGroups:
- ""
resources:
- events


@@ -12,7 +12,7 @@ metadata:
nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
rules:
- host: dashboard.k2.itpartner.no
- host: dashboard.@cluster@.itpartner.no
http:
paths:
- backend:
@@ -20,5 +20,5 @@ spec:
servicePort: 443
tls:
- hosts:
- dashboard.k2.itpartner.no
- dashboard.@cluster@.itpartner.no
secretName: kubernetes-dashboard-tls


@@ -0,0 +1,17 @@
#!/usr/bin/env bash
version="v2.0.4"
namespace=kubernetes-dashboard
charts=(
dashboard-ingress.yaml
)
. ../config.sh
kubectl create ns $namespace
kubectl_apply $namespace "${charts[@]}"
kubectl apply \
-n $namespace \
-f https://raw.githubusercontent.com/kubernetes/dashboard/$version/aio/deploy/recommended.yaml

19 charts/metrics-server/deploy.sh Executable file

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
namespace=kube-system
charts=()
. ../config.sh
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
metrics-server \
stable/metrics-server \
| make_substitutions \
| sed 's/8443/6443/g' \
| kubectl apply -f -

18 charts/minio/deploy.sh Normal file

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
namespace=minio
charts=()
. ../config.sh
kubectl create ns $namespace
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
minio minio/minio \
| make_substitutions \
| kubectl apply -n $namespace -f -


@@ -0,0 +1,18 @@
#!/usr/bin/env bash
namespace=kube-system
charts=()
. ../config.sh
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
nfs-client-provisioner \
stable/nfs-client-provisioner \
| make_substitutions \
| kubectl apply -n $namespace -f -


@@ -0,0 +1,19 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
app: app-monitor
release: prometheus-operator # required by Prometheus CRD
name: app-monitor
namespace: kube-system
spec:
endpoints:
- interval: 30s
port: http
path: /metrics
jobLabel: app.kubernetes.io/instance
namespaceSelector:
any: true
selector:
matchLabels:
prometheus.io/monitor: http
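For a workload to be picked up by the app-monitor ServiceMonitor above, its Service must carry the prometheus.io/monitor: http label and expose a port named http that serves /metrics. A hypothetical example (names are illustrative only):

apiVersion: v1
kind: Service
metadata:
  name: example-app
  namespace: default
  labels:
    prometheus.io/monitor: http
spec:
  selector:
    app: example-app
  ports:
    - name: http
      port: 8080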


@@ -0,0 +1,57 @@
#!/usr/bin/env bash
crd_version="v0.42.0"
namespace=prometheus
charts=(
etcd-cert-secret.yaml
app-servicemonitor.yaml
grafana-ldap-toml.yaml
grafana-smtp-secret.yaml
prometheus-etcd-cert.yaml
gitlab-redis-servicemonitor.yaml
gitlab-servicemonitor.yaml
ingress-nginx-servicemonitor.yaml
)
. ../config.sh
install_prometheus_crds () {
crd=(
monitoring.coreos.com_alertmanagers.yaml
monitoring.coreos.com_podmonitors.yaml
monitoring.coreos.com_probes.yaml
monitoring.coreos.com_prometheuses.yaml
monitoring.coreos.com_prometheusrules.yaml
monitoring.coreos.com_servicemonitors.yaml
monitoring.coreos.com_thanosrulers.yaml
)
url=https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/$crd_version/example/prometheus-operator-crd
for i in "${crd[@]}"; do
kubectl apply -f $url/$i
done
}
disable_unset () {
  local yaml
  yaml=$(cat)
  # disable ldap for grafana when no ldap.toml is configured
  [ -z "$grafana_ldap_toml" ] && \
    yaml=$(echo "$yaml" | sed '/auth\.ldap:/,+1 s/true/false/; /ldap:/,+1 d')
  # disable storage when no fileserver is configured
  [ -z "$fileserver" ] && \
    yaml=$(echo "$yaml" | sed '/prometheusSpec:/,+10d')
  echo "$yaml"
}
kubectl create ns $namespace
install_prometheus_crds
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
prometheus \
prometheus-community/kube-prometheus-stack \
| make_substitutions \
| kubectl apply -n $namespace -f -
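Note that disable_unset is defined above but never called; presumably it is meant to sit in the template pipeline, roughly like this sketch:

helm template \
  -n $namespace \
  -f values.yaml \
  prometheus \
  prometheus-community/kube-prometheus-stack \
  | make_substitutions \
  | disable_unset \
  | kubectl apply -n $namespace -f -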


@@ -0,0 +1,12 @@
apiVersion: v1
kind: Secret
metadata:
labels:
app: prometheus-operator
name: etcd-cert
namespace: prometheus
data:
ca.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURhakNDQWxLZ0F3SUJBZ0lVWXVkVEZ2VEpEYW1QUHRSNFh6dzJTMGROMzZjd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUtFd1ZPYVhoUFV6RW5NQ1VHQTFVRQpDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWak1CNFhEVEU1TVRBeE5UQTVNakV3Ck1Gb1hEVEkwTVRBeE16QTVNakV3TUZvd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUsKRXdWT2FYaFBVekVuTUNVR0ExVUVDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWagpNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXVrV0lONVhwRkI2NTJuazdzUGx1CklqNTZTY05sakpOV0JZZXNWaDE4MjhNUE9VVHpuZTgxeUhTMHg3WFVtcGN5VDdDNmRQdlVWckdZUUZCMnZqN0oKcm9Td04xWGtZeW10YXVQVFpqV2J6ZVdSbG1nZ0dpajFhUUZOZy9LelkzZTBkcGdVUEk3TXZQcjFJU001c3JuWgp0YlRRckZmMkVsc3dvVENPMWNYRGMyNTJnblphRW56ZlJEN0o2eXoyVXYvMUZSei9aY29DZVViSnJkNWJjTkk5CmdKYU95MDE0MEdHRzY3WWRtZXBjQWJXeTFOYkFNWlJCamFUUStmZUVWa0p4UGNRNWZqMUhrQ0RuTHJjeEpmdzEKSWhvZFZlNFdLTkhyaUFGR3JTS3JIM1VLb045RFVtU1RGOVVEUWtOQS9zNTRkaEJyMWJFa25lMW1EbGwwYWZhWApqd0lEQVFBQm8wSXdRREFPQmdOVkhROEJBZjhFQkFNQ0FRWXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WCkhRNEVGZ1FVUFQ2THhyWkNVcFEyakRtQ01DRGQ2aVVMbXdnd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLaXMKaFc2bGRBVjZjbnZlM3dhem5aRTVzT1B5T1hRWFdEc2x0a3RzU3B6WU9ocjU5WTZ3Q2oxL05vbGRRaXY4UUJ4TwpQYVF3QUJSL3Evc0p1bUhEODJwSzRwbDZXOThWTDFPdW1wOEoxZjFGV0ZqKzJpMitObG1UUzlHU3FhcGRMdTJoCk9YTUJ6M0JDZFZWSVQ4RGhzZVRBL21WeXlRV1h3LzlsUTYxT1N1c1BubmUwcDBwbkJGTU1TVUhNWFZWeGsrK2UKYzdNTGVVcXFObHliM1JDeGNVQkVTa1h3d1dIaFhhdVR0OTlGQjB5SFNKeTMxd0FNL2pHeUJhdlphb3VMRGticQowNXdwQ3dxQzl6SEFlZElUcThXOUhOQVA5Q1FjejdsV3lDRHhkZ2orN2hoeEdrUUoyYmpFMGxlWlA1bXphRXUyCjdPYUlDVkR0cGE5T2FXY3FpSUE9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0=
etcd-key.pem: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeVNscjhaYVcwUVptMzlDMElNSVV2V0pnek5DQlJvRlRMMUdaZDZqVldSKzRhMWNTCjg0ZVpuNXhEY2xLL2d3Y0JJcWs3dm9kVXIrenRMVWgzVjIxQnA2ZHljVXpzT041T2lna2l3YTBneStRaHpYZVcKdCtxbStpZFQrbTdzSWMzZVFkd1QxRkt0elRjaGEwMmhNRFNOL2RBTkVOZ0hzUDZTdGZGbyt4UDQ5VS9MRURnUQpZYVM1VlM1L2ZsV2NWejYxMlF4UEtZL2hJejMwdEJYNkQxcHFVL3VyRzVtWmtOUnFFYmVlYWpvNEVNVW5BY3NyCkxhS0txYjlxa2NqcVVwS2FzMTJDNmhvYUFRTkxsL3dYMmkvOVhnSTZxZ2Z6QTNhM0xvSkt3VW8xdVhBY1Jyc3AKQTdkTjlwR3dTakhudXMvaUZpdTBaUTM1ZWhHVjNkYVZ3V1NFYVFJREFRQUJBb0lCQVFDdlU0cmhaWDdOYitiNQo2ODBUUUJVVGUwc2pPQmQwZkFNa210cEhrTDlpam1NT0t6WTVTMVo1NXBoaWEveS9GcnZHbWZtWWRoczV1aVo5CjhVc1N5QWNST01CbVA4VWpTNTdvY0UzNndBcDFxc0JMZEkvSWZKeE1LenJtYXdjajcycG52SWtMNVlQVitOY0gKendGT0lvQWZWOVlMZUQ0NzVhVzVMazl4aGxiV2Rzak9VOW9sTThDNUQvTktadFhOUUZod1N4bHBGNTBPUDEvWgpLQU43WWNYYzJWTGVpV0h0OHBqRGFLekxrbGQ5UFZrWGFiU1h0M251cFdSK0szU3NNRmhyMVNMS3RLTytzN0lvCnQyZUxwMEF2YXFDbjk4MkxMNWNHQXNSQXZmd0UyTWU5clE5Sk1EZTlJU2ZzMDZmRVQvcHdkbVQ5ME0zMlBhQ08KUWpFNmpZSUJBb0dCQVBQR1RxMUhQSVhHS0FVTGM3aWVEb00yaSttWEo2Z3VLYUlzcjFjQmMxKzlwYURIM2hneQpPVkdQeXRjOURIeDV5S055U3hSVEJQcTgyMDhLOGJqUncwWEFKeDFoOU5rWDB2RHhSM29EcGtmWThTY3I5TmhECi9Zb2w2NXRMVDdFZW5KMk9JNVpXR2xYMHY0aHpaQmFaVTN3dnNIVGJBbk04VklaZTczWUUzb0c5QW9HQkFOTkEKQitXazRzdjBOQWVZeFZrOFVOOFhYdS9oRVNQOS9MMGZHZXg0QWYrTTM4dENpZ1kyZ0JwcXIxS2hmbmFKUkxJNAp1R1lEbENrMkNYUWtoem0zSklCSGV5dmc5Wk1BbXJHNHU3YnNJS3lyZEQrNW9Hcm5wSjhZMHBaOWtkWjd3VGwrClJYcnFJelFLQnczUlg3azNVb2FZOWVyVkdSaWpnMDM0OEU0VEp6b2RBb0dBZjR6c2o4ZnRTQVdsM1BYZ0IrSlQKcjU0ejJQWnBIcHVBMTBWUVBWTVovaWRwNlZXS3hOaEttMzJFWWRpa2x1RFY5WWVROTBKMUlZajlxejhMdVd1dwpJK2ZsejM3NHNUckErYWNWZ2ljMHN2VTFRUXpENFFDNlFiV1RzTDdZSk5IaW1xSEx4eGFvVXY5cjFFYWtRUnJhCnp1alpDRnVyellYc3FCNDJaMmNtMFhVQ2dZRUFyclR0MHdZaWF2cHpiVFR6UVhHWWtHZ0xvK2RHL2JVNEtGQm8KYUNCM3hwa0RIRjdUTjRBclFhMnBnWFQ5MlpwOHJjZ3ErSE5OUFNmcmVab2NHNmRURUtRTlFhU3ljL2l6OXZjSQpoNmVRL2p4dHo2WDgvV3pGd0s0T3UxYnVIYXdMaVRqY3pXS0Y2cXZBV2JVbXJVOExlVFZYYS9jQTRsZVlhQXZRCjhVRDEyQ0VDZ1lBUHUwdVlZdnZEUkNtMkNrZUw5U0s2T2o3MEtpb3RucmFSbWdTZVVIRHFLL211NExWejM0Q3cKcGJoTUN4a0I3UlljdXRwcFREdnZVa2lRbEpuek5xNk84Zkp0cEh1MTh0b0RyMWFIcGJhY0QxVFhpRFVjY2kwWQo1enpOOXBMUzc3UkFNWVR5MHJxdUs4L1ZBVWNEb0JVQW5yVVVZT0FPTUIzRlVsbzhtdEJFclE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQ==
etcd.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnVENDQW1tZ0F3SUJBZ0lVSWNFZ2FyYTlXdVI3U0l3MkRyTXhoRDFsUno4d0RRWUpLb1pJaHZjTkFRRUwKQlFBd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUtFd1ZPYVhoUFV6RW5NQ1VHQTFVRQpDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWak1CNFhEVEl3TURreU9ERTJOVEV3Ck1Gb1hEVEl3TVRBeU9ERTJOVEV3TUZvd0R6RU5NQXNHQTFVRUF4TUVhekl0TURDQ0FTSXdEUVlKS29aSWh2Y04KQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1rcGEvR1dsdEVHWnQvUXRDRENGTDFpWU16UWdVYUJVeTlSbVhlbwoxVmtmdUd0WEV2T0htWitjUTNKU3Y0TUhBU0twTzc2SFZLL3M3UzFJZDFkdFFhZW5jbkZNN0RqZVRvb0pJc0d0CklNdmtJYzEzbHJmcXB2b25VL3B1N0NITjNrSGNFOVJTcmMwM0lXdE5vVEEwamYzUURSRFlCN0Qra3JYeGFQc1QKK1BWUHl4QTRFR0drdVZVdWYzNVZuRmMrdGRrTVR5bVA0U005OUxRVitnOWFhbFA3cXh1Wm1aRFVhaEczbm1vNgpPQkRGSndITEt5MmlpcW0vYXBISTZsS1Ntck5kZ3VvYUdnRURTNWY4Rjlvdi9WNENPcW9IOHdOMnR5NkNTc0ZLCk5ibHdIRWE3S1FPM1RmYVJzRW94NTdyUDRoWXJ0R1VOK1hvUmxkM1dsY0ZraEdrQ0F3RUFBYU9CbGpDQmt6QU8KQmdOVkhROEJBZjhFQkFNQ0I0QXdEQVlEVlIwVEFRSC9CQUl3QURBZEJnTlZIUTRFRmdRVTFtNmdXQjZJaDNzdgpTTUdrVHhBZFVTNy9WS013SHdZRFZSMGpCQmd3Rm9BVVBUNkx4clpDVXBRMmpEbUNNQ0RkNmlVTG13Z3dNd1lEClZSMFJCQ3d3S29JS1pYUmpaQzVzYjJOaGJJSVFaWFJqWkM1cmRXSmxNaTVzYjJOaGJJSUVhekl0TUljRUN2MFMKY2pBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQURqR3lSbnRndW9QaUFZeGpRbXBPM2dHSWxjTjNCSVBpVEVEZApEUGsxcGJyakt3Z3FGa0JpU3A1ZmxDbFpCS1lXM3pRRWVKVDEyTCtkczhCMWp5WHVyZ1ZtU1RRWURYYkpiTmNoCmY1WjFyMnQzRXBsOEdTTk5Ec2ZETGo5aUhacml3TUlGRk9XOHNpRnk0ek1SQm4wRC9oeS9LUnVRREQxNHIySG0KWVM3Ty9hUTdaNDBiWThvZ0xVd2oyUHE0M1IxWmhGb0JNR1dFNW5jMW9TVkJHS2NQaWxiby9GSHBJTk1tYmdzbwpNK1FGNTkzWTE2S0o2K1FUKzhUZ1MyMVl6dTQ1RTAwOXMvc1piQkZuL0l1WkJxWHFkZEFZclI4Rm44SytBdGZFCnh6aTFLTnZJWTEzcXRrV21LN3hUTVl6TSsxTEVhOStidkxoNG1ybHFlWTVmVnlBOWF3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
type: Opaque


@@ -0,0 +1,20 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
app: gitlab-redis-monitor
release: prometheus-operator # required by Prometheus CRD
name: gitlab-monitor
namespace: gitlab
spec:
endpoints:
- interval: 30s
port: metrics
path: /metrics
jobLabel: app
namespaceSelector:
matchNames:
- gitlab
selector:
matchLabels:
app: redis


@@ -0,0 +1,21 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
annotations:
labels:
app: gitlab-exporter-monitor
release: prometheus-operator
name: gitlab-exporter-monitor
namespace: gitlab
spec:
endpoints:
- interval: 30s
path: /metrics
port: gitlab-exporter
jobLabel: app
namespaceSelector:
matchNames:
- gitlab
selector:
matchLabels:
app: gitlab-exporter


@@ -0,0 +1,23 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
app: nginx-ingress
release: prometheus-operator # required by Prometheus CRD
name: nginx-ingress
namespace: kube-system
spec:
endpoints:
- interval: 15s
port: metrics
jobLabel: app
selector:
matchExpressions:
- key: app
operator: In
values:
- nginx-ingress
- key: component
operator: In
values:
- controller


@@ -0,0 +1,81 @@
apiVersion: v1
kind: Secret
metadata:
labels:
app: prometheus-operator
name: etcd-cert
namespace: kube-system
type: Opaque
stringData:
etcd.pem: |-
-----BEGIN CERTIFICATE-----
MIIDgTCCAmmgAwIBAgIUfVvzugELXCci7r1kRjPUOaXt2S4wDQYJKoZIhvcNAQEL
BQAwTTESMBAGA1UEBxMJZ2VuZXJhdGVkMQ4wDAYDVQQKEwVOaXhPUzEnMCUGA1UE
CxMec2VydmljZXMua3ViZXJuZXRlcy5wa2kuY2FTcGVjMB4XDTIwMDEwMjE2MDcw
MFoXDTIwMDIwMTE2MDcwMFowDzENMAsGA1UEAxMEazItMDCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBAMkpa/GWltEGZt/QtCDCFL1iYMzQgUaBUy9RmXeo
1VkfuGtXEvOHmZ+cQ3JSv4MHASKpO76HVK/s7S1Id1dtQaencnFM7DjeTooJIsGt
IMvkIc13lrfqpvonU/pu7CHN3kHcE9RSrc03IWtNoTA0jf3QDRDYB7D+krXxaPsT
+PVPyxA4EGGkuVUuf35VnFc+tdkMTymP4SM99LQV+g9aalP7qxuZmZDUahG3nmo6
OBDFJwHLKy2iiqm/apHI6lKSmrNdguoaGgEDS5f8F9ov/V4COqoH8wN2ty6CSsFK
NblwHEa7KQO3TfaRsEox57rP4hYrtGUN+XoRld3WlcFkhGkCAwEAAaOBljCBkzAO
BgNVHQ8BAf8EBAMCB4AwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU1m6gWB6Ih3sv
SMGkTxAdUS7/VKMwHwYDVR0jBBgwFoAUPT6LxrZCUpQ2jDmCMCDd6iULmwgwMwYD
VR0RBCwwKoIKZXRjZC5sb2NhbIIQZXRjZC5rdWJlMi5sb2NhbIIEazItMIcECv0S
cjANBgkqhkiG9w0BAQsFAAOCAQEAUfDYNj/Yc4HqMzcez7NGBKOyJsgyuhJ+tnwv
aurgfIVMcePdFLz24guKsGfJotP28M0NCZm7v/2OSPzobXhis1yzZh2rv1KWuEkf
uQheXUZ5u65f9Sc+klO/deubbckBP+0vWg4Ru4v9t+vCXZQt4u4OGqwLsG6KxxtG
yXFSPuOOBLbyZfyGNCkOv64OF0qY648cLRH9mfZ1WOlcRdexLi+mtwQlWlCD+02f
iTZYIYvNHpKb1oa6J7/QguouRTue7ZkQuNG0p7FJiLHs5nt750HKOTsSjxfM5+SA
+rohNvUwao+K7rsLj2k3WSOU/Ju6uSqbtGEFgfh/oUBdkYwKJQ==
-----END CERTIFICATE-----
etcd-key.pem: |-
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAySlr8ZaW0QZm39C0IMIUvWJgzNCBRoFTL1GZd6jVWR+4a1cS
84eZn5xDclK/gwcBIqk7vodUr+ztLUh3V21Bp6dycUzsON5Oigkiwa0gy+QhzXeW
t+qm+idT+m7sIc3eQdwT1FKtzTcha02hMDSN/dANENgHsP6StfFo+xP49U/LEDgQ
YaS5VS5/flWcVz612QxPKY/hIz30tBX6D1pqU/urG5mZkNRqEbeeajo4EMUnAcsr
LaKKqb9qkcjqUpKas12C6hoaAQNLl/wX2i/9XgI6qgfzA3a3LoJKwUo1uXAcRrsp
A7dN9pGwSjHnus/iFiu0ZQ35ehGV3daVwWSEaQIDAQABAoIBAQCvU4rhZX7Nb+b5
680TQBUTe0sjOBd0fAMkmtpHkL9ijmMOKzY5S1Z55phia/y/FrvGmfmYdhs5uiZ9
8UsSyAcROMBmP8UjS57ocE36wAp1qsBLdI/IfJxMKzrmawcj72pnvIkL5YPV+NcH
zwFOIoAfV9YLeD475aW5Lk9xhlbWdsjOU9olM8C5D/NKZtXNQFhwSxlpF50OP1/Z
KAN7YcXc2VLeiWHt8pjDaKzLkld9PVkXabSXt3nupWR+K3SsMFhr1SLKtKO+s7Io
t2eLp0AvaqCn982LL5cGAsRAvfwE2Me9rQ9JMDe9ISfs06fET/pwdmT90M32PaCO
QjE6jYIBAoGBAPPGTq1HPIXGKAULc7ieDoM2i+mXJ6guKaIsr1cBc1+9paDH3hgy
OVGPytc9DHx5yKNySxRTBPq8208K8bjRw0XAJx1h9NkX0vDxR3oDpkfY8Scr9NhD
/Yol65tLT7EenJ2OI5ZWGlX0v4hzZBaZU3wvsHTbAnM8VIZe73YE3oG9AoGBANNA
B+Wk4sv0NAeYxVk8UN8XXu/hESP9/L0fGex4Af+M38tCigY2gBpqr1KhfnaJRLI4
uGYDlCk2CXQkhzm3JIBHeyvg9ZMAmrG4u7bsIKyrdD+5oGrnpJ8Y0pZ9kdZ7wTl+
RXrqIzQKBw3RX7k3UoaY9erVGRijg0348E4TJzodAoGAf4zsj8ftSAWl3PXgB+JT
r54z2PZpHpuA10VQPVMZ/idp6VWKxNhKm32EYdikluDV9YeQ90J1IYj9qz8LuWuw
I+flz374sTrA+acVgic0svU1QQzD4QC6QbWTsL7YJNHimqHLxxaoUv9r1EakQRra
zujZCFurzYXsqB42Z2cm0XUCgYEArrTt0wYiavpzbTTzQXGYkGgLo+dG/bU4KFBo
aCB3xpkDHF7TN4ArQa2pgXT92Zp8rcgq+HNNPSfreZocG6dTEKQNQaSyc/iz9vcI
h6eQ/jxtz6X8/WzFwK4Ou1buHawLiTjczWKF6qvAWbUmrU8LeTVXa/cA4leYaAvQ
8UD12CECgYAPu0uYYvvDRCm2CkeL9SK6Oj70KiotnraRmgSeUHDqK/mu4LVz34Cw
pbhMCxkB7RYcutppTDvvUkiQlJnzNq6O8fJtpHu18toDr1aHpbacD1TXiDUcci0Y
5zzN9pLS77RAMYTy0rquK8/VAUcDoBUAnrUUYOAOMB3FUlo8mtBErQ==
-----END RSA PRIVATE KEY-----
ca.pem: |-
-----BEGIN CERTIFICATE-----
MIIDajCCAlKgAwIBAgIUYudTFvTJDamPPtR4Xzw2S0dN36cwDQYJKoZIhvcNAQEL
BQAwTTESMBAGA1UEBxMJZ2VuZXJhdGVkMQ4wDAYDVQQKEwVOaXhPUzEnMCUGA1UE
CxMec2VydmljZXMua3ViZXJuZXRlcy5wa2kuY2FTcGVjMB4XDTE5MTAxNTA5MjEw
MFoXDTI0MTAxMzA5MjEwMFowTTESMBAGA1UEBxMJZ2VuZXJhdGVkMQ4wDAYDVQQK
EwVOaXhPUzEnMCUGA1UECxMec2VydmljZXMua3ViZXJuZXRlcy5wa2kuY2FTcGVj
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAukWIN5XpFB652nk7sPlu
Ij56ScNljJNWBYesVh1828MPOUTzne81yHS0x7XUmpcyT7C6dPvUVrGYQFB2vj7J
roSwN1XkYymtauPTZjWbzeWRlmggGij1aQFNg/KzY3e0dpgUPI7MvPr1ISM5srnZ
tbTQrFf2ElswoTCO1cXDc252gnZaEnzfRD7J6yz2Uv/1FRz/ZcoCeUbJrd5bcNI9
gJaOy0140GGG67YdmepcAbWy1NbAMZRBjaTQ+feEVkJxPcQ5fj1HkCDnLrcxJfw1
IhodVe4WKNHriAFGrSKrH3UKoN9DUmSTF9UDQkNA/s54dhBr1bEkne1mDll0afaX
jwIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
HQ4EFgQUPT6LxrZCUpQ2jDmCMCDd6iULmwgwDQYJKoZIhvcNAQELBQADggEBAKis
hW6ldAV6cnve3waznZE5sOPyOXQXWDsltktsSpzYOhr59Y6wCj1/NoldQiv8QBxO
PaQwABR/q/sJumHD82pK4pl6W98VL1Oump8J1f1FWFj+2i2+NlmTS9GSqapdLu2h
OXMBz3BCdVVIT8DhseTA/mVyyQWXw/9lQ61OSusPnne0p0pnBFMMSUHMXVVxk++e
c7MLeUqqNlyb3RCxcUBESkXwwWHhXauTt99FB0yHSJy31wAM/jGyBavZaouLDkbq
05wpCwqC9zHAedITq8W9HNAP9CQcz7lWyCDxdgj+7hhxGkQJ2bjE0leZP5mzaEu2
7OaICVDtpa9OaWcqiIA=
-----END CERTIFICATE-----

File diff suppressed because it is too large

18 charts/sentry/deploy.sh Normal file

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
namespace=sentry
charts=()
. ../config.sh
kubectl create ns $namespace
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
sentry stable/sentry \
| make_substitutions \
| kubectl apply -n $namespace -f -

18 charts/seq/deploy.sh Normal file

@@ -0,0 +1,18 @@
#!/usr/bin/env bash
namespace=seq
charts=()
. ../config.sh
kubectl create ns $namespace
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f values.yaml \
seq stable/seq \
| make_substitutions \
| kubectl apply -n $namespace -f -

154 charts/seq/values.yaml Normal file

@@ -0,0 +1,154 @@
# Default values for Seq.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: datalust/seq
tag: 2020
pullPolicy: IfNotPresent
# By passing the value Y in the ACCEPT_EULA environment variable,
# you are expressing that you have read and accepted the terms in
# Seq End User License Agreement applicable to the Seq Docker image
# that you intend to use.
acceptEULA: "Y"
# Set this URL if you enable ingress and/or AAD authentication.
# Without this URL set to include HTTPS, Seq will try to set a login redirect
# URL with HTTP instead of HTTPS and AAD's registration requires HTTPS.
# The result is that you'll get an error during login:
# AADSTS50011: The reply url specified in the request does not match the reply urls configured for the application
# baseURI: https://my.public.url/
# The complete Seq API and UI.
# This API can accept events and serve API requests.
ui:
service:
port: 80
ingress:
enabled: true
path: /
hosts:
- seq.k2.local
# The ingestion-only API.
# This API is a subset of ui that can only ingest events.
ingestion:
service:
port: 5341
ingress:
enabled: false
path: /
hosts: []
# Accept events in the GELF format and forward them to Seq.
gelf:
enabled: false
image:
repository: datalust/sqelf
tag: 2
pullPolicy: IfNotPresent
service:
port: 12201
# GELF can be ingested through either TCP or UDP
protocol: TCP
service:
type: ClusterIP
ingress:
annotations:
cert-manager.io/cluster-issuer: ca-issuer
kubernetes.io/ingress.class: nginx
tls:
- secretName: seq-tls
hosts:
- seq.k2.local
resources: {}
# We recommend uncommenting these and specifying an explicit memory limit that
# suits your workload.
# limits:
# memory: 256Mi
# requests:
# memory: 256Mi
cache:
# The fraction of RAM that the cache should try to fit within. Specifying a larger
# value may allow more events in RAM at the expense of potential instability.
# Setting it to `0` will disable the cache completely.
# 70% (`0.7`) is a good starting point for machines with up to ~8GB of RAM.
targetSize: 0.7
nodeSelector: {}
tolerations: []
affinity: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
persistence:
enabled: true
## The path the volume will be mounted at
path: /data
## The subdirectory of the volume to mount to; useful in dev environments and when one PV is shared by multiple services.
subPath: ""
## A manually managed Persistent Volume and Claim
## Requires persistence.enabled: true
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
## Seq data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: managed-nfs-storage
accessMode: ReadWriteOnce
size: 8Gi
serviceAccount:
create: false
name:
## Enable RBAC
rbac:
create: false
# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
create: false
securityContext:
privileged: true
## Configure probe values
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
enabled: true
failureThreshold: 3
initialDelaySeconds: 0
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
startupProbe:
enabled: true
failureThreshold: 30
periodSeconds: 10
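Not part of the values file itself, but a quick smoke test once the chart is up: Seq accepts events on the ingestion port configured above (5341) at its /api/events/raw endpoint in CLEF format. A minimal sketch, assuming the release is reachable in-cluster as seq.seq.svc and no ingestion API key has been configured yet:

#!/usr/bin/env bash
# Post one CLEF-formatted test event to the Seq ingestion API.
# The host name and port are assumptions based on the values above; add
# -H "X-Seq-ApiKey: <key>" if an ingestion API key is required.
curl -sf -X POST "http://seq.seq.svc:5341/api/events/raw?clef" \
  -H "Content-Type: application/vnd.serilog.clef" \
  -d '{"@t":"2020-11-05T21:05:34Z","@mt":"Hello from {Source}","Source":"deploy-check"}'

A successful (2xx) response means the event was accepted and should show up in the UI behind the seq.k2.local ingress.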

17
charts/vault/deploy.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
namespace=vault
charts=()
. ../config.sh
kubectl create ns $namespace
kubectl_apply $namespace "${charts[@]}"
helm template \
-n $namespace \
-f vault-values.yaml \
vault hashicorp/vault \
| make_substitutions \
| kubectl apply -n $namespace -f -
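The deploy script only renders and applies the vault chart; a freshly installed Vault server starts sealed. As a reminder (not something this commit automates), initialization and unsealing would look roughly like the following, assuming the chart's default single-server pod name vault-0:

#!/usr/bin/env bash
# Initialize Vault, then unseal it with one of the generated key shares.
# The pod name vault-0 is the hashicorp/vault chart default; adjust if overridden.
kubectl exec -n vault vault-0 -- vault operator init
kubectl exec -n vault vault-0 -- vault operator unseal <unseal-key-share>
kubectl exec -n vault vault-0 -- vault status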

View File

@@ -1,92 +0,0 @@
#!/usr/bin/env bash
TOP=@out@/share/kube-system-bootstrap
ca=@initca@
apiserver="@apiserver@"
fileserver="@fileserver@"
grafana_ldap_toml="@grafana_ldap_toml@"
apply_configs () {
d=$TOP/config
configs[0]=$d/cluster-auth-rbac.yaml
configs[1]=$d/kube-proxy.yaml
configs[2]=$d/front-proxy-client.yaml
configs[3]=$d/grafana-smtp-secret.yaml
[ ! -z $grafana_ldap_toml ] && configs[4]=$d/grafana-ldap-toml.yaml
kubectl delete secret cluster-ca -n kube-system >/dev/null 2>&1
kubectl create secret tls cluster-ca \
--namespace=kube-system --cert=${ca}/ca.pem --key=${ca}/ca-key.pem
for i in ${configs[@]}; do
kubectl apply -f $i
done
}
install_certmgr () {
kubectl apply -f https://raw.githubusercontent.com/jetstack/cert-manager/release-0.12/deploy/manifests/00-crds.yaml
helm repo add jetstack https://charts.jetstack.io
helm install -n kube-system -f $TOP/charts/cert-manager.yaml \
cert-manager jetstack/cert-manager
}
helm_install () {
echo "helm install $1"
# use the values file passed as $2 when given, otherwise the packaged default
helm install -n kube-system -f ${2:-$TOP/charts/$1.yaml} $1 stable/$1
}
helm_delete () {
echo "helm delete existing $1"
helm delete -n kube-system $1
}
install_prometheus () {
helm_delete prometheus-operator
yaml=/tmp/prometheus-operator.yaml
cp $TOP/charts/prometheus-operator.yaml $yaml
chmod 640 $yaml
# disable ldap for grafana
[ -z $grafana_ldap_toml ] && \
sed -i '/auth\.ldap:/,+1 s/true/false/; /ldap:/,+1 d' $yaml
# disable storage
[ -z $fileserver ] && \
sed -i '/prometheusSpec:/,+10d' $yaml
helm_install prometheus-operator $yaml
}
install_charts () {
[ ! -z $fileserver ] && charts[0]=nfs-client-provisioner
charts[1]=nginx-ingress
charts[2]=metrics-server
charts[3]=kubernetes-dashboard
for i in ${charts[@]};do
helm_install $i
sleep 30
done
}
install_prometheus_crds () {
url=https://raw.githubusercontent.com/helm/charts/master/stable/prometheus-operator/crds
kubectl apply -f $url/crd-alertmanager.yaml
kubectl apply -f $url/crd-prometheus.yaml
kubectl apply -f $url/crd-prometheusrules.yaml
kubectl apply -f $url/crd-servicemonitor.yaml
kubectl apply -f $url/crd-podmonitor.yaml
}
helm repo add stable https://kubernetes-charts.storage.googleapis.com
helm repo update
apply_configs
install_prometheus_crds
install_certmgr
install_charts
install_prometheus
# helm install -n kube-system -f sentry.yaml --wait --timeout=1000s sentry stable/sentry
# helm install -n vault -f vault-values.yaml vault hashicorp/vault
# helm install -n monitoring -f kube-prometheus-stack.yaml prometheus prometheus-community/kube-prometheus-stack
# vim:ft=sh

View File

@@ -1,140 +0,0 @@
# Default values for coredns.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: coredns/coredns
tag: "1.6.4"
pullPolicy: IfNotPresent
replicaCount: 1
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
serviceType: "ClusterIP"
prometheus:
monitor:
enabled: true
additionalLabels: {}
namespace: ""
service:
clusterIP: 10.0.0.254
# loadBalancerIP: ""
# externalTrafficPolicy: ""
# annotations:
# prometheus.io/scrape: "true"
# prometheus.io/port: "9153"
serviceAccount:
create: true
# The name of the ServiceAccount to use
# If not set and create is true, a name is generated using the fullname template
name:
rbac:
# If true, create & use RBAC resources
create: true
# If true, create and use PodSecurityPolicy
pspEnable: false
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
# name:
# isClusterService specifies whether the chart should be deployed as a cluster service or a normal k8s app.
isClusterService: true
# Optional priority class to be used for the coredns pods
priorityClassName: ""
servers:
- zones:
- zone: .
port: 53
plugins:
- name: cache
parameters: 30
- name: errors
# Serves a /health endpoint on :8080, required for livenessProbe
- name: health
# Serves a /ready endpoint on :8181, required for readinessProbe
- name: ready
# Required to query kubernetes API for data
- name: kubernetes
parameters: @cluster@.local
- name: loadbalance
parameters: round_robin
# Serves a /metrics endpoint on :9153, required for serviceMonitor
- name: prometheus
parameters: 0.0.0.0:9153
- name: forward
parameters: . /etc/resolv.conf
# Complete example with all the options:
# - zones: # the `zones` block can be left out entirely, defaults to "."
# - zone: hello.world. # optional, defaults to "."
# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS)
# - zone: foo.bar.
# scheme: dns://
# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol
# # Note that this will not work if you are also exposing tls or grpc on the same server
# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS)
# plugins: # the plugins to use for this server block
# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it!
# parameters: foo bar # list of parameters after the plugin
# configBlock: |- # if the plugin supports extra block style config, supply it here
# hello world
# foo bar
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: foo.bar.com/role
# operator: In
# values:
# - master
affinity: {}
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
# tolerations:
# - key: foo.bar.com/role
# operator: Equal
# value: master
# effect: NoSchedule
tolerations: []
# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/
zoneFiles: []
# - filename: example.db
# domain: example.com
# contents: |
# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600
# example.com. IN NS b.iana-servers.net.
# example.com. IN NS a.iana-servers.net.
# example.com. IN A 192.168.99.102
# *.example.com. IN A 192.168.99.102
# optional array of secrets to mount inside the coredns container
# possible use case: a secure connection to an etcd backend
extraSecrets: []
# - name: etcd-client-certs
# mountPath: /etc/coredns/tls/etcd
# - name: some-fancy-secret
# mountPath: /etc/wherever
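Although this values file is removed by the commit, the servers/plugins list above is the part the chart turns into a Corefile. If a coredns release deployed from values like these is still running, the rendered result can be checked straight from its ConfigMap; a sketch, assuming the release fullname is coredns in kube-system:

#!/usr/bin/env bash
# Print the Corefile rendered from the servers/plugins values.
# The ConfigMap name follows the release fullname; "coredns" is an assumption.
kubectl -n kube-system get configmap coredns -o jsonpath='{.data.Corefile}'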

View File

@@ -1,45 +0,0 @@
# Default values for kubernetes-dashboard
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
## Enable possibility to skip login
enableSkipLogin: false
## Serve application over HTTP without TLS
enableInsecureLogin: false
## Additional container arguments
extraArgs:
- --token-ttl=0
rbac:
clusterAdminRole: true
service:
type: ClusterIP
externalPort: 443
annotations:
service.alpha.kubernetes.io/app-protocols: '{"https":"HTTPS"}'
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
nginx.org/ssl-services: kubernetes-dashboard
nginx.ingress.kubernetes.io/secure-backends: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
# nginx.ingress.kubernetes.io/ssl-passthrough: "false"
paths:
- /
hosts:
- dashboard.@cluster@.local
tls:
- hosts:
- dashboard.@cluster@.local
secretName: kubernetes-dashboard-tls-cert

View File

@@ -1,69 +0,0 @@
## nginx configuration
## Ref: https://github.com/kubernetes/ingress/blob/master/controllers/nginx/configuration.md
##
controller:
kind: Deployment
hostNetwork: true
service:
type: NodePort
nodePorts:
http: 30080
https: 30443
targetPorts:
http: http
https: https
tolerations:
- key: node-role.kubernetes.io/master
operator: Equal
value: "true"
effect: NoSchedule
- key: unschedulable
operator: Equal
value: "true"
effect: NoSchedule
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values: [ @apiserver@ ]
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- nginx-ingress
- key: component
operator: In
values:
- controller
topologyKey: kubernetes.io/hostname
namespaces: []
replicaCount: 1
minAvailable: 1
metrics:
enabled: true
serviceMonitor:
enabled: true
# TCP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp
##
tcp: {}
# 8080: "default/example-tcp-svc:9000"
# UDP service key:value pairs
# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp
##
udp: {}
# 53: "kube-system/kube-dns:53"
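These (now removed) values pinned the controller to the API-server node via nodeAffinity and ran it with hostNetwork, so ports 80/443 plus the 30080/30443 NodePorts were only reachable on that host. A quick way to confirm where the controller pods actually land, reusing the app/component labels the podAntiAffinity block above already relies on:

#!/usr/bin/env bash
# Show which nodes the nginx-ingress controller pods were scheduled on.
kubectl -n kube-system get pods -o wide -l app=nginx-ingress,component=controller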

View File

@@ -1,169 +0,0 @@
alertmanager:
## Deploy alertmanager
##
enabled: true
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
## Hosts must be provided if Ingress is enabled.
##
hosts:
- alertmanager.@cluster@.local
## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
##
paths: []
# - /
## TLS configuration for Alertmanager Ingress
## Secret must be manually created in the namespace
##
tls:
- secretName: alertmanager-general-tls
hosts:
- alertmanager.@cluster@.local
grafana:
enabled: true
defaultDashboardsEnabled: true
adminPassword: prom-operator
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
hosts:
- grafana.@cluster@.local
path: /
tls:
- secretName: grafana-general-tls
hosts:
- grafana.@cluster@.local
grafana.ini:
paths:
data: /var/lib/grafana/data
logs: /var/log/grafana
plugins: /var/lib/grafana/plugins
provisioning: /etc/grafana/provisioning
analytics:
check_for_updates: true
log:
mode: console
grafana_net:
url: https://grafana.net
## LDAP Authentication can be enabled with the following values on grafana.ini
## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
auth.ldap:
enabled: true
allow_sign_up: true
config_file: /etc/grafana/ldap.toml
smtp:
enabled: true
host: smtpgw.itpartner.no
port: 465
user: utvikling
skip_verify: true
## Grafana's LDAP configuration
## Templated by the template in _helpers.tpl
## NOTE: To enable this, grafana.ini must be configured with auth.ldap.enabled
## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
## ref: http://docs.grafana.org/installation/ldap/#configuration
ldap:
existingSecret: grafana-ldap-toml
## Grafana's SMTP configuration
## NOTE: To enable, grafana.ini must be configured with smtp.enabled
## ref: http://docs.grafana.org/installation/configuration/#smtp
smtp:
# `existingSecret` is a reference to an existing secret containing the smtp configuration
# for Grafana.
existingSecret: grafana-smtp
userKey: user
passwordKey: password
kubeApiServer:
enabled: true
tlsConfig:
insecureSkipVerify: true
kubelet:
enabled: true
namespace: kube-system
coreDns:
enabled: true
kubeEtcd:
enabled: true
serviceMonitor:
insecureSkipVerify: true
endpoints:
- @apiserverAddress@
kubeControllerManager:
enabled: true
serviceMonitor:
insecureSkipVerify: true
endpoints:
- @apiserverAddress@
kubeScheduler:
enabled: true
serviceMonitor:
insecureSkipVerify: true
endpoints:
- @apiserverAddress@
kubeProxy:
enabled: false
endpoints:
- @apiserverAddress@
@workers@
kubeStateMetrics:
enabled: true
nodeExporter:
enabled: true
prometheusOperator:
enabled: true
prometheus:
enabled: true
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: ca-issuer
hosts:
- prometheus.@cluster@.local
paths: []
tls:
- secretName: prometheus-general-tls
hosts:
- prometheus.@cluster@.local
prometheusSpec:
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: managed-nfs-storage
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 10Gi
selector: {}
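Both the removed bootstrap script and these values reference pre-created secrets (grafana-smtp, grafana-ldap-toml) instead of inlining credentials. For reference, creating the SMTP secret with the key names expected here (user/password) would look roughly like this; the literal values are placeholders:

#!/usr/bin/env bash
# Create the secret referenced by grafana.smtp.existingSecret; the key names
# must match userKey/passwordKey in the values above.
kubectl -n kube-system create secret generic grafana-smtp \
  --from-literal=user='<smtp-user>' \
  --from-literal=password='<smtp-password>'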

View File

@@ -1,9 +0,0 @@
#!/usr/bin/env bash
TOP=@out@/share/kubernetes-config
dest=kubernetes-config
[ $# = 1 ] && dest=$1
cp -r $TOP $dest
chmod -R ug+w $dest
# vim:ft=sh

View File

@@ -0,0 +1,23 @@
#!/usr/bin/env bash
repos=(
"jetstack=https://charts.jetstack.io"
"stable=https://kubernetes-charts.storage.googleapis.com/"
"minio=https://helm.min.io/"
"anchore=https://charts.anchore.io"
"prometheus-community=https://prometheus-community.github.io/helm-charts"
"bitnami=https://charts.bitnami.com/bitnami"
"hashicorp=https://helm.releases.hashicorp.com"
"ingress-nginx=https://kubernetes.github.io/ingress-nginx"
)
update_helm_repos () {
for i in ${repos[@]}; do
k=$(echo "$i" | cut -d= -f1)
v=$(echo "$i" | cut -d= -f2)
helm repo add $k $v
done
helm repo update
}
update_helm_repos
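The per-chart deploy scripts in this commit source ../config.sh and call two helpers, kubectl_apply and make_substitutions, neither of which appears in this excerpt. A purely hypothetical sketch of what they might look like, following the @placeholder@ convention the values files use (@cluster@, @apiserver@, and so on); the real definitions may differ:

# Hypothetical helpers -- the actual implementations are not shown in this commit excerpt.
# Apply any plain manifests listed for a namespace.
kubectl_apply () {
  local ns=$1; shift
  for f in "$@"; do
    kubectl apply -n "$ns" -f "$f"
  done
}

# Replace @placeholder@ tokens on stdin with environment-specific values.
make_substitutions () {
  sed -e "s/@cluster@/${CLUSTER_NAME:-k2}/g" \
      -e "s/@apiserver@/${APISERVER:-}/g"
}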