Totally revamp cluster chart configs

Jonas Juselius
2020-11-05 21:05:34 +01:00
parent 6fea8b3bc8
commit 3a69e7f1f1
55 changed files with 3921 additions and 531 deletions

charts/seq/values.yaml Normal file
@@ -0,0 +1,154 @@
# Default values for Seq.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: datalust/seq
  tag: 2020
  pullPolicy: IfNotPresent

# By passing the value Y in the ACCEPT_EULA environment variable,
# you are expressing that you have read and accepted the terms in
# the Seq End User License Agreement applicable to the Seq Docker image
# that you intend to use.
acceptEULA: "Y"
# Set this URL if you enable ingress and/or AAD authentication.
# Without this URL set to use HTTPS, Seq will register a login redirect
# URL that uses HTTP, while AAD's app registration requires HTTPS.
# The result is an error during login:
#   AADSTS50011: The reply url specified in the request does not match the reply urls configured for the application
# baseURI: https://my.public.url/
# The complete Seq API and UI.
# This API can accept events and serve API requests.
ui:
  service:
    port: 80
  ingress:
    enabled: true
    path: /
    hosts:
      - seq.k2.local
# The ingestion-only API.
# This API is a subset of ui that can only ingest events.
ingestion:
  service:
    port: 5341
  ingress:
    enabled: false
    path: /
    hosts: []
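# For illustration only (the in-cluster service name below is hypothetical
# and depends on the Helm release name): applications can post raw CLEF
# events to the ingestion endpoint, e.g.
#   curl -X POST http://<release>-seq-ingestion:5341/api/events/raw \
#     -H "Content-Type: application/vnd.serilog.clef" \
#     -d '{"@t":"2020-11-05T20:05:34Z","@mt":"Hello from {app}","app":"demo"}'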
# Accept events in the GELF format and forward them to Seq.
gelf:
  enabled: false
  image:
    repository: datalust/sqelf
    tag: 2
    pullPolicy: IfNotPresent
  service:
    port: 12201
    # GELF can be ingested through either TCP or UDP
    protocol: TCP
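# A minimal sketch of enabling the GELF listener over UDP instead, using
# only the keys defined above:
#   gelf:
#     enabled: true
#     service:
#       port: 12201
#       protocol: UDP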
service:
  type: ClusterIP

ingress:
  annotations:
    cert-manager.io/cluster-issuer: ca-issuer
    kubernetes.io/ingress.class: nginx
  tls:
    - secretName: seq-tls
      hosts:
        - seq.k2.local
resources: {}
  # We recommend uncommenting these and specifying an explicit memory limit
  # that suits your workload.
  # limits:
  #   memory: 256Mi
  # requests:
  #   memory: 256Mi
cache:
  # The fraction of RAM that the cache should try to fit within. Specifying a
  # larger value may allow more events in RAM at the expense of potential
  # instability. Setting it to `0` will disable the cache completely.
  # 70% (`0.7`) is a good starting point for machines with up to ~8GB of RAM.
  targetSize: 0.7
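  # For example, on a node with 8GiB of RAM, a targetSize of 0.7 asks the
  # cache to stay within roughly 0.7 x 8GiB = 5.6GiB.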
nodeSelector: {}
tolerations: []
affinity: {}
## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
persistence:
  enabled: true
  ## The path the volume will be mounted at
  path: /data
  ## The subdirectory of the volume to mount to; useful in dev environments
  ## and when a single PV is shared by multiple services.
  subPath: ""
  ## A manually managed Persistent Volume and Claim.
  ## Requires persistence.enabled: true.
  ## If defined, the PVC must be created manually before the volume will be bound.
  # existingClaim:
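  # A sketch of a manually created claim that existingClaim could reference
  # (the name "seq-data" is hypothetical; apply it before installing the chart):
  #   apiVersion: v1
  #   kind: PersistentVolumeClaim
  #   metadata:
  #     name: seq-data
  #   spec:
  #     accessModes: ["ReadWriteOnce"]
  #     resources:
  #       requests:
  #         storage: 8Gi
  # ...and then set: existingClaim: seq-data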
  ## Seq data Persistent Volume Storage Class
  ## If defined, storageClassName: <storageClass>
  ## If set to "-", storageClassName: "", which disables dynamic provisioning
  ## If undefined (the default) or set to null, no storageClassName spec is
  ## set, choosing the default provisioner (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack).
  ##
  storageClass: managed-nfs-storage
  accessMode: ReadWriteOnce
  size: 8Gi
serviceAccount:
  create: false
  name:

## Enable RBAC
rbac:
  create: false

# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
  create: false

securityContext:
  privileged: true
## Configure probe values
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
  enabled: true
  failureThreshold: 3
  initialDelaySeconds: 0
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1

readinessProbe:
  enabled: true
  failureThreshold: 3
  initialDelaySeconds: 0
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1

startupProbe:
  enabled: true
  failureThreshold: 30
  periodSeconds: 10
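# With the values above, the startup probe tolerates failureThreshold (30)
# x periodSeconds (10s) = up to 5 minutes for Seq to come up before the
# kubelet restarts the container.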