From 3a69e7f1f1ec52861a851464dc6c223cc6b587d6 Mon Sep 17 00:00:00 2001 From: Jonas Juselius Date: Thu, 5 Nov 2020 21:05:34 +0100 Subject: [PATCH] Totally revamp cluster chart configs --- charts/anchore/anchore.yaml | 439 ++++ charts/busynix/busynix.yaml | 29 + charts/busynix/deploy.sh | 12 + charts/cert-manager/cluster-ca.yaml | 9 + .../cert-manager}/cluster-issuer.yaml | 0 charts/cert-manager/deploy.sh | 21 + .../cert-manager}/front-proxy-client.yaml | 0 .../cert-manager/values.yaml | 0 charts/config.sh | 34 + charts/ingress-nginx/deploy.sh | 19 + charts/ingress-nginx/values.yaml | 688 ++++++ .../kube-system}/cluster-auth-rbac.yaml | 0 charts/kube-system/deploy.sh | 16 + .../kube-system}/kube-proxy.yaml | 7 +- .../dashboard-ingress.yaml | 4 +- charts/kubernetes-dashboard/deploy.sh | 17 + charts/metrics-server/deploy.sh | 19 + .../metrics-server/values.yaml | 0 charts/minio/deploy.sh | 18 + .../minio.yaml => charts/minio/values.yaml | 0 charts/nfs-client-provisioner/deploy.sh | 18 + .../nfs-client-provisioner/values.yaml | 0 charts/prometheus/app-servicemonitor.yaml | 19 + charts/prometheus/deploy.sh | 57 + charts/prometheus/etcd-cert-secret.yaml | 12 + .../gitlab-redis-servicemonitor.yaml | 20 + charts/prometheus/gitlab-servicemonitor.yaml | 21 + .../prometheus}/grafana-ldap-toml.yaml | 0 .../prometheus}/grafana-smtp-secret.yaml | 0 .../ingress-nginx-servicemonitor.yaml | 23 + charts/prometheus/prometheus-etcd-cert.yaml | 81 + charts/prometheus/values.yaml | 2115 +++++++++++++++++ charts/sentry/deploy.sh | 18 + .../sentry}/sentry-postgres-secret.yaml | 0 .../sentry.yaml => charts/sentry/values.yaml | 0 charts/seq/deploy.sh | 18 + charts/seq/values.yaml | 154 ++ charts/vault/deploy.sh | 17 + .../vault/values.yaml | 0 .../bin/initial-kube-system-bootstrap | 92 - kubernetes-config/charts/coredns.yaml | 140 -- .../charts/kubernetes-dashboard.yaml | 45 - kubernetes-config/charts/nginx-ingress.yaml | 69 - .../charts/prometheus-operator.yaml | 169 -- kubernetes-config/copy-kubernetes-config | 9 - .../bin => scripts}/config-namespace.sh | 0 .../bin => scripts}/docker-prune-stopped.fish | 0 .../bin => scripts}/gitlab-prune-registry.sh | 0 .../bin => scripts}/reset-sa-tokens.sh | 0 .../bin => scripts}/restart-flannel.sh | 0 .../bin => scripts}/restart-kubernetes.sh | 0 .../bin => scripts}/setup-helm.sh | 0 .../bin => scripts}/taint-node-no-schedule.sh | 0 scripts/update-helm-repos.sh | 23 + {kubernetes-config/bin => scripts}/ws-curl.sh | 0 55 files changed, 3921 insertions(+), 531 deletions(-) create mode 100644 charts/anchore/anchore.yaml create mode 100644 charts/busynix/busynix.yaml create mode 100755 charts/busynix/deploy.sh create mode 100644 charts/cert-manager/cluster-ca.yaml rename {kubernetes-config/config => charts/cert-manager}/cluster-issuer.yaml (100%) create mode 100644 charts/cert-manager/deploy.sh rename {kubernetes-config/config => charts/cert-manager}/front-proxy-client.yaml (100%) rename kubernetes-config/charts/cert-manager.yaml => charts/cert-manager/values.yaml (100%) create mode 100755 charts/config.sh create mode 100755 charts/ingress-nginx/deploy.sh create mode 100644 charts/ingress-nginx/values.yaml rename {kubernetes-config/config => charts/kube-system}/cluster-auth-rbac.yaml (100%) create mode 100755 charts/kube-system/deploy.sh rename {kubernetes-config/config => charts/kube-system}/kube-proxy.yaml (95%) rename {kubernetes-config/config => charts/kubernetes-dashboard}/dashboard-ingress.yaml (88%) create mode 100755 charts/kubernetes-dashboard/deploy.sh create mode 
100755 charts/metrics-server/deploy.sh rename kubernetes-config/charts/metrics-server.yaml => charts/metrics-server/values.yaml (100%) create mode 100644 charts/minio/deploy.sh rename kubernetes-config/charts/minio.yaml => charts/minio/values.yaml (100%) create mode 100755 charts/nfs-client-provisioner/deploy.sh rename kubernetes-config/charts/nfs-client-provisioner.yaml => charts/nfs-client-provisioner/values.yaml (100%) create mode 100644 charts/prometheus/app-servicemonitor.yaml create mode 100644 charts/prometheus/deploy.sh create mode 100644 charts/prometheus/etcd-cert-secret.yaml create mode 100644 charts/prometheus/gitlab-redis-servicemonitor.yaml create mode 100644 charts/prometheus/gitlab-servicemonitor.yaml rename {kubernetes-config/config => charts/prometheus}/grafana-ldap-toml.yaml (100%) rename {kubernetes-config/config => charts/prometheus}/grafana-smtp-secret.yaml (100%) create mode 100644 charts/prometheus/ingress-nginx-servicemonitor.yaml create mode 100644 charts/prometheus/prometheus-etcd-cert.yaml create mode 100644 charts/prometheus/values.yaml create mode 100644 charts/sentry/deploy.sh rename {kubernetes-config/config => charts/sentry}/sentry-postgres-secret.yaml (100%) rename kubernetes-config/charts/sentry.yaml => charts/sentry/values.yaml (100%) create mode 100644 charts/seq/deploy.sh create mode 100644 charts/seq/values.yaml create mode 100755 charts/vault/deploy.sh rename kubernetes-config/charts/vault-values.yaml => charts/vault/values.yaml (100%) delete mode 100755 kubernetes-config/bin/initial-kube-system-bootstrap delete mode 100644 kubernetes-config/charts/coredns.yaml delete mode 100644 kubernetes-config/charts/kubernetes-dashboard.yaml delete mode 100644 kubernetes-config/charts/nginx-ingress.yaml delete mode 100644 kubernetes-config/charts/prometheus-operator.yaml delete mode 100755 kubernetes-config/copy-kubernetes-config rename {kubernetes-config/bin => scripts}/config-namespace.sh (100%) rename {kubernetes-config/bin => scripts}/docker-prune-stopped.fish (100%) rename {kubernetes-config/bin => scripts}/gitlab-prune-registry.sh (100%) rename {kubernetes-config/bin => scripts}/reset-sa-tokens.sh (100%) rename {kubernetes-config/bin => scripts}/restart-flannel.sh (100%) rename {kubernetes-config/bin => scripts}/restart-kubernetes.sh (100%) rename {kubernetes-config/bin => scripts}/setup-helm.sh (100%) rename {kubernetes-config/bin => scripts}/taint-node-no-schedule.sh (100%) create mode 100644 scripts/update-helm-repos.sh rename {kubernetes-config/bin => scripts}/ws-curl.sh (100%) diff --git a/charts/anchore/anchore.yaml b/charts/anchore/anchore.yaml new file mode 100644 index 0000000..9765c6d --- /dev/null +++ b/charts/anchore/anchore.yaml @@ -0,0 +1,439 @@ +# Default values for anchore_engine chart. + +# Anchore engine has a dependency on Postgresql, configure here +postgresql: + # To use an external DB or Google CloudSQL in GKE, uncomment & set 'enabled: false' + # externalEndpoint, postgresUser, postgresPassword & postgresDatabase are required values for external postgres + # enabled: false + postgresUser: anchoreengine + postgresPassword: KebabNinja2020 + postgresDatabase: anchore + + # Specify an external (already existing) postgres deployment for use. + # Set to the host and port. eg. mypostgres.myserver.io:5432 + externalEndpoint: Null + + # Configure size of the persistent volume used with helm managed chart. + # This should be commented out if using an external endpoint. 
+ persistence: + storageClass: managed-nfs-storage + resourcePolicy: nil + size: 20Gi + +ingress: + enabled: true + labels: {} + # Exposing the feeds API w/ ingress is for special cases only, uncomment feedsPath if external access to the feeds API is needed + # feedsPath: /v1/feeds/ + apiPath: /v1/ + uiPath: / + + # Uncomment the following lines to bind on specific hostnames + # apiHosts: + # - anchore-api.example.com + # uiHosts: + # - anchore-ui.example.com + # feedsHosts: + # - anchore-feeds.example.com + annotations: + kubernetes.io/ingress.class: nginx + certmanager.io/cluster-issuer: ca-issuer + nginx.ingress.kubernetes.io/ssl-redirect: "true" + tls: + - secretName: anchore-tls + hosts: + - anchore.k2.local + +# Global configuration shared by all anchore-engine services. +anchoreGlobal: + # Image used for all anchore engine deployments (excluding enterprise components). + image: docker.io/anchore/anchore-engine:v0.8.1 + imagePullPolicy: IfNotPresent + # Set image pull secret name if using an anchore-engine image from a private registry + imagePullSecretName: + + # Set this value to True to set up the chart for OpenShift deployment compatibility. + openShiftDeployment: False + + # Add additional labels to all kubernetes resources + labels: {} + # app.kubernetes.io/managed-by: Helm + # foo: bar + + # Set extra environment variables. These will be set on all containers. + extraEnv: [] + # - name: foo + # value: bar + + # Specifies an existing secret to be used for admin and db passwords + existingSecret: Null + + # The scratchVolume controls the mounting of an external volume for scratch space for image analysis. Generally speaking + # you need to provision 3x the size of the largest image (uncompressed) that you want to analyze for this space. + scratchVolume: + mountPath: /analysis_scratch + details: + # Specify volume configuration here + emptyDir: {} + + # A secret must be created in the same namespace as anchore-engine is deployed, containing the certificates & public/private keys used for SSL, SAML & custom CAs. + # Certs and keys should be added using the file name the certificate is stored at. This secret will be mounted to /home/anchore/certs. + certStoreSecretName: Null + + # Specify your pod securityContext here, by default the anchore images utilize the user/group 'anchore' using uid/gid 1000 + # To disable this securityContext comment out `runAsUser` & `runAsGroup` + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + + ### + # Start of General Anchore Engine Configurations (populates /config/config.yaml) + ### + # Set where default configs are placed at startup. This must be a writable location for the pod. + serviceDir: /anchore_service + logLevel: INFO + cleanupImages: true + + # Define timeout, in seconds, for image analysis + imageAnalyzeTimeoutSeconds: 36000 + + # If true, when a user adds an ECR registry with username = awsauto then the system will look for an instance profile to use for auth against the registry + allowECRUseIAMRole: false + + # Enable prometheus metrics + enableMetrics: true + + # Disable auth on prometheus metrics + metricsAuthDisabled: false + + # Sets the password & email address for the default anchore-engine admin user. + defaultAdminPassword: KebabNinja2020 + defaultAdminEmail: jonas.juselius@tromso.serit.no + + saml: + # Locations for keys used for signing and encryption. Only one of 'secret' or 'public_key_path'/'private_key_path' needs to be set.
If all are set then the keys take precedence over the secret value + # Secret is for a shared secret and if set, all components in anchore should have the exact same value in their configs. + secret: Null + privateKeyName: Null + publicKeyName: Null + + oauthEnabled: false + oauthTokenExpirationSeconds: 3600 + + # Set this to True to enable storing user passwords only as secure hashes in the db. This can dramatically increase CPU usage if you + # don't also use oauth and tokens for internal communications (which requires keys/secret to be configured as well) + # WARNING: you should not change this after a system has been initialized as it may cause a mismatch in existing passwords + hashedPasswords: false + + # Configure the database connection within anchore-engine & enterprise-ui. This may get split into 2 different configurations based on service utilized. + dbConfig: + timeout: 120 + # Use ssl, but the default postgresql config in helm's stable repo does not support ssl on server side, so this should be set for external dbs only. + # All ssl dbConfig values are only utilized when ssl=true + ssl: false + sslMode: verify-full + # sslRootCertName is the name of the postgres root CA certificate stored in anchoreGlobal.certStoreSecretName + sslRootCertName: Null + connectionPoolSize: 30 + connectionPoolMaxOverflow: 100 + + internalServicesSsl: + # Enable to force all anchore-engine services to communicate internally using SSL + enabled: false + # specify whether cert is verified against the local certificate bundle (allow self-signed certs if set to false) + verifyCerts: false + certSecretKeyName: Null + certSecretCertName: Null + + # To enable webhooks, set webhooksEnabled: true + webhooksEnabled: true + # Configure webhook outputs here. The service provides these webhooks for notifying external systems of updates + webhooks: + # User and password to be set (using HTTP basic auth) on all webhook calls if necessary + webhook_user: Null + webhook_pass: Null + ssl_verify: false + + # Endpoint for general notification delivery. These events are image/tag updates etc. This is globally configured + # and updates for all users are sent to the same host but with a different path for each user. + # / are required as documented at end of URI - only hostname:port should be configured. + general: + url: http://busynix.default + # url: "http://somehost:9090//" + + # Allow configuration of Kubernetes probes + probes: + liveness: + initialDelaySeconds: 120 + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 6 + successThreshold: 1 + readiness: + timeoutSeconds: 10 + periodSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + +# Configuration for the analyzer pods that perform image analysis +# There may be many of these analyzers but best practice is to not have more than one per node since analysis +# is very IO intensive. Use of affinity/anti-affinity rules for scheduling the analyzers is future work. +anchoreAnalyzer: + replicaCount: 1 + containerPort: 8084 + + # Set extra environment variables. These will be set only on analyzer containers. + extraEnv: [] + # - name: foo + # value: bar + + # The cycle timer is the interval between checks to the work queue for new jobs + cycleTimers: + image_analyzer: 5 + + # Controls the concurrency of the analyzer itself. Can be configured to process more than one task at a time, but it is IO bound, so may not + # necessarily be faster depending on hardware. Should test and balance this value vs. number of analyzers for your deployment cluster performance.
+ concurrentTasksPerWorker: 1 + + # Image layer caching can be enabled to speed up image downloads before analysis. + # This chart sets up a scratch directory for all analyzer pods using the values found at anchoreGlobal.scratchVolume. + # When setting anchoreAnalyzer.layerCacheMaxGigabytes, ensure the scratch volume has sufficient storage space. + # For more info see - https://docs.anchore.com/current/docs/engine/engine_installation/storage/layer_caching/ + # Enable image layer caching by setting a cache size > 0GB. + layerCacheMaxGigabytes: 0 + + # Enable the ability to read a user-supplied 'hints' file to allow users to override and/or augment the software artifacts that are discovered by anchore during its image analysis process. + # Once enabled, the analyzer services will look for a file with a specific name, location and format located within the container image - /anchore_hints.json + # For more info see - https://docs.anchore.com/current/docs/engine/engine_installation/configuration/content_hints + enableHints: false + + configFile: + # Anchore analyzer config file + # + # WARNING - malforming this file can cause the analyzer to fail on all image analysis + # + # Options for any analyzer module(s) that takes customizable input + # + # example configuration for the 'retrieve_files' analyzer, if installed + retrieve_files: + file_list: + - '/etc/passwd' + # - '/etc/services' + # - '/etc/sudoers' + + # example configuration for the 'content_search' analyzer, if installed + secret_search: + match_params: + - MAXFILESIZE=10000 + - STOREONMATCH=n + regexp_match: + - "AWS_ACCESS_KEY=(?i).*aws_access_key_id( *=+ *).*(? uid 101 + runAsUser: 101 + allowPrivilegeEscalation: true + + # Configures the ports the nginx-controller listens on + containerPort: + http: 80 + https: 443 + + # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + + ## Annotations to be added to the controller configuration configmap + ## + configAnnotations: {} + + # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers + proxySetHeaders: {} + + # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + + # Optionally customize the pod dnsConfig. + dnsConfig: {} + + # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. + dnsPolicy: ClusterFirst + + # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network; the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + + # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: true + + ## Use host ports 80 and 443 + ## Disabled by default + ## + hostPort: + enabled: false + ports: + http: 80 + https: 443 + + ## Election ID to use for status update + ## + electionID: ingress-controller-leader + + ## Name of the ingress class to route through this controller + ## + ingressClass: nginx + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Security Context policies for controller pods + ## + podSecurityContext: {} + + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ### + sysctls: {} + # sysctls: + # "net.core.somaxconn": "8192" + + ## Allows customization of the source of the IP address or FQDN to report + ## in the ingress status field. By default, it reads the information provided + ## by the service. If disabled, the status field reports the IP address of the + ## node or nodes where an ingress controller pod is running. + publishService: + enabled: true + ## Allows overriding of the publish service to bind to + ## Must be / + ## + pathOverride: "" + + ## Limit the scope of the controller + ## + scope: + enabled: false + namespace: "" # defaults to .Release.Namespace + + ## Allows customization of the configmap / nginx-configmap namespace + ## + configMapNamespace: "" # defaults to .Release.Namespace + + ## Allows customization of the tcp-services-configmap + ## + tcp: + configMapNamespace: "" # defaults to .Release.Namespace + ## Annotations to be added to the tcp config configmap + annotations: {} + + ## Allows customization of the udp-services-configmap + ## + udp: + configMapNamespace: "" # defaults to .Release.Namespace + ## Annotations to be added to the udp config configmap + annotations: {} + + ## Additional command line arguments to pass to nginx-ingress-controller + ## E.g.
to specify the default SSL certificate you can use + ## extraArgs: + ## default-ssl-certificate: "/" + extraArgs: {} + + ## Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + + ## DaemonSet or Deployment + ## + kind: Deployment + + ## Annotations to be added to the controller Deployment or DaemonSet + ## + annotations: {} + # keel.sh/pollSchedule: "@every 60m" + + ## Labels to be added to the controller Deployment or DaemonSet + ## + labels: {} + # keel.sh/policy: patch + # keel.sh/trigger: poll + + + # The update strategy to apply to the Deployment or DaemonSet + ## + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # minReadySeconds to avoid killing pods before we are ready + ## + minReadySeconds: 0 + + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Equal + value: "true" + - effect: NoSchedule + key: unschedulable + operator: Equal + value: "true" + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Affinity and anti-affinity + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: [ @ingress_nodes@ ] + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - ingress-nginx + - key: app.kubernetes.io/instance + operator: In + values: + - ingress-nginx + - key: app.kubernetes.io/component + operator: In + values: + - controller + topologyKey: "kubernetes.io/hostname" + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - ingress-nginx + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + + ## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: failure-domain.beta.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: ingress-nginx-internal + + ## terminationGracePeriodSeconds + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + + ## Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: + kubernetes.io/os: linux + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 5 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + port: 10254 + + # Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + + ## Annotations to be added to controller pods + ## + podAnnotations: {} + + replicaCount: @ingress_replicas@ + + minAvailable: 3 + + # Define requests resources to avoid probe issues due to CPU utilization in busy nodes + # ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + # Ideally, there should be no limits. + # https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + # limits: + # cpu: 100m + # memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + ## Enable mimalloc as a drop-in replacement for malloc. + ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + + service: + enabled: true + + annotations: {} + labels: {} + # clusterIP: "" + + ## List of IP addresses at which the controller services are available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + + enableHttp: true + enableHttps: true + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + # Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + # sessionAffinity: "" + + # specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, + # the service controller allocates a port from your cluster’s NodePort range. 
+ # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + ports: + http: 80 + https: 443 + + targetPorts: + http: http + https: https + + type: ClusterIP + + # type: NodePort + # nodePorts: + # http: 32080 + # https: 32443 + # tcp: + # 8080: 32808 + nodePorts: + http: 30080 + https: 30443 + tcp: {} + udp: {} + + ## Enables an additional internal load balancer (besides the external one). + ## Annotations are mandatory for the load balancer to come up. Varies with the cloud service. + internal: + enabled: false + annotations: {} + + ## Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. + loadBalancerSourceRanges: [] + + ## Set external traffic policy to: "Local" to preserve source IP on + ## providers supporting it + ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer + # externalTrafficPolicy: "" + + extraContainers: [] + ## Additional containers to be added to the controller pod. + ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + extraVolumeMounts: [] + ## Additional volumeMounts to the controller main container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the controller pod. + # - name: copy-portal-skins + # emptyDir: {} + + extraInitContainers: [] + ## Containers, which are run before the app containers are started. 
# - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + admissionWebhooks: + annotations: {} + enabled: true + failurePolicy: Fail + # timeoutSeconds: 10 + port: 8443 + certificate: "/usr/local/certificates/cert" + key: "/usr/local/certificates/key" + namespaceSelector: {} + objectSelector: {} + + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + + patch: + enabled: true + image: + repository: docker.io/jettech/kube-webhook-certgen + tag: v1.5.0 + pullPolicy: IfNotPresent + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: {} + tolerations: [] + runAsUser: 2000 + + metrics: + port: 10254 + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: true + + service: + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + + # clusterIP: "" + + ## List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9913 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + + serviceMonitor: + enabled: true + additionalLabels: {} + namespace: "" + namespaceSelector: {} + # Default: scrape .Release.Namespace only + # To scrape all, use the following: + # namespaceSelector: + # any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + metricRelabelings: [] + + prometheusRule: + enabled: false + additionalLabels: {} + # namespace: "" + rules: [] + # # These are just example rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config reloads to resume + # - alert: NGINXCertificateExpiry + # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: ssl certificate(s) will expire in less than a week + # summary: renew expiring certificates to avoid downtime + # - alert: NGINXTooMany500s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 5XXs + # summary: More than 5% of all requests returned 5XX; this requires your attention + # - alert: NGINXTooMany400s + # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 + # for: 1m + # labels: + # severity: warning + # annotations: + # description: Too many 4XXs + # summary: More than 5% of all requests returned 4XX; this requires your attention + + ## Improve connection draining when ingress controller pod is deleted using a lifecycle hook: + ## With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds + ## to 300, allowing the draining of connections up to five minutes. + ## If the active connections end before that, the pod will terminate gracefully at that time. + ## To effectively take advantage of this feature, the ConfigMap value + ## worker-shutdown-timeout is set to 240s instead of 10s. + ## + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + + priorityClassName: "" + +## Rollback limit +## +revisionHistoryLimit: 10 + +# Maxmind license key to download GeoLite2 Databases +# https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases +maxmindLicenseKey: "" + +## Default 404 backend +## +defaultBackend: + ## + enabled: true + + image: + repository: k8s.gcr.io/defaultbackend-amd64 + tag: "1.5" + pullPolicy: IfNotPresent + # nobody user -> uid 65534 + runAsUser: 65534 + + extraArgs: {} + + serviceAccount: + create: true + name: + ## Additional environment variables to set for defaultBackend pods + extraEnvs: [] + + port: 8080 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 0 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + affinity: {} + + ## Security Context policies for controller pods + ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for + ## notes on enabling and using sysctls + ## + podSecurityContext: {} + + # labels to add to the pod container metadata + podLabels: {} + # key: value + + ## Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to default backend pods + ## + podAnnotations: {} + + replicaCount: 1 + + minAvailable: 1 + + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + service: + annotations: {} + + # clusterIP: "" + + ## List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + + priorityClassName: "" + +## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266 +rbac: + create: true + scope: false + +# If true, create & use Pod Security Policy resources +# https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +podSecurityPolicy: + enabled: false + +serviceAccount: + create: true + name: + +## Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# TCP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp +## +tcp: {} +# 8080: "default/example-tcp-svc:9000" + +# UDP service key:value pairs +# Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp +## +udp: {} +# 53: "kube-system/kube-dns:53" + diff --git
a/kubernetes-config/config/cluster-auth-rbac.yaml b/charts/kube-system/cluster-auth-rbac.yaml similarity index 100% rename from kubernetes-config/config/cluster-auth-rbac.yaml rename to charts/kube-system/cluster-auth-rbac.yaml diff --git a/charts/kube-system/deploy.sh b/charts/kube-system/deploy.sh new file mode 100755 index 0000000..f7c7e91 --- /dev/null +++ b/charts/kube-system/deploy.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +namespace=kube-system + +charts=( + cluster-auth-rbac.yaml + kube-proxy.yaml +) + +. ../config.sh + +kubectl_apply $namespace "${charts[@]}" + +kubectl delete secret cluster-ca -n $namespace +kubectl create secret tls cluster-ca \ + --namespace=$namespace --cert=$initca/ca.pem --key=$initca/ca-key.pem diff --git a/kubernetes-config/config/kube-proxy.yaml b/charts/kube-system/kube-proxy.yaml similarity index 95% rename from kubernetes-config/config/kube-proxy.yaml rename to charts/kube-system/kube-proxy.yaml index 0bfe7cc..548cb74 100644 --- a/kubernetes-config/config/kube-proxy.yaml +++ b/charts/kube-system/kube-proxy.yaml @@ -17,8 +17,7 @@ kind: ClusterRole metadata: name: kube-proxy-role rules: - - - apiGroups: + - apiGroups: - "" resources: - endpoints @@ -28,9 +27,7 @@ rules: verbs: ["get", "watch", "list"] - nonResourceURLs: ["*"] verbs: ["get", "watch", "list"] - - - - apiGroups: + - apiGroups: - "" resources: - events diff --git a/kubernetes-config/config/dashboard-ingress.yaml b/charts/kubernetes-dashboard/dashboard-ingress.yaml similarity index 88% rename from kubernetes-config/config/dashboard-ingress.yaml rename to charts/kubernetes-dashboard/dashboard-ingress.yaml index 4acf8e2..999b422 100644 --- a/kubernetes-config/config/dashboard-ingress.yaml +++ b/charts/kubernetes-dashboard/dashboard-ingress.yaml @@ -12,7 +12,7 @@ metadata: nginx.ingress.kubernetes.io/ssl-redirect: "true" spec: rules: - - host: dashboard.k2.itpartner.no + - host: dashboard.@cluster@.itpartner.no http: paths: - backend: @@ -20,5 +20,5 @@ spec: servicePort: 443 tls: - hosts: - - dashboard.k2.itpartner.no + - dashboard.@cluster@.itpartner.no secretName: kubernetes-dashboard-tls diff --git a/charts/kubernetes-dashboard/deploy.sh b/charts/kubernetes-dashboard/deploy.sh new file mode 100755 index 0000000..e9ff75e --- /dev/null +++ b/charts/kubernetes-dashboard/deploy.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +version="v2.0.4" +namespace=kubernetes-dashboard + +charts=( + dashboard-ingress.yaml +) + +. ../config.sh + +kubectl create ns $namespace +kubectl_apply $namespace "${charts[@]}" +kubectl apply \ + -n $namespace \ + -f https://raw.githubusercontent.com/kubernetes/dashboard/$version/aio/deploy/recommended.yaml + diff --git a/charts/metrics-server/deploy.sh b/charts/metrics-server/deploy.sh new file mode 100755 index 0000000..c836ef3 --- /dev/null +++ b/charts/metrics-server/deploy.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +namespace=kube-system + +charts=() + +. 
../config.sh + +kubectl_apply $namespace "${charts[@]}" +helm template \ + -n $namespace \ + -f values.yaml \ + metrics-server \ + stable/metrics-server \ + | make_substitutions \ + | sed 's/8443/6443/g' \ + | kubectl apply -f - + + diff --git a/kubernetes-config/charts/metrics-server.yaml b/charts/metrics-server/values.yaml similarity index 100% rename from kubernetes-config/charts/metrics-server.yaml rename to charts/metrics-server/values.yaml diff --git a/charts/minio/deploy.sh b/charts/minio/deploy.sh new file mode 100644 index 0000000..dd480ff --- /dev/null +++ b/charts/minio/deploy.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +namespace=minio + +charts=() + +. ../config.sh + +kubectl create ns $namespace +kubectl_apply $namespace "${charts[@]}" + +helm template \ + -n $namespace \ + -f values.yaml \ + minio minio/minio \ + | make_substitutions \ + | kubectl apply -n $namespace -f - + diff --git a/kubernetes-config/charts/minio.yaml b/charts/minio/values.yaml similarity index 100% rename from kubernetes-config/charts/minio.yaml rename to charts/minio/values.yaml diff --git a/charts/nfs-client-provisioner/deploy.sh b/charts/nfs-client-provisioner/deploy.sh new file mode 100755 index 0000000..934b98d --- /dev/null +++ b/charts/nfs-client-provisioner/deploy.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +namespace=kube-system + +charts=() + +. ../config.sh + +kubectl_apply $namespace "${charts[@]}" +helm template \ + -n $namespace \ + -f values.yaml \ + nfs-client-provisioner \ + stable/nfs-client-provisioner \ + | make_substitutions \ + | kubectl apply -n $namespace -f - + + diff --git a/kubernetes-config/charts/nfs-client-provisioner.yaml b/charts/nfs-client-provisioner/values.yaml similarity index 100% rename from kubernetes-config/charts/nfs-client-provisioner.yaml rename to charts/nfs-client-provisioner/values.yaml diff --git a/charts/prometheus/app-servicemonitor.yaml b/charts/prometheus/app-servicemonitor.yaml new file mode 100644 index 0000000..05de9f0 --- /dev/null +++ b/charts/prometheus/app-servicemonitor.yaml @@ -0,0 +1,19 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + app: app-monitor + release: prometheus-operator # required by Prometheus CRD + name: app-monitor + namespace: kube-system +spec: + endpoints: + - interval: 30s + port: http + path: /metrics + jobLabel: app.kubernetes.io/instance + namespaceSelector: + any: true + selector: + matchLabels: + prometheus.io/monitor: http diff --git a/charts/prometheus/deploy.sh b/charts/prometheus/deploy.sh new file mode 100644 index 0000000..7b9b2ed --- /dev/null +++ b/charts/prometheus/deploy.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +crd_version="v0.42.0" +namespace=prometheus + +charts=( + etcd-cert-secret.yaml + app-servicemonitor.yaml + grafana-ldap-toml.yaml + grafana-smtp-secret.yaml + prometheus-etcd-cert.yaml + gitlab-redis-servicemonitor.yaml + gitlab-servicemonitor.yaml + ingress-nginx-servicemonitor.yaml +) + +. 
../config.sh + +install_prometheus_crds () { + crd=( + monitoring.coreos.com_alertmanagers.yaml + monitoring.coreos.com_podmonitors.yaml + monitoring.coreos.com_probes.yaml + monitoring.coreos.com_prometheuses.yaml + monitoring.coreos.com_prometheusrules.yaml + monitoring.coreos.com_servicemonitors.yaml + monitoring.coreos.com_thanosrulers.yaml + ) + url=https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/$crd_version/example/prometheus-operator-crd/ + for i in "${crd[@]}"; do + kubectl apply -f $url/$i + done +} + +disable_unset () { + local yaml + yaml=$(cat) + # disable ldap for grafana + [ -z "$grafana_ldap_toml" ] && \ + yaml=$(echo "$yaml" | sed '/auth\.ldap:/,+1 s/true/false/; /ldap:/,+1 d') + # disable storage + [ -z "$fileserver" ] && \ + yaml=$(echo "$yaml" | sed '/prometheusSpec:/,+10d') + echo "$yaml" +} + +kubectl create ns $namespace +install_prometheus_crds +kubectl_apply $namespace "${charts[@]}" + +helm template \ + -n $namespace \ + -f values.yaml \ + prometheus \ + prometheus-community/kube-prometheus-stack \ + | make_substitutions \ + | kubectl apply -n $namespace -f - + diff --git a/charts/prometheus/etcd-cert-secret.yaml b/charts/prometheus/etcd-cert-secret.yaml new file mode 100644 index 0000000..daa933b --- /dev/null +++ b/charts/prometheus/etcd-cert-secret.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Secret +metadata: + labels: + app: prometheus-operator + name: etcd-cert + namespace: prometheus +data: + ca.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURhakNDQWxLZ0F3SUJBZ0lVWXVkVEZ2VEpEYW1QUHRSNFh6dzJTMGROMzZjd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUtFd1ZPYVhoUFV6RW5NQ1VHQTFVRQpDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWak1CNFhEVEU1TVRBeE5UQTVNakV3Ck1Gb1hEVEkwTVRBeE16QTVNakV3TUZvd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUsKRXdWT2FYaFBVekVuTUNVR0ExVUVDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWagpNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXVrV0lONVhwRkI2NTJuazdzUGx1CklqNTZTY05sakpOV0JZZXNWaDE4MjhNUE9VVHpuZTgxeUhTMHg3WFVtcGN5VDdDNmRQdlVWckdZUUZCMnZqN0oKcm9Td04xWGtZeW10YXVQVFpqV2J6ZVdSbG1nZ0dpajFhUUZOZy9LelkzZTBkcGdVUEk3TXZQcjFJU001c3JuWgp0YlRRckZmMkVsc3dvVENPMWNYRGMyNTJnblphRW56ZlJEN0o2eXoyVXYvMUZSei9aY29DZVViSnJkNWJjTkk5CmdKYU95MDE0MEdHRzY3WWRtZXBjQWJXeTFOYkFNWlJCamFUUStmZUVWa0p4UGNRNWZqMUhrQ0RuTHJjeEpmdzEKSWhvZFZlNFdLTkhyaUFGR3JTS3JIM1VLb045RFVtU1RGOVVEUWtOQS9zNTRkaEJyMWJFa25lMW1EbGwwYWZhWApqd0lEQVFBQm8wSXdRREFPQmdOVkhROEJBZjhFQkFNQ0FRWXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WCkhRNEVGZ1FVUFQ2THhyWkNVcFEyakRtQ01DRGQ2aVVMbXdnd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLaXMKaFc2bGRBVjZjbnZlM3dhem5aRTVzT1B5T1hRWFdEc2x0a3RzU3B6WU9ocjU5WTZ3Q2oxL05vbGRRaXY4UUJ4TwpQYVF3QUJSL3Evc0p1bUhEODJwSzRwbDZXOThWTDFPdW1wOEoxZjFGV0ZqKzJpMitObG1UUzlHU3FhcGRMdTJoCk9YTUJ6M0JDZFZWSVQ4RGhzZVRBL21WeXlRV1h3LzlsUTYxT1N1c1BubmUwcDBwbkJGTU1TVUhNWFZWeGsrK2UKYzdNTGVVcXFObHliM1JDeGNVQkVTa1h3d1dIaFhhdVR0OTlGQjB5SFNKeTMxd0FNL2pHeUJhdlphb3VMRGticQowNXdwQ3dxQzl6SEFlZElUcThXOUhOQVA5Q1FjejdsV3lDRHhkZ2orN2hoeEdrUUoyYmpFMGxlWlA1bXphRXUyCjdPYUlDVkR0cGE5T2FXY3FpSUE9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0= + etcd-key.pem:
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeVNscjhaYVcwUVptMzlDMElNSVV2V0pnek5DQlJvRlRMMUdaZDZqVldSKzRhMWNTCjg0ZVpuNXhEY2xLL2d3Y0JJcWs3dm9kVXIrenRMVWgzVjIxQnA2ZHljVXpzT041T2lna2l3YTBneStRaHpYZVcKdCtxbStpZFQrbTdzSWMzZVFkd1QxRkt0elRjaGEwMmhNRFNOL2RBTkVOZ0hzUDZTdGZGbyt4UDQ5VS9MRURnUQpZYVM1VlM1L2ZsV2NWejYxMlF4UEtZL2hJejMwdEJYNkQxcHFVL3VyRzVtWmtOUnFFYmVlYWpvNEVNVW5BY3NyCkxhS0txYjlxa2NqcVVwS2FzMTJDNmhvYUFRTkxsL3dYMmkvOVhnSTZxZ2Z6QTNhM0xvSkt3VW8xdVhBY1Jyc3AKQTdkTjlwR3dTakhudXMvaUZpdTBaUTM1ZWhHVjNkYVZ3V1NFYVFJREFRQUJBb0lCQVFDdlU0cmhaWDdOYitiNQo2ODBUUUJVVGUwc2pPQmQwZkFNa210cEhrTDlpam1NT0t6WTVTMVo1NXBoaWEveS9GcnZHbWZtWWRoczV1aVo5CjhVc1N5QWNST01CbVA4VWpTNTdvY0UzNndBcDFxc0JMZEkvSWZKeE1LenJtYXdjajcycG52SWtMNVlQVitOY0gKendGT0lvQWZWOVlMZUQ0NzVhVzVMazl4aGxiV2Rzak9VOW9sTThDNUQvTktadFhOUUZod1N4bHBGNTBPUDEvWgpLQU43WWNYYzJWTGVpV0h0OHBqRGFLekxrbGQ5UFZrWGFiU1h0M251cFdSK0szU3NNRmhyMVNMS3RLTytzN0lvCnQyZUxwMEF2YXFDbjk4MkxMNWNHQXNSQXZmd0UyTWU5clE5Sk1EZTlJU2ZzMDZmRVQvcHdkbVQ5ME0zMlBhQ08KUWpFNmpZSUJBb0dCQVBQR1RxMUhQSVhHS0FVTGM3aWVEb00yaSttWEo2Z3VLYUlzcjFjQmMxKzlwYURIM2hneQpPVkdQeXRjOURIeDV5S055U3hSVEJQcTgyMDhLOGJqUncwWEFKeDFoOU5rWDB2RHhSM29EcGtmWThTY3I5TmhECi9Zb2w2NXRMVDdFZW5KMk9JNVpXR2xYMHY0aHpaQmFaVTN3dnNIVGJBbk04VklaZTczWUUzb0c5QW9HQkFOTkEKQitXazRzdjBOQWVZeFZrOFVOOFhYdS9oRVNQOS9MMGZHZXg0QWYrTTM4dENpZ1kyZ0JwcXIxS2hmbmFKUkxJNAp1R1lEbENrMkNYUWtoem0zSklCSGV5dmc5Wk1BbXJHNHU3YnNJS3lyZEQrNW9Hcm5wSjhZMHBaOWtkWjd3VGwrClJYcnFJelFLQnczUlg3azNVb2FZOWVyVkdSaWpnMDM0OEU0VEp6b2RBb0dBZjR6c2o4ZnRTQVdsM1BYZ0IrSlQKcjU0ejJQWnBIcHVBMTBWUVBWTVovaWRwNlZXS3hOaEttMzJFWWRpa2x1RFY5WWVROTBKMUlZajlxejhMdVd1dwpJK2ZsejM3NHNUckErYWNWZ2ljMHN2VTFRUXpENFFDNlFiV1RzTDdZSk5IaW1xSEx4eGFvVXY5cjFFYWtRUnJhCnp1alpDRnVyellYc3FCNDJaMmNtMFhVQ2dZRUFyclR0MHdZaWF2cHpiVFR6UVhHWWtHZ0xvK2RHL2JVNEtGQm8KYUNCM3hwa0RIRjdUTjRBclFhMnBnWFQ5MlpwOHJjZ3ErSE5OUFNmcmVab2NHNmRURUtRTlFhU3ljL2l6OXZjSQpoNmVRL2p4dHo2WDgvV3pGd0s0T3UxYnVIYXdMaVRqY3pXS0Y2cXZBV2JVbXJVOExlVFZYYS9jQTRsZVlhQXZRCjhVRDEyQ0VDZ1lBUHUwdVlZdnZEUkNtMkNrZUw5U0s2T2o3MEtpb3RucmFSbWdTZVVIRHFLL211NExWejM0Q3cKcGJoTUN4a0I3UlljdXRwcFREdnZVa2lRbEpuek5xNk84Zkp0cEh1MTh0b0RyMWFIcGJhY0QxVFhpRFVjY2kwWQo1enpOOXBMUzc3UkFNWVR5MHJxdUs4L1ZBVWNEb0JVQW5yVVVZT0FPTUIzRlVsbzhtdEJFclE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQ== + etcd.pem: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnVENDQW1tZ0F3SUJBZ0lVSWNFZ2FyYTlXdVI3U0l3MkRyTXhoRDFsUno4d0RRWUpLb1pJaHZjTkFRRUwKQlFBd1RURVNNQkFHQTFVRUJ4TUpaMlZ1WlhKaGRHVmtNUTR3REFZRFZRUUtFd1ZPYVhoUFV6RW5NQ1VHQTFVRQpDeE1lYzJWeWRtbGpaWE11YTNWaVpYSnVaWFJsY3k1d2Eya3VZMkZUY0dWak1CNFhEVEl3TURreU9ERTJOVEV3Ck1Gb1hEVEl3TVRBeU9ERTJOVEV3TUZvd0R6RU5NQXNHQTFVRUF4TUVhekl0TURDQ0FTSXdEUVlKS29aSWh2Y04KQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU1rcGEvR1dsdEVHWnQvUXRDRENGTDFpWU16UWdVYUJVeTlSbVhlbwoxVmtmdUd0WEV2T0htWitjUTNKU3Y0TUhBU0twTzc2SFZLL3M3UzFJZDFkdFFhZW5jbkZNN0RqZVRvb0pJc0d0CklNdmtJYzEzbHJmcXB2b25VL3B1N0NITjNrSGNFOVJTcmMwM0lXdE5vVEEwamYzUURSRFlCN0Qra3JYeGFQc1QKK1BWUHl4QTRFR0drdVZVdWYzNVZuRmMrdGRrTVR5bVA0U005OUxRVitnOWFhbFA3cXh1Wm1aRFVhaEczbm1vNgpPQkRGSndITEt5MmlpcW0vYXBISTZsS1Ntck5kZ3VvYUdnRURTNWY4Rjlvdi9WNENPcW9IOHdOMnR5NkNTc0ZLCk5ibHdIRWE3S1FPM1RmYVJzRW94NTdyUDRoWXJ0R1VOK1hvUmxkM1dsY0ZraEdrQ0F3RUFBYU9CbGpDQmt6QU8KQmdOVkhROEJBZjhFQkFNQ0I0QXdEQVlEVlIwVEFRSC9CQUl3QURBZEJnTlZIUTRFRmdRVTFtNmdXQjZJaDNzdgpTTUdrVHhBZFVTNy9WS013SHdZRFZSMGpCQmd3Rm9BVVBUNkx4clpDVXBRMmpEbUNNQ0RkNmlVTG13Z3dNd1lEClZSMFJCQ3d3S29JS1pYUmpaQzVzYjJOaGJJSVFaWFJqWkM1cmRXSmxNaTVzYjJOaGJJSUVhekl0TUljRUN2MFMKY2pBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQURqR3lSbnRndW9QaUFZeGpRbXBPM2dHSWxjTjNCSVBpVEVEZApEUGsxcGJyakt3Z3FGa0JpU3A1ZmxDbFpCS1lXM3pRRWVKVDEyTCtkczhCMWp5WHVyZ1ZtU1RRWURYYkpiTmNoCmY1WjFyMnQzRXBsOEdTTk5Ec2ZETGo5aUhacml3TUlGRk9XOHNpRnk0ek1SQm4wRC9oeS9LUnVRREQxNHIySG0KWVM3Ty9hUTdaNDBiWThvZ0xVd2oyUHE0M1IxWmhGb0JNR1dFNW5jMW9TVkJHS2NQaWxiby9GSHBJTk1tYmdzbwpNK1FGNTkzWTE2S0o2K1FUKzhUZ1MyMVl6dTQ1RTAwOXMvc1piQkZuL0l1WkJxWHFkZEFZclI4Rm44SytBdGZFCnh6aTFLTnZJWTEzcXRrV21LN3hUTVl6TSsxTEVhOStidkxoNG1ybHFlWTVmVnlBOWF3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= +type: Opaque diff --git a/charts/prometheus/gitlab-redis-servicemonitor.yaml b/charts/prometheus/gitlab-redis-servicemonitor.yaml new file mode 100644 index 0000000..3aec2e6 --- /dev/null +++ b/charts/prometheus/gitlab-redis-servicemonitor.yaml @@ -0,0 +1,20 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + app: gitlab-redis-monitor + release: prometheus-operator # required by Prometheus CRD + name: gitlab-monitor + namespace: gitlab +spec: + endpoints: + - interval: 30s + port: metrics + path: /metrics + jobLabel: app + namespaceSelector: + matchNames: + - gitlab + selector: + matchLabels: + app: redis diff --git a/charts/prometheus/gitlab-servicemonitor.yaml b/charts/prometheus/gitlab-servicemonitor.yaml new file mode 100644 index 0000000..f75579b --- /dev/null +++ b/charts/prometheus/gitlab-servicemonitor.yaml @@ -0,0 +1,21 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + annotations: + labels: + app: gitlab-exporter-monitor + release: prometheus-operator + name: gitlab-exporter-monitor + namespace: gitlab +spec: + endpoints: + - interval: 30s + path: /metrics + port: gitlab-exporter + jobLabel: app + namespaceSelector: + matchNames: + - gitlab + selector: + matchLabels: + app: gitlab-exporter diff --git a/kubernetes-config/config/grafana-ldap-toml.yaml b/charts/prometheus/grafana-ldap-toml.yaml similarity index 100% rename from kubernetes-config/config/grafana-ldap-toml.yaml rename to charts/prometheus/grafana-ldap-toml.yaml diff --git a/kubernetes-config/config/grafana-smtp-secret.yaml b/charts/prometheus/grafana-smtp-secret.yaml similarity index 100% rename from kubernetes-config/config/grafana-smtp-secret.yaml rename to charts/prometheus/grafana-smtp-secret.yaml diff --git a/charts/prometheus/ingress-nginx-servicemonitor.yaml
b/charts/prometheus/ingress-nginx-servicemonitor.yaml new file mode 100644 index 0000000..66ab74e --- /dev/null +++ b/charts/prometheus/ingress-nginx-servicemonitor.yaml @@ -0,0 +1,23 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + app: nginx-ingress + release: prometheus-operator # required by Prometheus CRD + name: nginx-ingress + namespace: kube-system +spec: + endpoints: + - interval: 15s + port: metrics + jobLabel: app + selector: + matchExpressions: + - key: app + operator: In + values: + - nginx-ingress + - key: component + operator: In + values: + - controller diff --git a/charts/prometheus/prometheus-etcd-cert.yaml b/charts/prometheus/prometheus-etcd-cert.yaml new file mode 100644 index 0000000..b1ffcf9 --- /dev/null +++ b/charts/prometheus/prometheus-etcd-cert.yaml @@ -0,0 +1,81 @@ +apiVersion: v1 +kind: Secret +metadata: + labels: + app: prometheus-operator + name: etcd-cert + namespace: kube-system +type: Opaque +stringData: + etcd.pem: |- + -----BEGIN CERTIFICATE----- + MIIDgTCCAmmgAwIBAgIUfVvzugELXCci7r1kRjPUOaXt2S4wDQYJKoZIhvcNAQEL + BQAwTTESMBAGA1UEBxMJZ2VuZXJhdGVkMQ4wDAYDVQQKEwVOaXhPUzEnMCUGA1UE + CxMec2VydmljZXMua3ViZXJuZXRlcy5wa2kuY2FTcGVjMB4XDTIwMDEwMjE2MDcw + MFoXDTIwMDIwMTE2MDcwMFowDzENMAsGA1UEAxMEazItMDCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAMkpa/GWltEGZt/QtCDCFL1iYMzQgUaBUy9RmXeo + 1VkfuGtXEvOHmZ+cQ3JSv4MHASKpO76HVK/s7S1Id1dtQaencnFM7DjeTooJIsGt + IMvkIc13lrfqpvonU/pu7CHN3kHcE9RSrc03IWtNoTA0jf3QDRDYB7D+krXxaPsT + +PVPyxA4EGGkuVUuf35VnFc+tdkMTymP4SM99LQV+g9aalP7qxuZmZDUahG3nmo6 + OBDFJwHLKy2iiqm/apHI6lKSmrNdguoaGgEDS5f8F9ov/V4COqoH8wN2ty6CSsFK + NblwHEa7KQO3TfaRsEox57rP4hYrtGUN+XoRld3WlcFkhGkCAwEAAaOBljCBkzAO + BgNVHQ8BAf8EBAMCB4AwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU1m6gWB6Ih3sv + SMGkTxAdUS7/VKMwHwYDVR0jBBgwFoAUPT6LxrZCUpQ2jDmCMCDd6iULmwgwMwYD + VR0RBCwwKoIKZXRjZC5sb2NhbIIQZXRjZC5rdWJlMi5sb2NhbIIEazItMIcECv0S + cjANBgkqhkiG9w0BAQsFAAOCAQEAUfDYNj/Yc4HqMzcez7NGBKOyJsgyuhJ+tnwv + aurgfIVMcePdFLz24guKsGfJotP28M0NCZm7v/2OSPzobXhis1yzZh2rv1KWuEkf + uQheXUZ5u65f9Sc+klO/deubbckBP+0vWg4Ru4v9t+vCXZQt4u4OGqwLsG6KxxtG + yXFSPuOOBLbyZfyGNCkOv64OF0qY648cLRH9mfZ1WOlcRdexLi+mtwQlWlCD+02f + iTZYIYvNHpKb1oa6J7/QguouRTue7ZkQuNG0p7FJiLHs5nt750HKOTsSjxfM5+SA + +rohNvUwao+K7rsLj2k3WSOU/Ju6uSqbtGEFgfh/oUBdkYwKJQ== + -----END CERTIFICATE----- + etcd-key.pem: |- + -----BEGIN RSA PRIVATE KEY----- + MIIEpAIBAAKCAQEAySlr8ZaW0QZm39C0IMIUvWJgzNCBRoFTL1GZd6jVWR+4a1cS + 84eZn5xDclK/gwcBIqk7vodUr+ztLUh3V21Bp6dycUzsON5Oigkiwa0gy+QhzXeW + t+qm+idT+m7sIc3eQdwT1FKtzTcha02hMDSN/dANENgHsP6StfFo+xP49U/LEDgQ + YaS5VS5/flWcVz612QxPKY/hIz30tBX6D1pqU/urG5mZkNRqEbeeajo4EMUnAcsr + LaKKqb9qkcjqUpKas12C6hoaAQNLl/wX2i/9XgI6qgfzA3a3LoJKwUo1uXAcRrsp + A7dN9pGwSjHnus/iFiu0ZQ35ehGV3daVwWSEaQIDAQABAoIBAQCvU4rhZX7Nb+b5 + 680TQBUTe0sjOBd0fAMkmtpHkL9ijmMOKzY5S1Z55phia/y/FrvGmfmYdhs5uiZ9 + 8UsSyAcROMBmP8UjS57ocE36wAp1qsBLdI/IfJxMKzrmawcj72pnvIkL5YPV+NcH + zwFOIoAfV9YLeD475aW5Lk9xhlbWdsjOU9olM8C5D/NKZtXNQFhwSxlpF50OP1/Z + KAN7YcXc2VLeiWHt8pjDaKzLkld9PVkXabSXt3nupWR+K3SsMFhr1SLKtKO+s7Io + t2eLp0AvaqCn982LL5cGAsRAvfwE2Me9rQ9JMDe9ISfs06fET/pwdmT90M32PaCO + QjE6jYIBAoGBAPPGTq1HPIXGKAULc7ieDoM2i+mXJ6guKaIsr1cBc1+9paDH3hgy + OVGPytc9DHx5yKNySxRTBPq8208K8bjRw0XAJx1h9NkX0vDxR3oDpkfY8Scr9NhD + /Yol65tLT7EenJ2OI5ZWGlX0v4hzZBaZU3wvsHTbAnM8VIZe73YE3oG9AoGBANNA + B+Wk4sv0NAeYxVk8UN8XXu/hESP9/L0fGex4Af+M38tCigY2gBpqr1KhfnaJRLI4 + uGYDlCk2CXQkhzm3JIBHeyvg9ZMAmrG4u7bsIKyrdD+5oGrnpJ8Y0pZ9kdZ7wTl+ + RXrqIzQKBw3RX7k3UoaY9erVGRijg0348E4TJzodAoGAf4zsj8ftSAWl3PXgB+JT + 
r54z2PZpHpuA10VQPVMZ/idp6VWKxNhKm32EYdikluDV9YeQ90J1IYj9qz8LuWuw + I+flz374sTrA+acVgic0svU1QQzD4QC6QbWTsL7YJNHimqHLxxaoUv9r1EakQRra + zujZCFurzYXsqB42Z2cm0XUCgYEArrTt0wYiavpzbTTzQXGYkGgLo+dG/bU4KFBo + aCB3xpkDHF7TN4ArQa2pgXT92Zp8rcgq+HNNPSfreZocG6dTEKQNQaSyc/iz9vcI + h6eQ/jxtz6X8/WzFwK4Ou1buHawLiTjczWKF6qvAWbUmrU8LeTVXa/cA4leYaAvQ + 8UD12CECgYAPu0uYYvvDRCm2CkeL9SK6Oj70KiotnraRmgSeUHDqK/mu4LVz34Cw + pbhMCxkB7RYcutppTDvvUkiQlJnzNq6O8fJtpHu18toDr1aHpbacD1TXiDUcci0Y + 5zzN9pLS77RAMYTy0rquK8/VAUcDoBUAnrUUYOAOMB3FUlo8mtBErQ== + -----END RSA PRIVATE KEY----- + ca.pem: |- + -----BEGIN CERTIFICATE----- + MIIDajCCAlKgAwIBAgIUYudTFvTJDamPPtR4Xzw2S0dN36cwDQYJKoZIhvcNAQEL + BQAwTTESMBAGA1UEBxMJZ2VuZXJhdGVkMQ4wDAYDVQQKEwVOaXhPUzEnMCUGA1UE + CxMec2VydmljZXMua3ViZXJuZXRlcy5wa2kuY2FTcGVjMB4XDTE5MTAxNTA5MjEw + MFoXDTI0MTAxMzA5MjEwMFowTTESMBAGA1UEBxMJZ2VuZXJhdGVkMQ4wDAYDVQQK + EwVOaXhPUzEnMCUGA1UECxMec2VydmljZXMua3ViZXJuZXRlcy5wa2kuY2FTcGVj + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAukWIN5XpFB652nk7sPlu + Ij56ScNljJNWBYesVh1828MPOUTzne81yHS0x7XUmpcyT7C6dPvUVrGYQFB2vj7J + roSwN1XkYymtauPTZjWbzeWRlmggGij1aQFNg/KzY3e0dpgUPI7MvPr1ISM5srnZ + tbTQrFf2ElswoTCO1cXDc252gnZaEnzfRD7J6yz2Uv/1FRz/ZcoCeUbJrd5bcNI9 + gJaOy0140GGG67YdmepcAbWy1NbAMZRBjaTQ+feEVkJxPcQ5fj1HkCDnLrcxJfw1 + IhodVe4WKNHriAFGrSKrH3UKoN9DUmSTF9UDQkNA/s54dhBr1bEkne1mDll0afaX + jwIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV + HQ4EFgQUPT6LxrZCUpQ2jDmCMCDd6iULmwgwDQYJKoZIhvcNAQELBQADggEBAKis + hW6ldAV6cnve3waznZE5sOPyOXQXWDsltktsSpzYOhr59Y6wCj1/NoldQiv8QBxO + PaQwABR/q/sJumHD82pK4pl6W98VL1Oump8J1f1FWFj+2i2+NlmTS9GSqapdLu2h + OXMBz3BCdVVIT8DhseTA/mVyyQWXw/9lQ61OSusPnne0p0pnBFMMSUHMXVVxk++e + c7MLeUqqNlyb3RCxcUBESkXwwWHhXauTt99FB0yHSJy31wAM/jGyBavZaouLDkbq + 05wpCwqC9zHAedITq8W9HNAP9CQcz7lWyCDxdgj+7hhxGkQJ2bjE0leZP5mzaEu2 + 7OaICVDtpa9OaWcqiIA= + -----END CERTIFICATE----- diff --git a/charts/prometheus/values.yaml b/charts/prometheus/values.yaml new file mode 100644 index 0000000..c4279e6 --- /dev/null +++ b/charts/prometheus/values.yaml @@ -0,0 +1,2115 @@ +# Default values for kube-prometheus-stack. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +## Provide a name in place of kube-prometheus-stack for `app:` labels +## +nameOverride: "" + +## Override the deployment namespace +## +namespaceOverride: "" + +## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6 +## +kubeTargetVersionOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +## Create default rules for monitoring the cluster +## +defaultRules: + create: true + rules: + alertmanager: true + etcd: true + general: true + k8s: true + kubeApiserver: true + kubeApiserverAvailability: true + kubeApiserverError: true + kubeApiserverSlos: true + kubelet: true + kubePrometheusGeneral: true + kubePrometheusNodeAlerting: true + kubePrometheusNodeRecording: true + kubernetesAbsent: true + kubernetesApps: true + kubernetesResources: true + kubernetesStorage: true + kubernetesSystem: true + kubeScheduler: true + kubeStateMetrics: true + network: true + node: true + prometheus: true + prometheusOperator: true + time: true + + ## Runbook url prefix for default rules + runbookUrl: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md# + ## Reduce app namespace alert scope + appNamespacesTarget: ".*" + + ## Labels for default rules + labels: {} + ## Annotations for default rules + annotations: {} + + ## Additional labels for PrometheusRule alerts + additionalRuleLabels: {} + +## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster. +## +# additionalPrometheusRules: [] +# - name: my-rule-file +# groups: +# - name: my_group +# rules: +# - record: my_record +# expr: 100 * my_record + +## Provide custom recording or alerting rules to be deployed into the cluster. +## +additionalPrometheusRulesMap: {} +# rule-name: +# groups: +# - name: my_group +# rules: +# - record: my_record +# expr: 100 * my_record + +## +global: + rbac: + create: true + pspEnabled: true + pspAnnotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + +## Configuration for alertmanager +## ref: https://prometheus.io/docs/alerting/alertmanager/ +## +alertmanager: + + ## Deploy alertmanager + ## + enabled: true + + ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2 + ## + apiVersion: v2 + + ## Service account for Alertmanager to use. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + annotations: {} + + ## Configure pod disruption budgets for Alertmanager + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + ## This configuration is immutable once created and will require the PDB to be deleted to be changed + ## https://github.com/kubernetes/kubernetes/issues/45398 + ## + podDisruptionBudget: + enabled: false + minAvailable: 1 + maxUnavailable: "" + + ## Alertmanager configuration directives + ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file + ## https://prometheus.io/webtools/alerting/routing-tree-editor/ + ## + config: + global: + resolve_timeout: 5m + route: + group_by: ['job'] + group_wait: 30s + group_interval: 5m + repeat_interval: 12h + receiver: 'null' + routes: + - match: + alertname: Watchdog + receiver: 'null' + receivers: + - name: 'null' + + ## Pass the Alertmanager configuration directives through Helm's templating + ## engine. If the Alertmanager configuration contains Alertmanager templates, + ## they'll need to be properly escaped so that they are not interpreted by + ## Helm + ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function + ## https://prometheus.io/docs/alerting/configuration/#tmpl_string + ## https://prometheus.io/docs/alerting/notifications/ + ## https://prometheus.io/docs/alerting/notification_examples/ + tplConfig: false + + ## Alertmanager template files to format alerts + ## ref: https://prometheus.io/docs/alerting/notifications/ + ## https://prometheus.io/docs/alerting/notification_examples/ + ## + templateFiles: {} + # + ## An example template: + # template_1.tmpl: |- + # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }} + # + # {{ define "slack.myorg.text" }} + # {{- $root := . -}} + # {{ range .Alerts }} + # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}` + # *Cluster:* {{ template "cluster" $root }} + # *Description:* {{ .Annotations.description }} + # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:> + # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:> + # *Details:* + # {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}` + # {{ end }} + # {{ end }} + # {{ end }} + + ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: ca-issuer + labels: {} + + ## Hosts must be provided if Ingress is enabled. 
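+    ## As a sketch, an additional (hypothetical) host would be listed both here
+    ## and under tls below, so that the certificate cert-manager issues via the
+    ## ca-issuer annotation above covers it:
+    # hosts:
+    #   - alertmanager.k2.local
+    #   - alertmanager-standby.k2.local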
+ ## + hosts: + - alertmanager.k2.local + paths: + - / + tls: + - secretName: alertmanager-general-tls + hosts: + - alertmanager.k2.local + + ## Configuration for Alertmanager secret + ## + secret: + annotations: {} + + ## Configuration for creating an Ingress that will map to each Alertmanager replica service + ## alertmanager.servicePerReplica must be enabled + ## + ingressPerReplica: + enabled: false + annotations: {} + labels: {} + + ## Final form of the hostname for each per replica ingress is + ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} + ## + ## Prefix for the per replica ingress that will have `-$replicaNumber` + ## appended to the end + hostPrefix: "" + ## Domain that will be used for the per replica ingress + hostDomain: "" + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## Secret name containing the TLS certificate for alertmanager per replica ingress + ## Secret must be manually created in the namespace + tlsSecretName: "" + + ## Separated secret for each per replica Ingress. Can be used together with cert-manager + ## + tlsSecretPerReplica: + enabled: false + ## Final form of the secret for each per replica ingress is + ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} + ## + prefix: "alertmanager" + + ## Configuration for Alertmanager service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Port for Alertmanager Service to listen on + ## + port: 9093 + ## To be used with a proxy extraContainer port + ## + targetPort: 9093 + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30903 + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + ## Service type + ## + type: ClusterIP + + ## Configuration for creating a separate Service for each statefulset Alertmanager replica + ## + servicePerReplica: + enabled: false + annotations: {} + + ## Port for Alertmanager Service per replica to listen on + ## + port: 9093 + + ## To be used with a proxy extraContainer port + targetPort: 9093 + + ## Port to expose on each node + ## Only used if servicePerReplica.type is 'NodePort' + ## + nodePort: 30904 + + ## Loadbalancer source IP ranges + ## Only used if servicePerReplica.type is "loadbalancer" + loadBalancerSourceRanges: [] + ## Service type + ## + type: ClusterIP + + ## If true, create a serviceMonitor for alertmanager + ## + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + selfMonitor: true + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig + tlsConfig: {} + + bearerTokenFile: + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. 
+    ##
+    relabelings: []
+    # - sourceLabels: [__meta_kubernetes_pod_node_name]
+    #   separator: ;
+    #   regex: ^(.*)$
+    #   targetLabel: nodename
+    #   replacement: $1
+    #   action: replace
+
+  ## Settings affecting alertmanagerSpec
+  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec
+  ##
+  alertmanagerSpec:
+    ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+    ## Metadata Labels and Annotations get propagated to the Alertmanager pods.
+    ##
+    podMetadata: {}
+
+    ## Image of Alertmanager
+    ##
+    image:
+      repository: quay.io/prometheus/alertmanager
+      tag: v0.21.0
+      sha: ""
+
+    ## If true, the user is responsible for providing a secret with the Alertmanager configuration.
+    ## When true, the config section above is ignored (including templateFiles) and the one in the secret is used instead.
+    ##
+    useExistingSecret: false
+
+    ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
+    ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
+    ##
+    secrets: []
+
+    ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
+    ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
+    ##
+    configMaps: []
+
+    ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for
+    ## this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config.
+    ##
+    # configSecret:
+
+    ## Define Log Format
+    # Use logfmt (default) or json-formatted logging
+    logFormat: logfmt
+
+    ## Log level for Alertmanager to be configured with.
+    ##
+    logLevel: info
+
+    ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
+    ## running cluster equal to the expected size.
+    replicas: 1
+
+    ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
+    ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
+    ##
+    retention: 120h
+
+    ## Storage is the definition of how storage will be used by the Alertmanager instances.
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
+    ##
+    storage: {}
+    # volumeClaimTemplate:
+    #   spec:
+    #     storageClassName: gluster
+    #     accessModes: ["ReadWriteOnce"]
+    #     resources:
+    #       requests:
+    #         storage: 50Gi
+    #   selector: {}
+
+
+    ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs
+    ## if Alertmanager is not served from the root of a DNS name.
+    ##
+    externalUrl:
+
+    ## The route prefix Alertmanager registers HTTP handlers for. This is useful if, when using ExternalURL, a proxy is
+    ## rewriting HTTP routes of a request, and the actual ExternalURL is still true, but the server serves requests
+    ## under a different route prefix. For example for use with kubectl proxy.
+    ##
+    routePrefix: /
+
+    ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
+    ##
+    paused: false
+
+    ## Define which Nodes the Pods are scheduled on.
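+    ## For example, to pin Alertmanager to Linux nodes (a sketch using the
+    ## well-known kubernetes.io/os label):
+    # nodeSelector:
+    #   kubernetes.io/os: linux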
+    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+    ##
+    nodeSelector: {}
+
+    ## Define resources requests and limits for single Pods.
+    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources: {}
+    # requests:
+    #   memory: 400Mi
+
+    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
+    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
+    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
+    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
+    ##
+    podAntiAffinity: ""
+
+    ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity.
+    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
+    ##
+    podAntiAffinityTopologyKey: kubernetes.io/hostname
+
+    ## Assign custom affinity rules to the alertmanager instance
+    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+    ##
+    affinity: {}
+    # nodeAffinity:
+    #   requiredDuringSchedulingIgnoredDuringExecution:
+    #     nodeSelectorTerms:
+    #     - matchExpressions:
+    #       - key: kubernetes.io/e2e-az-name
+    #         operator: In
+    #         values:
+    #         - e2e-az1
+    #         - e2e-az2
+
+    ## If specified, the pod's tolerations.
+    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+    ##
+    tolerations: []
+    # - key: "key"
+    #   operator: "Equal"
+    #   value: "value"
+    #   effect: "NoSchedule"
+
+    ## SecurityContext holds pod-level security attributes and common container settings.
+    ## This defaults to a non-root user with uid 1000 and gid 2000.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+    ##
+    securityContext:
+      runAsGroup: 2000
+      runAsNonRoot: true
+      runAsUser: 1000
+      fsGroup: 2000
+
+    ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP.
+    ## Note this is only for the Alertmanager UI, not the gossip communication.
+    ##
+    listenLocal: false
+
+    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod.
+    ##
+    containers: []
+
+    ## Priority class assigned to the Pods
+    ##
+    priorityClassName: ""
+
+    ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
+    ##
+    additionalPeers: []
+
+    ## PortName to use for Alertmanager.
+    ##
+    portName: "web"
+
+    ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918
+    ##
+    clusterAdvertiseAddress: false
+
+
+## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
+##
+grafana:
+  enabled: true
+  namespaceOverride: ""
+
+  ## Deploy default dashboards.
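+  ## Besides the defaults, the dashboard sidecar configured below picks up any
+  ## ConfigMap carrying the grafana_dashboard label. A minimal sketch (the
+  ## ConfigMap name and dashboard JSON are hypothetical):
+  # apiVersion: v1
+  # kind: ConfigMap
+  # metadata:
+  #   name: my-dashboard
+  #   labels:
+  #     grafana_dashboard: "1"
+  # data:
+  #   my-dashboard.json: |-
+  #     { "title": "My dashboard", "panels": [] }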
+ ## + defaultDashboardsEnabled: true + + adminPassword: prom-operator + + ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: ca-issuer + labels: {} + hosts: + - grafana.k2.local + path: / + tls: + - secretName: grafana-general-tls + hosts: + - grafana.k2.local + + sidecar: + dashboards: + enabled: true + label: grafana_dashboard + + ## Annotations for Grafana dashboard configmaps + ## + annotations: {} + datasources: + enabled: true + defaultDatasourceEnabled: true + + ## Annotations for Grafana datasource configmaps + ## + annotations: {} + + ## Create datasource for each Pod of Prometheus StatefulSet; + ## this uses headless service `prometheus-operated` which is + ## created by Prometheus Operator + ## ref: https://git.io/fjaBS + createPrometheusReplicasDatasources: false + label: grafana_datasource + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # configMap: certs-configmap + # readOnly: true + + ## Configure additional grafana datasources (passed through tpl) + ## ref: http://docs.grafana.org/administration/provisioning/#datasources + additionalDataSources: [] + # - name: prometheus-sample + # access: proxy + # basicAuth: true + # basicAuthPassword: pass + # basicAuthUser: daco + # editable: false + # jsonData: + # tlsSkipVerify: true + # orgId: 1 + # type: prometheus + # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090 + # version: 1 + + ## Passed to grafana subchart and used by servicemonitor below + ## + service: + portName: service + + ## If true, create a serviceMonitor for grafana + ## + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + selfMonitor: true + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping the kube api server +## +kubeApiServer: + enabled: true + tlsConfig: + serverName: kubernetes + insecureSkipVerify: true + + ## If your API endpoint address is not reachable (as in AKS) you can replace it with the kubernetes service + ## + relabelings: [] + # - sourceLabels: + # - __meta_kubernetes_namespace + # - __meta_kubernetes_service_name + # - __meta_kubernetes_endpoint_port_name + # action: keep + # regex: default;kubernetes;https + # - targetLabel: __address__ + # replacement: kubernetes.default.svc:443 + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + jobLabel: component + selector: + matchLabels: + component: apiserver + provider: kubernetes + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + +## Component scraping the kubelet and kubelet-hosted cAdvisor +## +kubelet: + enabled: true + namespace: kube-system + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## Enable scraping the kubelet over https. 
For requirements to enable this see + ## https://github.com/prometheus-operator/prometheus-operator/issues/926 + ## + https: true + + ## Enable scraping /metrics/cadvisor from kubelet's service + ## + cAdvisor: true + + ## Enable scraping /metrics/probes from kubelet's service + ## + probes: true + + ## Enable scraping /metrics/resource from kubelet's service + ## + resource: true + # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource + resourcePath: "/metrics/resource/v1alpha1" + ## Metric relabellings to apply to samples before ingestion + ## + cAdvisorMetricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## Metric relabellings to apply to samples before ingestion + ## + probesMetricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + # relabel configs to apply to samples before ingestion. + # metrics_path is required to match upstream rules and charts + ## + cAdvisorRelabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + probesRelabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + resourceRelabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + metricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + # relabel configs to apply to samples before ingestion. + # metrics_path is required to match upstream rules and charts + ## + relabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping the kube controller manager +## +kubeControllerManager: + enabled: true + + ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on + ## + endpoints: + - 10.253.18.114 + + ## If using kubeControllerManager.endpoints only the port and targetPort are used + ## + service: + port: 10252 + targetPort: 10252 + # selector: + # component: kube-controller-manager + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## Enable scraping kube-controller-manager over https. 
+ ## Requires proper certs (not self-signed) and delegated authentication/authorization checks + ## + https: true + + # Skip TLS certificate validation when scraping + insecureSkipVerify: null + + # Name of the server to use when validating TLS certificate + serverName: null + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping coreDns. Use either this or kubeDns +## +coreDns: + enabled: true + service: + port: 9153 + targetPort: 10055 + # selector: + # k8s-app: kube-dns + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping kubeDns. Use either this or coreDns +## +kubeDns: + enabled: false + service: + dnsmasq: + port: 10054 + targetPort: 10054 + skydns: + port: 10055 + targetPort: 10055 + # selector: + # k8s-app: kube-dns + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + dnsmasqMetricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + dnsmasqRelabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping etcd +## +kubeEtcd: + enabled: true + + ## If your etcd is not deployed as a pod, specify IPs it can be found on + ## + endpoints: + - 10.253.18.114 + # - 10.141.4.23 + # - 10.141.4.24 + + ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used + ## + service: + port: 2379 + targetPort: 2379 + # selector: + # component: etcd + + ## Configure secure access to the etcd cluster by loading a secret into prometheus and + ## specifying security configuration below. 
For example, with a secret named etcd-client-cert + ## + ## serviceMonitor: + ## scheme: https + ## insecureSkipVerify: false + ## serverName: localhost + ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca + ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client + ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key + ## + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + scheme: https + insecureSkipVerify: true + serverName: "" + caFile: /etc/prometheus/secrets/etcd-cert/ca.pem + certFile: /etc/prometheus/secrets/etcd-cert/etcd.pem + keyFile: /etc/prometheus/secrets/etcd-cert/etcd-key.pem + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + +## Component scraping kube scheduler +## +kubeScheduler: + enabled: true + + ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on + ## + endpoints: + - 10.253.18.114 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeScheduler.endpoints only the port and targetPort are used + ## + service: + port: 10251 + targetPort: 10251 + # selector: + # component: kube-scheduler + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + ## Enable scraping kube-scheduler over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks + ## + https: false + + ## Skip TLS certificate validation when scraping + insecureSkipVerify: null + + ## Name of the server to use when validating TLS certificate + serverName: null + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + +## Component scraping kube proxy +## +kubeProxy: + enabled: true + + ## If your kube proxy is not deployed as a pod, specify IPs it can be found on + ## + endpoints: + - 10.253.18.114 + - 10.253.18.115 + - 10.253.18.116 + - 10.253.18.117 + - 10.253.18.118 + - 10.253.18.103 + + service: + port: 10249 + targetPort: 10249 + # selector: + # k8s-app: kube-proxy + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## Enable scraping kube-proxy over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks + ## + https: false + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. 
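+    ## (Aside: the kubeEtcd caFile/certFile/keyFile paths above assume a Secret
+    ## named "etcd-cert" holding ca.pem, etcd.pem and etcd-key.pem; it is
+    ## mounted via prometheus.prometheusSpec.secrets further down. A sketch of
+    ## creating it, assuming a hypothetical "monitoring" namespace and local
+    ## PEM files with those names:
+    # kubectl --namespace monitoring create secret generic etcd-cert \
+    #   --from-file=ca.pem --from-file=etcd.pem --from-file=etcd-key.pem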
+    ##
+    relabelings: []
+    # - action: keep
+    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+    #   sourceLabels: [__name__]
+
+
+## Component scraping kube state metrics
+##
+kubeStateMetrics:
+  enabled: true
+  serviceMonitor:
+    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+    ##
+    interval: ""
+
+    ## metric relabel configs to apply to samples before ingestion.
+    ##
+    metricRelabelings: []
+    # - action: keep
+    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+    #   sourceLabels: [__name__]
+
+    # relabel configs to apply to samples before ingestion.
+    ##
+    relabelings: []
+    # - sourceLabels: [__meta_kubernetes_pod_node_name]
+    #   separator: ;
+    #   regex: ^(.*)$
+    #   targetLabel: nodename
+    #   replacement: $1
+    #   action: replace
+
+## Configuration for kube-state-metrics subchart
+##
+kube-state-metrics:
+  namespaceOverride: ""
+  rbac:
+    create: true
+  podSecurityPolicy:
+    enabled: true
+
+## Deploy node exporter as a daemonset to all nodes
+##
+nodeExporter:
+  enabled: true
+
+  ## Use the value configured in prometheus-node-exporter.podLabels
+  ##
+  jobLabel: jobLabel
+
+  serviceMonitor:
+    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+    ##
+    interval: ""
+
+    ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used.
+    ##
+    scrapeTimeout: ""
+
+    ## metric relabel configs to apply to samples before ingestion.
+    ##
+    metricRelabelings: []
+    # - sourceLabels: [__name__]
+    #   separator: ;
+    #   regex: ^node_mountstats_nfs_(event|operations|transport)_.+
+    #   replacement: $1
+    #   action: drop
+
+    ## relabel configs to apply to samples before ingestion.
+    ##
+    relabelings: []
+    # - sourceLabels: [__meta_kubernetes_pod_node_name]
+    #   separator: ;
+    #   regex: ^(.*)$
+    #   targetLabel: nodename
+    #   replacement: $1
+    #   action: replace
+
+## Configuration for prometheus-node-exporter subchart
+##
+prometheus-node-exporter:
+  namespaceOverride: ""
+  podLabels:
+    ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards
+    ##
+    jobLabel: node-exporter
+  extraArgs:
+    - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
+    - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
+
+## Manages Prometheus and Alertmanager components
+##
+prometheusOperator:
+  enabled: true
+
+  # Prometheus-Operator v0.39.0 and later support TLS natively.
+  tls:
+    enabled: true
+
+  ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted
+  ## rules from making their way into prometheus and potentially preventing the container from starting
+  admissionWebhooks:
+    failurePolicy: Fail
+    enabled: true
+    ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data.
+    ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
+    ## certs ahead of time if you wish.
+ ## + patch: + enabled: true + image: + repository: jettech/kube-webhook-certgen + tag: v1.5.0 + sha: "" + pullPolicy: IfNotPresent + resources: {} + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: {} + affinity: {} + tolerations: [] + + ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). + ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration + ## + namespaces: {} + # releaseNamespace: true + # additional: + # - kube-system + + ## Namespaces not to scope the interaction of the Prometheus Operator (deny list). + ## + denyNamespaces: [] + + ## Filter namespaces to look for prometheus-operator custom resources + ## + alertmanagerInstanceNamespaces: [] + prometheusInstanceNamespaces: [] + thanosInstanceNamespaces: [] + + ## Service account for Alertmanager to use. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + + ## Configuration for Prometheus operator service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30080 + + nodePortTls: 30443 + + ## Additional ports to open for Prometheus service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services + ## + additionalPorts: [] + + ## Loadbalancer IP + ## Only use if service.type is "loadbalancer" + ## + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Service type + ## NodePort, ClusterIP, loadbalancer + ## + type: ClusterIP + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + ## Labels to add to the operator pod + ## + podLabels: {} + + ## Annotations to add to the operator pod + ## + podAnnotations: {} + + ## Assign a PriorityClassName to pods if set + # priorityClassName: "" + + ## Define Log Format + # Use logfmt (default) or json-formatted logging + # logFormat: logfmt + + ## Decrease log verbosity to errors only + # logLevel: error + + ## If true, the operator will create and maintain a service for scraping kubelets + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/helm/prometheus-operator/README.md + ## + kubeletService: + enabled: true + namespace: kube-system + + ## Create a servicemonitor for the operator + ## + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + ## Scrape timeout. If not set, the Prometheus default scrape timeout is used. + scrapeTimeout: "" + selfMonitor: true + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. 
+ ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Resource limits & requests + ## + resources: {} + # limits: + # cpu: 200m + # memory: 200Mi + # requests: + # cpu: 100m + # memory: 100Mi + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working + ## + hostNetwork: false + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## Assign custom affinity rules to the prometheus operator + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + + ## Prometheus-operator image + ## + image: + repository: quay.io/prometheus-operator/prometheus-operator + tag: v0.43.0 + sha: "" + pullPolicy: IfNotPresent + + ## Configmap-reload image to use for reloading configmaps + ## + configmapReloadImage: + repository: docker.io/jimmidyson/configmap-reload + tag: v0.4.0 + sha: "" + + ## Prometheus-config-reloader image to use for config and rule reloading + ## + prometheusConfigReloaderImage: + repository: quay.io/prometheus-operator/prometheus-config-reloader + tag: v0.43.0 + sha: "" + + ## Set the prometheus config reloader side-car CPU limit + ## + configReloaderCpu: 100m + + ## Set the prometheus config reloader side-car memory limit + ## + configReloaderMemory: 25Mi + + ## Set a Field Selector to filter watched secrets + ## + secretFieldSelector: "" + + ## kubectl image to use when cleaning up + ## + kubectlImage: + repository: docker.io/bitnami/kubectl + tag: 1.16.15 + sha: "" + pullPolicy: IfNotPresent + +## Deploy a Prometheus instance +## +prometheus: + + enabled: true + + ## Annotations for Prometheus + ## + annotations: {} + + ## Service account for Prometheuses to use. 
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+  ##
+  serviceAccount:
+    create: true
+    name: ""
+
+  ## Configuration for Prometheus service
+  ##
+  service:
+    annotations: {}
+    labels: {}
+    clusterIP: ""
+
+    ## Port for Prometheus Service to listen on
+    ##
+    port: 9090
+
+    ## To be used with a proxy extraContainer port
+    targetPort: 9090
+
+    ## List of IP addresses at which the Prometheus server service is available
+    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+    ##
+    externalIPs: []
+
+    ## Port to expose on each node
+    ## Only used if service.type is 'NodePort'
+    ##
+    nodePort: 30090
+
+    ## Loadbalancer IP
+    ## Only use if service.type is "loadbalancer"
+    loadBalancerIP: ""
+    loadBalancerSourceRanges: []
+    ## Service type
+    ##
+    type: ClusterIP
+
+    sessionAffinity: ""
+
+  ## Configuration for creating a separate Service for each statefulset Prometheus replica
+  ##
+  servicePerReplica:
+    enabled: false
+    annotations: {}
+
+    ## Port for Prometheus Service per replica to listen on
+    ##
+    port: 9090
+
+    ## To be used with a proxy extraContainer port
+    targetPort: 9090
+
+    ## Port to expose on each node
+    ## Only used if servicePerReplica.type is 'NodePort'
+    ##
+    nodePort: 30091
+
+    ## Loadbalancer source IP ranges
+    ## Only used if servicePerReplica.type is "loadbalancer"
+    loadBalancerSourceRanges: []
+    ## Service type
+    ##
+    type: ClusterIP
+
+  ## Configure pod disruption budgets for Prometheus
+  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
+  ## This configuration is immutable once created and will require the PDB to be deleted to be changed
+  ## https://github.com/kubernetes/kubernetes/issues/45398
+  ##
+  podDisruptionBudget:
+    enabled: false
+    minAvailable: 1
+    maxUnavailable: ""
+
+  # Ingress exposes the Thanos sidecar outside the cluster
+  thanosIngress:
+    enabled: false
+    annotations: {}
+    labels: {}
+    servicePort: 10901
+    ## Hosts must be provided if Ingress is enabled.
+    ##
+    hosts: []
+    # - thanos-gateway.domain.com
+
+    ## Paths to use for ingress rules
+    ##
+    paths: []
+    # - /
+
+    ## TLS configuration for Thanos Ingress
+    ## Secret must be manually created in the namespace
+    ##
+    tls: []
+    # - secretName: thanos-gateway-tls
+    #   hosts:
+    #   - thanos-gateway.domain.com
+
+  ingress:
+    enabled: true
+    annotations:
+      kubernetes.io/ingress.class: nginx
+      cert-manager.io/cluster-issuer: ca-issuer
+    labels: {}
+
+    hosts:
+      - prometheus.k2.local
+
+    ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
+    ##
+    paths:
+      - /
+
+    ## TLS configuration for Prometheus Ingress
+    ## Secret must be manually created in the namespace
+    ##
+    tls:
+      - secretName: prometheus-general-tls
+        hosts:
+          - prometheus.k2.local
+
+  ## Configuration for creating an Ingress that will map to each Prometheus replica service
+  ## prometheus.servicePerReplica must be enabled
+  ##
+  ingressPerReplica:
+    enabled: false
+    annotations: {}
+    labels: {}
+
+    ## Final form of the hostname for each per replica ingress is
+    ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
+    ##
+    ## Prefix for the per replica ingress that will have `-$replicaNumber`
+    ## appended to the end
+    hostPrefix: ""
+    ## Domain that will be used for the per replica ingress
+    hostDomain: ""
+
+    ## Paths to use for ingress rules
+    ##
+    paths: []
+    # - /
+
+    ## Secret name containing the TLS certificate for Prometheus per replica ingress
+    ## Secret must be manually created in the namespace
+    tlsSecretName: ""
+
+    ## Separated secret for each per replica Ingress. Can be used together with cert-manager
+    ##
+    tlsSecretPerReplica:
+      enabled: false
+      ## Final form of the secret for each per replica ingress is
+      ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
+      ##
+      prefix: "prometheus"
+
+  ## Configure additional options for default pod security policy for Prometheus
+  ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+  podSecurityPolicy:
+    allowedCapabilities: []
+
+  serviceMonitor:
+    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
+    ##
+    interval: ""
+    selfMonitor: true
+
+    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
+    scheme: ""
+
+    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
+    ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
+    tlsConfig: {}
+
+    bearerTokenFile:
+
+    ## metric relabel configs to apply to samples before ingestion.
+    ##
+    metricRelabelings: []
+    # - action: keep
+    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+    #   sourceLabels: [__name__]
+
+    # relabel configs to apply to samples before ingestion.
+    ##
+    relabelings: []
+    # - sourceLabels: [__meta_kubernetes_pod_node_name]
+    #   separator: ;
+    #   regex: ^(.*)$
+    #   targetLabel: nodename
+    #   replacement: $1
+    #   action: replace
+
+  ## Settings affecting prometheusSpec
+  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
+  ##
+  prometheusSpec:
+    ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos.
+    ##
+    disableCompaction: false
+    ## APIServerConfig
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#apiserverconfig
+    ##
+    apiserverConfig: {}
+
+    ## Interval between consecutive scrapes.
+    ##
+    scrapeInterval: ""
+
+    ## Interval between consecutive evaluations.
+    ##
+    evaluationInterval: ""
+
+    ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
+    ##
+    listenLocal: false
+
+    ## EnableAdminAPI enables the Prometheus administrative HTTP API, which includes functionality such as deleting time series.
+    ## This is disabled by default.
+    ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
+    ##
+    enableAdminAPI: false
+
+    ## Image of Prometheus.
+    ##
+    image:
+      repository: quay.io/prometheus/prometheus
+      tag: v2.22.0
+      sha: ""
+
+    ## Tolerations for use with node taints
+    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+    ##
+    tolerations: []
+    # - key: "key"
+    #   operator: "Equal"
+    #   value: "value"
+    #   effect: "NoSchedule"
+
+    ## Alertmanagers to which alerts will be sent
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints
+    ##
+    ## Default configuration will connect to the alertmanager deployed as part of this release
+    ##
+    alertingEndpoints: []
+    # - name: ""
+    #   namespace: ""
+    #   port: http
+    #   scheme: http
+    #   pathPrefix: ""
+    #   tlsConfig: {}
+    #   bearerTokenFile: ""
+    #   apiVersion: v2
+
+    ## External labels to add to any time series or alerts when communicating with external systems
+    ##
+    externalLabels: {}
+
+    ## Name of the external label used to denote replica name
+    ##
+    replicaExternalLabelName: ""
+
+    ## If true, the Operator won't add the external label used to denote replica name
+    ##
+    replicaExternalLabelNameClear: false
+
+    ## Name of the external label used to denote Prometheus instance name
+    ##
+    prometheusExternalLabelName: ""
+
+    ## If true, the Operator won't add the external label used to denote Prometheus instance name
+    ##
+    prometheusExternalLabelNameClear: false
+
+    ## External URL at which Prometheus will be reachable.
+    ##
+    externalUrl: ""
+
+    ## Define which Nodes the Pods are scheduled on.
+    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+    ##
+    nodeSelector: {}
+
+    ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
+    ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
+    ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
+    ## with the new list of secrets.
+    ##
+    secrets:
+      - etcd-cert
+
+    ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
+    ## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
+    ##
+    configMaps: []
+
+    ## QuerySpec defines the query command line flags when starting Prometheus.
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#queryspec
+    ##
+    query: {}
+
+    ## Namespaces to be selected for PrometheusRules discovery.
+    ## If nil, select own namespace.
+ ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage + ## + ruleNamespaceSelector: {} + + ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the PrometheusRule resources created + ## + ruleSelectorNilUsesHelmValues: true + + ## PrometheusRules to be selected for target discovery. + ## If {}, select all ServiceMonitors + ## + ruleSelector: {} + ## Example which select all prometheusrules resources + ## with label "prometheus" with values any of "example-rules" or "example-rules-2" + # ruleSelector: + # matchExpressions: + # - key: prometheus + # operator: In + # values: + # - example-rules + # - example-rules-2 + # + ## Example which select all prometheusrules resources with label "role" set to "example-rules" + # ruleSelector: + # matchLabels: + # role: example-rules + + ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the servicemonitors created + ## + serviceMonitorSelectorNilUsesHelmValues: true + + ## ServiceMonitors to be selected for target discovery. + ## If {}, select all ServiceMonitors + ## + serviceMonitorSelector: {} + ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel" + # serviceMonitorSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for ServiceMonitor discovery. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage + ## + serviceMonitorNamespaceSelector: {} + + ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the podmonitors created + ## + podMonitorSelectorNilUsesHelmValues: true + + ## PodMonitors to be selected for target discovery. + ## If {}, select all PodMonitors + ## + podMonitorSelector: {} + ## Example which selects PodMonitors with label "prometheus" set to "somelabel" + # podMonitorSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for PodMonitor discovery. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage + ## + podMonitorNamespaceSelector: {} + + ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the probes created + ## + probeSelectorNilUsesHelmValues: true + + ## Probes to be selected for target discovery. + ## If {}, select all Probes + ## + probeSelector: {} + ## Example which selects Probes with label "prometheus" set to "somelabel" + # probeSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for Probe discovery. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage + ## + probeNamespaceSelector: {} + + ## How long to retain metrics + ## + retention: 10d + + ## Maximum size of metrics + ## + retentionSize: "" + + ## Enable compression of the write-ahead log using Snappy. 
+    ##
+    walCompression: false
+
+    ## If true, the Operator won't process any Prometheus configuration changes
+    ##
+    paused: false
+
+    ## Number of Prometheus replicas desired
+    ##
+    replicas: 1
+
+    ## Log level for Prometheus to be configured with
+    ##
+    logLevel: info
+
+    ## Log format for Prometheus to be configured with
+    ##
+    logFormat: logfmt
+
+    ## Prefix used to register routes, overriding externalUrl route.
+    ## Useful for proxies that rewrite URLs.
+    ##
+    routePrefix: /
+
+    ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+    ## Metadata Labels and Annotations get propagated to the prometheus pods.
+    ##
+    podMetadata: {}
+    # labels:
+    #   app: prometheus
+    #   k8s-app: prometheus
+
+    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
+    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
+    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
+    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
+    podAntiAffinity: ""
+
+    ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity.
+    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
+    ##
+    podAntiAffinityTopologyKey: kubernetes.io/hostname
+
+    ## Assign custom affinity rules to the prometheus instance
+    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+    ##
+    affinity: {}
+    # nodeAffinity:
+    #   requiredDuringSchedulingIgnoredDuringExecution:
+    #     nodeSelectorTerms:
+    #     - matchExpressions:
+    #       - key: kubernetes.io/e2e-az-name
+    #         operator: In
+    #         values:
+    #         - e2e-az1
+    #         - e2e-az2
+
+    ## The remote_read spec configuration for Prometheus.
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec
+    remoteRead: []
+    # - url: http://remote1/read
+
+    ## The remote_write spec configuration for Prometheus.
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec
+    remoteWrite: []
+    # - url: http://remote1/push
+
+    ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature
+    remoteWriteDashboards: false
+
+    ## Resource limits & requests
+    ##
+    resources: {}
+    # requests:
+    #   memory: 400Mi
+
+    ## Prometheus StorageSpec for persistent data
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md
+    ##
+    storageSpec:
+      volumeClaimTemplate:
+        spec:
+          storageClassName: managed-nfs-storage
+          accessModes: ["ReadWriteOnce"]
+          resources:
+            requests:
+              storage: 50Gi
+    #   selector: {}
+
+    # Additional volumes on the output StatefulSet definition.
+    volumes: []
+    # Additional VolumeMounts on the output StatefulSet definition.
+    volumeMounts: []
+
+    ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
+    ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
+    ## as specified in the official Prometheus documentation:
+    ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
+    ## appended, the user is responsible for making sure they are valid.
+    ## Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review
+    ## Prometheus release notes to ensure that no incompatible scrape configs are going to break Prometheus after the upgrade.
+    ##
+    ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
+    ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
+    ##
+    additionalScrapeConfigs: []
+    # - job_name: kube-etcd
+    #   kubernetes_sd_configs:
+    #     - role: node
+    #   scheme: https
+    #   tls_config:
+    #     ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
+    #     cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
+    #     key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
+    #   relabel_configs:
+    #   - action: labelmap
+    #     regex: __meta_kubernetes_node_label_(.+)
+    #   - source_labels: [__address__]
+    #     action: replace
+    #     target_label: __address__
+    #     regex: ([^:;]+):(\d+)
+    #     replacement: ${1}:2379
+    #   - source_labels: [__meta_kubernetes_node_name]
+    #     action: keep
+    #     regex: .*mst.*
+    #   - source_labels: [__meta_kubernetes_node_name]
+    #     action: replace
+    #     target_label: node
+    #     regex: (.*)
+    #     replacement: ${1}
+    #   metric_relabel_configs:
+    #   - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
+    #     action: labeldrop
+
+    ## If additional scrape configurations are already deployed in a single secret file you can use this section.
+    ## Expected values are the secret name and key
+    ## Cannot be used with additionalScrapeConfigs
+    additionalScrapeConfigsSecret: {}
+    # enabled: false
+    # name:
+    # key:
+
+    ## additionalPrometheusSecretsAnnotations allows to add annotations to the kubernetes secret. This can be useful
+    ## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false'
+    additionalPrometheusSecretsAnnotations: {}
+
+    ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
+    ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#.
+    ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
+    ## As AlertManager configs are appended, the user is responsible for making sure they are valid. Note that using this
+    ## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release
+    ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
+    ##
+    additionalAlertManagerConfigs: []
+    # - consul_sd_configs:
+    #   - server: consul.dev.test:8500
+    #     scheme: http
+    #     datacenter: dev
+    #     tag_separator: ','
+    #     services:
+    #       - metrics-prometheus-alertmanager
+
+    ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
+    ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
+    ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
+    ## As alert relabel configs are appended, the user is responsible for making sure they are valid.
Note that using this feature may expose the + ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel + ## configs are going to break Prometheus after the upgrade. + ## + additionalAlertRelabelConfigs: [] + # - separator: ; + # regex: prometheus_replica + # replacement: $1 + # action: labeldrop + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to non root user with uid 1000 and gid 2000. + ## https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + + ## Priority class assigned to the Pods + ## + priorityClassName: "" + + ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment. + ## This section is experimental, it may change significantly without deprecation notice in any release. + ## This is experimental and may change significantly without backward compatibility in any release. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#thanosspec + ## + thanos: {} + + ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. + ## if using proxy extraContainer update targetPort with proxy container port + containers: [] + + ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes + ## (permissions, dir tree) on mounted volumes before starting prometheus + initContainers: [] + + ## PortName to use for Prometheus. + ## + portName: "web" + + additionalServiceMonitors: [] + ## Name of the ServiceMonitor to create + ## + # - name: "" + + ## Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from + ## the chart + ## + # additionalLabels: {} + + ## Service label for use in assembling a job name of the form