Automated upload for observability.buildth.ing

This commit is contained in:
Automated pipeline 2026-03-04 09:55:46 +00:00 committed by Actions pipeline
parent f15b30d02c
commit 464a9eb22e
32 changed files with 890 additions and 118 deletions

View file

@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: coder-reg
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
name: in-cluster
namespace: argocd
source:
path: "otc/observability.buildth.ing/stacks/coder"
repoURL: "https://edp.buildth.ing/DevFW-CICD/stacks-instances"
targetRevision: HEAD
project: default
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: docs-reg
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
name: in-cluster
namespace: argocd
source:
path: argocd-stack
repoURL: "https://edp.buildth.ing/DevFW-CICD/website-and-documentation"
targetRevision: HEAD
project: default
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: garm-reg
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
name: in-cluster
namespace: argocd
source:
path: "otc/observability.buildth.ing/stacks/garm"
repoURL: "https://edp.buildth.ing/DevFW-CICD/stacks-instances"
targetRevision: HEAD
project: default
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: terralist-reg
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
name: in-cluster
namespace: argocd
source:
path: "otc/observability.buildth.ing/stacks/terralist"
repoURL: "https://edp.buildth.ing/DevFW-CICD/stacks-instances"
targetRevision: HEAD
project: default
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -0,0 +1,32 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: coder
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
destination:
name: in-cluster
namespace: coder
sources:
- repoURL: https://helm.coder.com/v2
chart: coder
targetRevision: 2.28.3
helm:
valueFiles:
- $values/otc/observability.buildth.ing/stacks/coder/coder/values.yaml
- repoURL: https://edp.buildth.ing/DevFW-CICD/stacks-instances
targetRevision: HEAD
ref: values
- repoURL: https://edp.buildth.ing/DevFW-CICD/stacks-instances
targetRevision: HEAD
path: "otc/observability.buildth.ing/stacks/coder/coder/manifests"

View file

@ -0,0 +1,38 @@
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: coder-db
namespace: coder
spec:
instances: 1
primaryUpdateStrategy: unsupervised
resources:
requests:
memory: "1Gi"
cpu: "1"
limits:
memory: "1Gi"
cpu: "1"
managed:
roles:
- name: coder
createdb: true
login: true
passwordSecret:
name: coder-db-user
storage:
size: 10Gi
storageClass: csi-disk
---
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
name: coder
namespace: coder
spec:
cluster:
name: coder-db
name: coder
owner: coder
---

View file

@ -0,0 +1,61 @@
coder:
# You can specify any environment variables you'd like to pass to Coder
# here. Coder consumes environment variables listed in
# `coder server --help`, and these environment variables are also passed
# to the workspace provisioner (so you can consume them in your Terraform
# templates for auth keys etc.).
#
# Please keep in mind that you should not set `CODER_HTTP_ADDRESS`,
# `CODER_TLS_ENABLE`, `CODER_TLS_CERT_FILE` or `CODER_TLS_KEY_FILE` as
# they are already set by the Helm chart and will cause conflicts.
env:
- name: CODER_ACCESS_URL
value: https://coder.observability.buildth.ing
- name: CODER_PG_CONNECTION_URL
valueFrom:
secretKeyRef:
# You'll need to create a secret called coder-db-url with your
# Postgres connection URL like:
# postgres://coder:password@postgres:5432/coder?sslmode=disable
name: coder-db-user
key: url
# For production deployments, we recommend configuring your own GitHub
# OAuth2 provider and disabling the default one.
- name: CODER_OAUTH2_GITHUB_DEFAULT_PROVIDER_ENABLE
value: "false"
- name: EDGE_CONNECT_ENDPOINT
valueFrom:
secretKeyRef:
name: edge-credential
key: endpoint
- name: EDGE_CONNECT_USERNAME
valueFrom:
secretKeyRef:
name: edge-credential
key: username
- name: EDGE_CONNECT_PASSWORD
valueFrom:
secretKeyRef:
name: edge-credential
key: password
# (Optional) For production deployments the access URL should be set.
# If you're just trying Coder, access the dashboard via the service IP.
# - name: CODER_ACCESS_URL
# value: "https://coder.example.com"
#tls:
# secretNames:
# - my-tls-secret-name
service:
type: ClusterIP
ingress:
enable: true
className: nginx
host: coder.observability.buildth.ing
annotations:
cert-manager.io/cluster-issuer: main
tls:
enable: true
secretName: coder-tls-secret

View file

@ -23,7 +23,7 @@ spec:
# TODO: RIRE Can be updated when https://github.com/argoproj/argo-cd/issues/20790 is fixed and merged
# As logout make problems, it is suggested to switch from path based routing to an own argocd domain,
# similar to the CNOE amazon reference implementation and in our case, Forgejo
targetRevision: argo-cd-7.8.28
targetRevision: argo-cd-9.4.6
helm:
valueFiles:
- $values/otc/observability.buildth.ing/stacks/core/argocd/values.yaml
@ -32,4 +32,4 @@ spec:
ref: values
- repoURL: https://edp.buildth.ing/DevFW-CICD/stacks-instances
targetRevision: HEAD
path: "otc/observability.buildth.ing/stacks/core/argocd/manifests"
path: "otc/observability.buildth.ing/stacks/core/argocd/manifests"

View file

@ -5,6 +5,16 @@ configs:
params:
server.insecure: true
cm:
oidc.config: |
name: FORGEJO
issuer: https://dex.observability.buildth.ing
clientID: controller-argocd-dex
clientSecret: $dex-argo-client:clientSecret
requestedScopes:
- openid
- profile
- email
- groups
application.resourceTrackingMethod: annotation
timeout.reconciliation: 60s
resource.exclusions: |
@ -18,10 +28,9 @@ configs:
- CiliumIdentity
clusters:
- "*"
accounts.provider-argocd: apiKey
url: https://argocd.observability.buildth.ing
rbac:
policy.csv: 'g, provider-argocd, role:admin'
policy.csv: 'g, DevFW, role:admin'
tls:
certificates:

View file

@ -0,0 +1,30 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cloudnative-pg
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true
retry:
limit: -1
destination:
name: in-cluster
namespace: cloudnative-pg
sources:
- repoURL: https://cloudnative-pg.github.io/charts
chart: cloudnative-pg
targetRevision: 0.26.1
helm:
valueFiles:
- $values/otc/observability.buildth.ing/stacks/core/cloudnative-pg/values.yaml
- repoURL: https://edp.buildth.ing/DevFW-CICD/stacks-instances
targetRevision: HEAD
ref: values

View file

@ -0,0 +1 @@
# No need for values here.

View file

@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: dex
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
destination:
name: in-cluster
namespace: dex
sources:
- repoURL: https://charts.dexidp.io
chart: dex
targetRevision: 0.23.0
helm:
valueFiles:
- $values/otc/observability.buildth.ing/stacks/core/dex/values.yaml
- repoURL: https://edp.buildth.ing/DevFW-CICD/stacks-instances
targetRevision: HEAD
ref: values

View file

@ -0,0 +1,76 @@
ingress:
enabled: true
className: nginx
annotations:
cert-manager.io/cluster-issuer: main
hosts:
- host: dex.observability.buildth.ing
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- dex.observability.buildth.ing
secretName: dex-cert
envVars:
- name: FORGEJO_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: dex-forgejo-client
key: clientSecret
- name: FORGEJO_CLIENT_ID
valueFrom:
secretKeyRef:
name: dex-forgejo-client
key: clientID
- name: OIDC_DEX_GRAFANA_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: dex-grafana-client
key: clientSecret
- name: OIDC_DEX_ARGO_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: dex-argo-client
key: clientSecret
- name: LOG_LEVEL
value: debug
config:
# Set it to a valid URL
issuer: https://dex.observability.buildth.ing
# See https://dexidp.io/docs/storage/ for more options
storage:
type: memory
oauth2:
skipApprovalScreen: true
alwaysShowLoginScreen: false
connectors:
- type: gitea
id: gitea
name: Forgejo
config:
clientID: "$FORGEJO_CLIENT_ID"
clientSecret: "$FORGEJO_CLIENT_SECRET"
redirectURI: https://dex.observability.buildth.ing/callback
baseURL: https://edp.buildth.ing
# loadAllGroups: true
orgs:
- name: DevFW
enablePasswordDB: false
staticClients:
- id: controller-argocd-dex
name: ArgoCD Client
redirectURIs:
- "https://argocd.observability.buildth.ing/auth/callback"
secretEnv: "OIDC_DEX_ARGO_CLIENT_SECRET"
- id: grafana
redirectURIs:
- "https://grafana.observability.buildth.ing/login/generic_oauth"
name: "Grafana"
secretEnv: "OIDC_DEX_GRAFANA_CLIENT_SECRET"

View file

@ -28,7 +28,7 @@ spec:
# https://forgejo.org/docs/v1.21/admin/actions/#offline-registration
initContainers:
- name: runner-register
image: code.forgejo.org/forgejo/runner:6.4.0
image: code.forgejo.org/forgejo/runner:12.6.4
command:
- "sh"
- "-c"
@ -39,7 +39,7 @@ spec:
--token ${RUNNER_SECRET} \
--name ${RUNNER_NAME} \
--instance ${FORGEJO_INSTANCE_URL} \
--labels docker:docker://node:20-bookworm,ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04,ubuntu-latest:docker://ghcr.io/catthehacker/ubuntu:act-22.04
--labels docker:docker://node:24-bookworm,ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04,ubuntu-latest:docker://ghcr.io/catthehacker/ubuntu:act-24.04,ubuntu-24.04:docker://ghcr.io/catthehacker/ubuntu:act-24.04
env:
- name: RUNNER_NAME
valueFrom:
@ -57,8 +57,8 @@ spec:
mountPath: /data
containers:
- name: runner
image: code.forgejo.org/forgejo/runner:6.4.0
command:
image: code.forgejo.org/forgejo/runner:12.6.4
command:
- "sh"
- "-c"
- |

View file

@ -20,7 +20,7 @@ spec:
sources:
- repoURL: https://code.forgejo.org/forgejo-helm/forgejo-helm.git
path: .
targetRevision: v12.0.0
targetRevision: v16.2.0
helm:
valueFiles:
- $values/otc/observability.buildth.ing/stacks/forgejo/forgejo-server/values.yaml

View file

@ -5,50 +5,58 @@ metadata:
namespace: gitea
spec:
schedule: "0 1 * * *"
concurrencyPolicy: "Forbid"
successfulJobsHistoryLimit: 5
failedJobsHistoryLimit: 5
startingDeadlineSeconds: 600 # 10 minutes
jobTemplate:
spec:
# 60 min until backup - 10 min start - (backoffLimit * activeDeadlineSeconds) - some time sync buffer
activeDeadlineSeconds: 1350
backoffLimit: 2
ttlSecondsAfterFinished: 259200 #
template:
spec:
containers:
- name: rclone
image: rclone/rclone:1.70
imagePullPolicy: IfNotPresent
env:
- name: SOURCE_BUCKET
valueFrom:
secretKeyRef:
name: forgejo-cloud-credentials
key: bucket-name
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: forgejo-cloud-credentials
key: access-key
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: forgejo-cloud-credentials
key: secret-key
volumeMounts:
- name: rclone-config
mountPath: /config/rclone
readOnly: true
- name: backup-dir
mountPath: /backup
readOnly: false
command:
- /bin/sh
- -c
- |
rclone sync source:/${SOURCE_BUCKET} /backup -v --ignore-checksum
- name: rclone
image: rclone/rclone:1.70
imagePullPolicy: IfNotPresent
env:
- name: SOURCE_BUCKET
valueFrom:
secretKeyRef:
name: forgejo-cloud-credentials
key: bucket-name
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: forgejo-cloud-credentials
key: access-key
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: forgejo-cloud-credentials
key: secret-key
volumeMounts:
- name: rclone-config
mountPath: /config/rclone
readOnly: true
- name: backup-dir
mountPath: /backup
readOnly: false
command:
- /bin/sh
- -c
- |
rclone sync source:/${SOURCE_BUCKET} /backup -v --ignore-checksum
restartPolicy: OnFailure
volumes:
- name: rclone-config
secret:
secretName: forgejo-s3-backup
- name: backup-dir
persistentVolumeClaim:
claimName: s3-backup
- name: rclone-config
secret:
secretName: forgejo-s3-backup
- name: backup-dir
persistentVolumeClaim:
claimName: s3-backup
---
apiVersion: v1
kind: PersistentVolumeClaim
@ -56,7 +64,7 @@ metadata:
name: s3-backup
namespace: gitea
annotations:
everest.io/disk-volume-type: SATA
everest.io/disk-volume-type: GPSSD
everest.io/crypt-key-id: c02a26f1-3c7e-486d-ba5a-266c321bb203
spec:
storageClassName: csi-disk
@ -64,7 +72,7 @@ spec:
- ReadWriteOnce
resources:
requests:
storage: 50Gi
storage: 100Gi
---
apiVersion: v1
kind: Secret

View file

@ -1,4 +1,4 @@
# This is only used for deploying older versions of infra-catalogue where the bucket name is not an output of the terragrunt modules# We use recreate to make sure only one instance with one version is running, because Forgejo might break or data gets inconsistant.
# This is only used for deploying older versions of infra-catalogue where the bucket name is not an output of the terragrunt modules# We use recreate to make sure only one instance with one version is running, because Forgejo might break or data gets inconsistent.
strategy:
type: Recreate
@ -166,7 +166,7 @@ service:
nodePort: 32222
externalTrafficPolicy: Cluster
annotations:
kubernetes.io/elb.id: 3c90c465-804a-4682-ba55-111ce827e69c
kubernetes.io/elb.id: 3c90c465-804a-4682-ba55-111ce827e69c
image:
pullPolicy: "IfNotPresent"
@ -177,15 +177,16 @@ image:
fullOverride: edp.buildth.ing/devfw-cicd/edp-forgejo:v11.0.3-edp1
forgejo:
runner:
enabled: true
image:
tag: latest
# replicas: 3
config:
runner:
labels:
- docker:docker://node:16-bullseye
- self-hosted:docker://ghcr.io/catthehacker/ubuntu:act-22.04
- ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04
- ubuntu-latest:docker://ghcr.io/catthehacker/ubuntu:act-22.04
# This appears to be governed by forgejo-runner, as intuition would suggest
# runner:
# enabled: true
# image:
# tag: latest
# # replicas: 3
# config:
# runner:
# labels:
# - docker:docker://node:16-bullseye
# - self-hosted:docker://ghcr.io/catthehacker/ubuntu:act-22.04
# - ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04
# - ubuntu-latest:docker://ghcr.io/catthehacker/ubuntu:act-22.04

View file

@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: garm
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
destination:
name: in-cluster
namespace: garm
sources:
- repoURL: https://edp.buildth.ing/DevFW-CICD/garm-helm
path: charts/garm
targetRevision: v0.0.6
helm:
valueFiles:
- $values/otc/observability.buildth.ing/stacks/garm/garm/values.yaml
- repoURL: https://edp.buildth.ing/DevFW-CICD/stacks-instances
targetRevision: HEAD
ref: values

View file

@ -0,0 +1,38 @@
ingress:
enabled: true
className: nginx
annotations:
cert-manager.io/cluster-issuer: main
nginx.ingress.kubernetes.io/backend-protocol: HTTP
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
hosts:
- host: garm.observability.buildth.ing
paths:
- path: /
pathType: Prefix
tls:
- secretName: garm-net-tls
hosts:
- garm.observability.buildth.ing
# Credentials and Secrets
credentials:
edgeConnect:
existingSecretName: "edge-credential"
gitea:
url: "https://observability.buildth.ing" # Required
db:
existingSecretName: garm-fixed-credentials
providerConfig:
edgeConnect:
organization: edp2
region: EU
edgeConnectUrl: "https://hub.apps.edge.platform.mg3.mdb.osc.live"
cloudlet:
name: Hamburg
organization: TelekomOP
garm:
logging:
logLevel: info

View file

@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: optimiser-receiver
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
destination:
name: in-cluster
namespace: garm
source:
repoURL: https://edp.buildth.ing/DevFW-CICD/stacks-instances
targetRevision: HEAD
path: "otc/observability.buildth.ing/stacks/garm/optimiser-receiver"

View file

@ -0,0 +1,93 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: optimiser-receiver
labels:
app: optimiser-receiver
spec:
replicas: 1
selector:
matchLabels:
app: optimiser-receiver
template:
metadata:
labels:
app: optimiser-receiver
spec:
containers:
- name: receiver
image: edp.buildth.ing/devfw-cicd/forgejo-runner-optimiser-receiver:0.0.3
args:
- --db=/data/metrics.db
ports:
- name: http
containerPort: 8080
protocol: TCP
env:
- name: RECEIVER_READ_TOKEN
valueFrom:
secretKeyRef:
name: optimiser-tokens
key: read-token
- name: RECEIVER_HMAC_KEY
valueFrom:
secretKeyRef:
name: optimiser-tokens
key: hmac-key
volumeMounts:
- name: data
mountPath: /data
livenessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 5
periodSeconds: 30
readinessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 2
periodSeconds: 10
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 200m
memory: 128Mi
volumes:
- name: data
persistentVolumeClaim:
claimName: optimiser-receiver-data
---
apiVersion: v1
kind: Service
metadata:
name: optimiser-receiver
labels:
app: optimiser-receiver
spec:
selector:
app: optimiser-receiver
ports:
- name: http
port: 8080
targetPort: http
protocol: TCP
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: optimiser-receiver-data
labels:
app: optimiser-receiver
annotations:
everest.io/disk-volume-type: GPSSD
spec:
storageClassName: csi-disk
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

View file

@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
cert-manager.io/cluster-issuer: main
name: optimiser-receiver
namespace: garm
spec:
ingressClassName: nginx
rules:
- host: optimiser.observability.buildth.ing
http:
paths:
- backend:
service:
name: optimiser-receiver
port:
number: 8080
path: /
pathType: Prefix
tls:
- hosts:
- optimiser.observability.buildth.ing
secretName: optimiser-receiver-tls

View file

@ -48,7 +48,7 @@ customConfig:
type: elasticsearch
inputs: [parser]
endpoints:
- https://o12y.observability.buildth.ing/insert/elasticsearch/
- https://o12y.observability.buildth.ing/insert/elasticsearch/
auth:
strategy: basic
user: ${VECTOR_USER}

View file

@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: simple-user-secret
namespace: observability
type: Opaque
stringData:
username: simple-user
password: simple-password

View file

@ -296,7 +296,8 @@ vmsingle:
# -- Enable deployment of ingress for server component
enabled: false
# -- Ingress annotations
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# -- Ingress extra labels
@ -346,7 +347,8 @@ vmcluster:
resources:
requests:
storage: 10Gi
resources: {}
resources:
{}
# limits:
# cpu: "1"
# memory: 1500Mi
@ -363,7 +365,8 @@ vmcluster:
resources:
requests:
storage: 2Gi
resources: {}
resources:
{}
# limits:
# cpu: "1"
# memory: "1000Mi"
@ -376,7 +379,8 @@ vmcluster:
port: "8480"
replicaCount: 2
extraArgs: {}
resources: {}
resources:
{}
# limits:
# cpu: "1"
# memory: 1000Mi
@ -469,7 +473,8 @@ vmcluster:
enabled: false
# -- Ingress annotations
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
@ -635,7 +640,8 @@ alertmanager:
enabled: true
# -- (object) Extra alert templates
templateFiles: {}
templateFiles:
{}
# template_1.tmpl: |-
# {{ define "hello" -}}
# hello, Victoria!
@ -649,7 +655,8 @@ alertmanager:
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
@ -692,7 +699,8 @@ vmalert:
externalLabels: {}
# -- (object) Extra VMAlert annotation templates
templateFiles: {}
templateFiles:
{}
# template_1.tmpl: |-
# {{ define "hello" -}}
# hello, Victoria!
@ -715,7 +723,8 @@ vmalert:
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
@ -769,7 +778,7 @@ vmagent:
# -- Remote write configuration of VMAgent, allowed parameters defined in a [spec](https://docs.victoriametrics.com/operator/api#vmagentremotewritespec)
additionalRemoteWrites:
# []
- url: https://o12y.observability.buildth.ing/api/v1/write
- url: https://o12y.observability.buildth.ing/api/v1/write
basicAuth:
username:
name: simple-user-secret
@ -782,7 +791,7 @@ vmagent:
port: "8429"
selectAllByDefault: true
scrapeInterval: 20s
externalLabels:
externalLabels:
cluster_environment: "observability"
# For multi-cluster setups it is useful to use "cluster" label to identify the metrics source.
# For example:
@ -799,7 +808,8 @@ vmagent:
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
@ -858,7 +868,7 @@ defaultDatasources:
implementation: prometheus
# -- Configure additional grafana datasources (passed through tpl).
# Check [here](http://docs.grafana.org/administration/provisioning/#datasources) for details
extra:
extra:
- name: victoria-logs
access: proxy
type: VictoriaLogs
@ -902,7 +912,8 @@ grafana:
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
@ -936,7 +947,7 @@ grafana:
matchLabels:
app.kubernetes.io/name: '{{ include "grafana.name" .Subcharts.grafana }}'
endpoints:
- port: "{{ .Values.grafana.service.portName }}"
- port: '{{ .Values.grafana.service.portName }}'
# -- prometheus-node-exporter dependency chart configuration. For possible values check [here](https://github.com/prometheus-community/helm-charts/blob/main/charts/prometheus-node-exporter/values.yaml)
prometheus-node-exporter:
@ -1067,7 +1078,7 @@ kubeApiServer:
# Component scraping the kube controller manager
kubeControllerManager:
# -- Enable kube controller manager metrics scraping
enabled: false
enabled: true
# -- If your kube controller manager is not deployed as a pod, specify IPs it can be found on
endpoints: []
@ -1200,7 +1211,7 @@ kubeEtcd:
# Component scraping kube scheduler
kubeScheduler:
# -- Enable KubeScheduler metrics scraping
enabled: false
enabled: true
# -- If your kube scheduler is not deployed as a pod, specify IPs it can be found on
endpoints: []
@ -1274,3 +1285,4 @@ kubeProxy:
# -- Add extra objects dynamically to this chart
extraObjects: []

View file

@ -8,7 +8,7 @@ spec:
persistentVolumeClaim:
metadata:
annotations:
everest.io/disk-volume-type: SATA
everest.io/disk-volume-type: GPSSD
everest.io/crypt-key-id: c02a26f1-3c7e-486d-ba5a-266c321bb203
spec:
storageClassName: csi-disk
@ -17,6 +17,40 @@ spec:
resources:
requests:
storage: 10Gi
deployment:
spec:
template:
spec:
containers:
- name: grafana
env:
- name: OAUTH_CLIENT_SECRET
valueFrom:
secretKeyRef:
key: clientSecret
name: dex-grafana-client
config:
log.console:
level: debug
server:
root_url: "https://grafana.observability.buildth.ing"
auth:
disable_login: "true"
disable_login_form: "true"
auth.generic_oauth:
enabled: "true"
name: Forgejo
allow_sign_up: "true"
use_refresh_token: "true"
client_id: grafana
client_secret: $__env{OAUTH_CLIENT_SECRET}
scopes: openid email profile offline_access groups
auth_url: https://dex.observability.buildth.ing/auth
token_url: https://dex.observability.buildth.ing/token
api_url: https://dex.observability.buildth.ing/userinfo
redirect_uri: https://grafana.observability.buildth.ing/login/generic_oauth
role_attribute_path: "contains(groups[*], 'DevFW') && 'GrafanaAdmin' || 'None'"
allow_assign_grafana_admin: "true"
ingress:
metadata:
annotations:

View file

@ -27,3 +27,14 @@ spec:
annotations:
value: "{{ $value }}"
description: 'forgejo s3 backup job failed in cluster environment {{ $labels.cluster_environment }}'
- name: disk-consumption-high
rules:
- alert: disk consumption high
expr: 1-(kubelet_volume_stats_available_bytes / kubelet_volume_stats_capacity_bytes) > 0.6
for: 30s
labels:
severity: major
job: "{{ $labels.job }}"
annotations:
value: "{{ $value }}"
description: 'disk consumption of pvc {{ $labels.namespace }}/{{ $labels.persistentvolumeclaim }} is high in cluster environment {{ $labels.cluster_environment }}'

View file

@ -9,7 +9,7 @@ spec:
storageMetadata:
annotations:
everest.io/crypt-key-id: c02a26f1-3c7e-486d-ba5a-266c321bb203
everest.io/disk-volume-type: SATA
everest.io/disk-volume-type: GPSSD
storage:
storageClassName: csi-disk
accessModes:

View file

@ -5,11 +5,13 @@ metadata:
namespace: observability
spec:
username: simple-user
password: simple-password
passwordRef:
key: password
name: simple-user-secret
targetRefs:
- static:
url: http://vmsingle-o12y:8429
paths: ["/api/v1/write"]
- static:
url: http://vlogs-victorialogs:9428
paths: ["/insert/elasticsearch/.*"]
paths: ["/insert/elasticsearch/.*"]

View file

@ -289,7 +289,7 @@ vmsingle:
storageMetadata:
annotations:
everest.io/crypt-key-id: c02a26f1-3c7e-486d-ba5a-266c321bb203
everest.io/disk-volume-type: SATA
everest.io/disk-volume-type: GPSSD
storage:
storageClassName: csi-disk
accessModes:
@ -301,7 +301,8 @@ vmsingle:
# -- Enable deployment of ingress for server component
enabled: false
# -- Ingress annotations
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# -- Ingress extra labels
@ -350,8 +351,9 @@ vmcluster:
spec:
resources:
requests:
storage: 10Gi
resources: {}
storage: 10Gi
resources:
{}
# limits:
# cpu: "1"
# memory: 1500Mi
@ -368,7 +370,8 @@ vmcluster:
resources:
requests:
storage: 2Gi
resources: {}
resources:
{}
# limits:
# cpu: "1"
# memory: "1000Mi"
@ -381,7 +384,8 @@ vmcluster:
port: "8480"
replicaCount: 2
extraArgs: {}
resources: {}
resources:
{}
# limits:
# cpu: "1"
# memory: 1000Mi
@ -474,7 +478,8 @@ vmcluster:
enabled: false
# -- Ingress annotations
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
@ -538,7 +543,7 @@ alertmanager:
config:
route:
receiver: "blackhole"
routes:
routes:
- matchers:
- severity=~"critical|major"
receiver: outlook
@ -546,15 +551,15 @@ alertmanager:
- name: blackhole
- name: outlook
email_configs:
- smarthost: "mail.mms-support.de:465"
auth_username: "ipcei-cis-devfw@mms-support.de"
auth_password:
- smarthost: 'mail.mms-support.de:465'
auth_username: 'ipcei-cis-devfw@mms-support.de'
auth_password:
name: email-user-credentials
key: connection-string
from: '"IPCEI CIS DevFW" <ipcei-cis-devfw@mms-support.de>'
to: "f9f9953a.mg.telekom.de@de.teams.ms"
to: 'f9f9953a.mg.telekom.de@de.teams.ms'
headers:
subject: "Grafana Mail Alerts"
subject: 'Grafana Mail Alerts'
require_tls: false
# -- Better alert templates for [slack source](https://gist.github.com/milesbxf/e2744fc90e9c41b47aa47925f8ff6512)
@ -562,7 +567,8 @@ alertmanager:
enabled: true
# -- (object) Extra alert templates
templateFiles: {}
templateFiles:
{}
# template_1.tmpl: |-
# {{ define "hello" -}}
# hello, Victoria!
@ -576,7 +582,8 @@ alertmanager:
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
@ -619,7 +626,8 @@ vmalert:
externalLabels: {}
# -- (object) Extra VMAlert annotation templates
templateFiles: {}
templateFiles:
{}
# template_1.tmpl: |-
# {{ define "hello" -}}
# hello, Victoria!
@ -642,7 +650,8 @@ vmalert:
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
@ -681,9 +690,9 @@ vmauth:
annotations:
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
cert-manager.io/cluster-issuer: main
host: o12y.observability.buildth.ing
host: o12y.observability.buildth.ing
tlsHosts:
- o12y.observability.buildth.ing
- o12y.observability.buildth.ing
tlsSecretName: vmauth-tls-secret
unauthorizedUserAccessSpec: {}
selectAllByDefault: true
@ -694,7 +703,8 @@ vmagent:
# -- VMAgent annotations
annotations: {}
# -- Remote write configuration of VMAgent, allowed parameters defined in a [spec](https://docs.victoriametrics.com/operator/api#vmagentremotewritespec)
additionalRemoteWrites: []
additionalRemoteWrites:
[]
#- url: http://some-remote-write/api/v1/write
# -- (object) Full spec for VMAgent CRD. Allowed values described [here](https://docs.victoriametrics.com/operator/api#vmagentspec)
spec:
@ -717,7 +727,8 @@ vmagent:
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
@ -781,7 +792,7 @@ defaultDatasources:
implementation: prometheus
# -- Configure additional grafana datasources (passed through tpl).
# Check [here](http://docs.grafana.org/administration/provisioning/#datasources) for details
extra:
extra:
- name: VictoriaLogs
access: proxy
type: victoriametrics-logs-datasource
@ -832,7 +843,7 @@ grafana:
# Uncomment the block below, if you want to enable VictoriaMetrics Datasource in Grafana:
# Note that Grafana will need internet access to install the datasource plugin.
plugins:
- victoriametrics-metrics-datasource
- victoriametrics-logs-datasource
@ -843,7 +854,8 @@ grafana:
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
@ -877,7 +889,7 @@ grafana:
matchLabels:
app.kubernetes.io/name: '{{ include "grafana.name" .Subcharts.grafana }}'
endpoints:
- port: "{{ .Values.grafana.service.portName }}"
- port: '{{ .Values.grafana.service.portName }}'
# -- prometheus-node-exporter dependency chart configuration. For possible values check [here](https://github.com/prometheus-community/helm-charts/blob/main/charts/prometheus-node-exporter/values.yaml)
prometheus-node-exporter:
@ -1215,3 +1227,4 @@ kubeProxy:
# -- Add extra objects dynamically to this chart
extraObjects: []

View file

@ -0,0 +1,30 @@
# helm upgrade --install --create-namespace --namespace terralist terralist oci://ghcr.io/terralist/helm-charts/terralist -f terralist-values.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: terralist
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
destination:
name: in-cluster
namespace: terralist
sources:
- repoURL: https://github.com/terralist/helm-charts
path: charts/terralist
targetRevision: terralist-0.8.1
helm:
valueFiles:
- $values/otc/observability.buildth.ing/stacks/terralist/terralist/values.yaml
- repoURL: https://edp.buildth.ing/DevFW-CICD/stacks-instances
targetRevision: HEAD
ref: values

View file

@ -0,0 +1,87 @@
controllers:
main:
strategy: Recreate
containers:
app:
env:
- name: TERRALIST_OAUTH_PROVIDER
value: oidc
- name: TERRALIST_OI_CLIENT_ID
valueFrom:
secretKeyRef:
name: oidc-credentials
key: client-id
- name: TERRALIST_OI_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: oidc-credentials
key: client-secret
- name: TERRALIST_OI_AUTHORIZE_URL
valueFrom:
secretKeyRef:
name: oidc-credentials
key: authorize-url
- name: TERRALIST_OI_TOKEN_URL
valueFrom:
secretKeyRef:
name: oidc-credentials
key: token-url
- name: TERRALIST_OI_USERINFO_URL
valueFrom:
secretKeyRef:
name: oidc-credentials
key: userinfo-url
- name: TERRALIST_OI_SCOPE
valueFrom:
secretKeyRef:
name: oidc-credentials
key: scope
- name: TERRALIST_TOKEN_SIGNING_SECRET
valueFrom:
secretKeyRef:
name: terralist-secret
key: token-signing-secret
- name: TERRALIST_COOKIE_SECRET
valueFrom:
secretKeyRef:
name: terralist-secret
key: cookie-secret
- name: TERRALIST_URL
value: https://terralist.observability.buildth.ing
- name: TERRALIST_SQLITE_PATH
value: /data/db.sqlite
- name: TERRALIST_LOCAL_STORE
value: /data/modules
- name: TERRALIST_PROVIDERS_ANONYMOUS_READ
value: "true"
ingress:
main:
enabled: true
className: nginx
annotations:
cert-manager.io/cluster-issuer: main
hosts:
- host: terralist.observability.buildth.ing
paths:
- path: /
pathType: Prefix
service:
identifier: main
port: http
tls:
- hosts:
- terralist.observability.buildth.ing
secretName: terralist-tls-secret
persistence:
data:
enabled: true
accessMode: ReadWriteOnce
size: 10Gi
retain: false
storageClass: "csi-disk"
annotations:
everest.io/disk-volume-type: GPSSD
globalMounts:
- path: /data