Automated upload for edp.buildth.ing

This commit is contained in:
Automated pipeline 2025-08-13 07:58:03 +00:00 committed by Actions pipeline
parent 00a382cc7f
commit 89437b3b6d
6 changed files with 94 additions and 138 deletions

View file

@ -5,57 +5,69 @@ metadata:
namespace: gitea namespace: gitea
spec: spec:
schedule: "0 1 * * *" schedule: "0 1 * * *"
concurrencyPolicy: "Forbid"
successfulJobsHistoryLimit: 5
failedJobsHistoryLimit: 5
startingDeadlineSeconds: 600 # 10 minutes
jobTemplate: jobTemplate:
spec: spec:
# 60 min until backup - 10 min start - (backoffLimit * activeDeadlineSeconds) - some time sync buffer
activeDeadlineSeconds: 1350
backoffLimit: 2
ttlSecondsAfterFinished: 259200 # 3 days
template: template:
spec: spec:
containers: containers:
- name: rclone - name: rclone
image: rclone/rclone:1.70 image: rclone/rclone:1.70
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
env: env:
- name: SOURCE_BUCKET - name: SOURCE_BUCKET
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: forgejo-cloud-credentials name: forgejo-cloud-credentials
key: bucket-name key: bucket-name
- name: AWS_ACCESS_KEY_ID - name: AWS_ACCESS_KEY_ID
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: forgejo-cloud-credentials name: forgejo-cloud-credentials
key: access-key key: access-key
- name: AWS_SECRET_ACCESS_KEY - name: AWS_SECRET_ACCESS_KEY
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: forgejo-cloud-credentials name: forgejo-cloud-credentials
key: secret-key key: secret-key
volumeMounts: volumeMounts:
- name: rclone-config - name: rclone-config
mountPath: /config/rclone mountPath: /config/rclone
readOnly: true readOnly: true
- name: backup-dir - name: backup-dir
mountPath: /backup mountPath: /backup
readOnly: false readOnly: false
command: command:
- /bin/sh - /bin/sh
- -c - -c
- | - |
rclone sync source:/${SOURCE_BUCKET}/packages /backup -v --ignore-checksum rclone sync source:/${SOURCE_BUCKET} /backup -v --ignore-checksum
restartPolicy: OnFailure restartPolicy: OnFailure
volumes: volumes:
- name: rclone-config - name: rclone-config
secret: secret:
secretName: forgejo-s3-backup secretName: forgejo-s3-backup
- name: backup-dir - name: backup-dir
persistentVolumeClaim: persistentVolumeClaim:
claimName: s3-backup claimName: s3-backup
--- ---
apiVersion: v1 apiVersion: v1
kind: PersistentVolumeClaim kind: PersistentVolumeClaim
metadata: metadata:
name: s3-backup name: s3-backup
namespace: gitea namespace: gitea
annotations:
everest.io/disk-volume-type: SATA
everest.io/crypt-key-id: {{ .Env.PVC_KMS_KEY_ID }}
spec: spec:
storageClassName: csi-disk
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:

View file

@ -17,8 +17,10 @@ postgresql-ha:
persistence: persistence:
enabled: true enabled: true
size: 200Gi size: 200Gi
storageClass: csi-disk
annotations: annotations:
everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225 everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
everest.io/disk-volume-type: GPSSD
test: test:
enabled: false enabled: false

View file

@ -6,7 +6,12 @@ metadata:
dashboards: "grafana" dashboards: "grafana"
spec: spec:
persistentVolumeClaim: persistentVolumeClaim:
metadata:
annotations:
everest.io/disk-volume-type: SATA
everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
spec: spec:
storageClassName: csi-disk
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:

View file

@ -11,8 +11,19 @@ spec:
expr: sum by(cluster_environment) (up{pod=~"forgejo-server-.*"}) < 1 expr: sum by(cluster_environment) (up{pod=~"forgejo-server-.*"}) < 1
for: 30s for: 30s
labels: labels:
severity: major severity: critical
job: "{{ $labels.job }}" job: "{{ $labels.job }}"
annotations: annotations:
value: "{{ $value }}" value: "{{ $value }}"
description: 'forgejo is down in cluster environment {{ $labels.cluster_environment }}' description: 'forgejo is down in cluster environment {{ $labels.cluster_environment }}'
- name: forgejo-backup
rules:
- alert: forgejo s3 backup job failed
expr: max by(cluster_environment) (kube_job_status_failed{job_name=~"forgejo-s3-backup-.*"}) != 0
for: 30s
labels:
severity: critical
job: "{{ $labels.job }}"
annotations:
value: "{{ $value }}"
description: 'forgejo s3 backup job failed in cluster environment {{ $labels.cluster_environment }}'

View file

@ -9,7 +9,9 @@ spec:
storageMetadata: storageMetadata:
annotations: annotations:
everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225 everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
everest.io/disk-volume-type: SATA
storage: storage:
storageClassName: csi-disk
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:

View file

@ -289,7 +289,9 @@ vmsingle:
storageMetadata: storageMetadata:
annotations: annotations:
everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225 everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
everest.io/disk-volume-type: SATA
storage: storage:
storageClassName: csi-disk
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
@ -536,108 +538,30 @@ alertmanager:
# If you're migrating existing config, please make sure that `.Values.alertmanager.config`: # If you're migrating existing config, please make sure that `.Values.alertmanager.config`:
# - with `useManagedConfig: false` has structure described [here](https://prometheus.io/docs/alerting/latest/configuration/). # - with `useManagedConfig: false` has structure described [here](https://prometheus.io/docs/alerting/latest/configuration/).
# - with `useManagedConfig: true` has structure described [here](https://docs.victoriametrics.com/operator/api/#vmalertmanagerconfig). # - with `useManagedConfig: true` has structure described [here](https://docs.victoriametrics.com/operator/api/#vmalertmanagerconfig).
useManagedConfig: false useManagedConfig: true
# -- (object) Alertmanager configuration # -- (object) Alertmanager configuration
config: config:
route: route:
receiver: "blackhole" receiver: "blackhole"
# group_by: ["alertgroup", "job"] routes:
# group_wait: 30s - matchers:
# group_interval: 5m - severity=~"critical|major"
# repeat_interval: 12h receiver: outlook
# routes:
#
# # Duplicate code_owner routes to teams
# # These will send alerts to team channels but continue
# # processing through the rest of the tree to handled by on-call
# - matchers:
# - code_owner_channel!=""
# - severity=~"info|warning|critical"
# group_by: ["code_owner_channel", "alertgroup", "job"]
# receiver: slack-code-owners
#
# # Standard on-call routes
# - matchers:
# - severity=~"info|warning|critical"
# receiver: slack-monitoring
# continue: true
#
# inhibit_rules:
# - target_matchers:
# - severity=~"warning|info"
# source_matchers:
# - severity=critical
# equal:
# - cluster
# - namespace
# - alertname
# - target_matchers:
# - severity=info
# source_matchers:
# - severity=warning
# equal:
# - cluster
# - namespace
# - alertname
# - target_matchers:
# - severity=info
# source_matchers:
# - alertname=InfoInhibitor
# equal:
# - cluster
# - namespace
receivers: receivers:
- name: blackhole - name: blackhole
# - name: "slack-monitoring" - name: outlook
# slack_configs: email_configs:
# - channel: "#channel" - smarthost: 'mail.mms-support.de:465'
# send_resolved: true auth_username: 'ipcei-cis-devfw@mms-support.de'
# title: '{{ template "slack.monzo.title" . }}' auth_password:
# icon_emoji: '{{ template "slack.monzo.icon_emoji" . }}' name: email-user-credentials
# color: '{{ template "slack.monzo.color" . }}' key: connection-string
# text: '{{ template "slack.monzo.text" . }}' from: '"IPCEI CIS DevFW" <ipcei-cis-devfw@mms-support.de>'
# actions: to: 'f9f9953a.mg.telekom.de@de.teams.ms'
# - type: button headers:
# text: "Runbook :green_book:" subject: 'Grafana Mail Alerts'
# url: "{{ (index .Alerts 0).Annotations.runbook_url }}" require_tls: false
# - type: button
# text: "Query :mag:"
# url: "{{ (index .Alerts 0).GeneratorURL }}"
# - type: button
# text: "Dashboard :grafana:"
# url: "{{ (index .Alerts 0).Annotations.dashboard }}"
# - type: button
# text: "Silence :no_bell:"
# url: '{{ template "__alert_silence_link" . }}'
# - type: button
# text: '{{ template "slack.monzo.link_button_text" . }}'
# url: "{{ .CommonAnnotations.link_url }}"
# - name: slack-code-owners
# slack_configs:
# - channel: "#{{ .CommonLabels.code_owner_channel }}"
# send_resolved: true
# title: '{{ template "slack.monzo.title" . }}'
# icon_emoji: '{{ template "slack.monzo.icon_emoji" . }}'
# color: '{{ template "slack.monzo.color" . }}'
# text: '{{ template "slack.monzo.text" . }}'
# actions:
# - type: button
# text: "Runbook :green_book:"
# url: "{{ (index .Alerts 0).Annotations.runbook }}"
# - type: button
# text: "Query :mag:"
# url: "{{ (index .Alerts 0).GeneratorURL }}"
# - type: button
# text: "Dashboard :grafana:"
# url: "{{ (index .Alerts 0).Annotations.dashboard }}"
# - type: button
# text: "Silence :no_bell:"
# url: '{{ template "__alert_silence_link" . }}'
# - type: button
# text: '{{ template "slack.monzo.link_button_text" . }}'
# url: "{{ .CommonAnnotations.link_url }}"
#
# -- Better alert templates for [slack source](https://gist.github.com/milesbxf/e2744fc90e9c41b47aa47925f8ff6512) # -- Better alert templates for [slack source](https://gist.github.com/milesbxf/e2744fc90e9c41b47aa47925f8ff6512)
monzoTemplate: monzoTemplate:
enabled: true enabled: true
@ -880,7 +804,7 @@ grafana:
enabled: false enabled: false
# all values for grafana helm chart can be specified here # all values for grafana helm chart can be specified here
persistence: persistence:
enabled: true enabled: false
type: pvc type: pvc
storageClassName: "default" storageClassName: "default"
grafana.ini: grafana.ini:
@ -1096,7 +1020,7 @@ kubeApiServer:
# Component scraping the kube controller manager # Component scraping the kube controller manager
kubeControllerManager: kubeControllerManager:
# -- Enable kube controller manager metrics scraping # -- Enable kube controller manager metrics scraping
enabled: true enabled: false
# -- If your kube controller manager is not deployed as a pod, specify IPs it can be found on # -- If your kube controller manager is not deployed as a pod, specify IPs it can be found on
endpoints: [] endpoints: []
@ -1229,7 +1153,7 @@ kubeEtcd:
# Component scraping kube scheduler # Component scraping kube scheduler
kubeScheduler: kubeScheduler:
# -- Enable KubeScheduler metrics scraping # -- Enable KubeScheduler metrics scraping
enabled: true enabled: false
# -- If your kube scheduler is not deployed as a pod, specify IPs it can be found on # -- If your kube scheduler is not deployed as a pod, specify IPs it can be found on
endpoints: [] endpoints: []