Renamed kind to template for the OSC support

Richard Robert Reitz 2024-11-25 19:18:23 +01:00
parent 14b8a03ccf
commit e23f30d881
96 changed files with 0 additions and 0 deletions

template/edfbuilder.yaml

@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: edfbuilder
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
name: in-cluster
namespace: argocd
source:
path: registry
repoURL: 'https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot'
targetRevision: HEAD
project: default
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,24 @@
apiVersion: v2
name: forgejo-runner
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

@@ -0,0 +1,7 @@
{{- if not .Values.registration.enabled }}
Since you have not specified a registration token in values.yaml, you will have to create the token secret manually.
To create a secret with the registration token, run the following command:
kubectl create secret generic {{ include "forgejo-runner.fullname" . }}-token --from-literal=token=<token>
{{- end }}

@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "forgejo-runner.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "forgejo-runner.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "forgejo-runner.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "forgejo-runner.labels" -}}
helm.sh/chart: {{ include "forgejo-runner.chart" . }}
{{ include "forgejo-runner.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "forgejo-runner.selectorLabels" -}}
app.kubernetes.io/name: {{ include "forgejo-runner.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "forgejo-runner.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "forgejo-runner.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

@@ -0,0 +1,82 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
{{- include "forgejo-runner.labels" . | nindent 4 }}
name: {{ include "forgejo-runner.fullname" . }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "forgejo-runner.selectorLabels" . | nindent 6 }}
strategy: {}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "forgejo-runner.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
volumes:
- name: docker-certs
emptyDir: {}
- name: runner-data
emptyDir: {}
initContainers:
- name: runner-register
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
command: ["forgejo-runner", "register", "--no-interactive", "--token", $(RUNNER_SECRET), "--name", $(RUNNER_NAME), "--instance", $(FORGEJO_INSTANCE_URL)]
env:
- name: RUNNER_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: RUNNER_SECRET
valueFrom:
secretKeyRef:
name: {{ include "forgejo-runner.fullname" . }}-token
key: token
- name: FORGEJO_INSTANCE_URL
value: {{ .Values.forgejoUrl }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
- name: runner-data
mountPath: /data
containers:
- name: runner
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
command: ["sh", "-c", "while ! nc -z localhost 2376 </dev/null; do echo 'waiting for docker daemon...'; sleep 5; done; forgejo-runner daemon"]
env:
- name: DOCKER_HOST
value: tcp://localhost:2376
- name: DOCKER_CERT_PATH
value: /certs/client
- name: DOCKER_TLS_VERIFY
value: "1"
volumeMounts:
- name: docker-certs
mountPath: /certs
- name: runner-data
mountPath: /data
- name: daemon
image: docker:23.0.6-dind
env:
- name: DOCKER_TLS_CERTDIR
value: /certs
securityContext:
privileged: true
volumeMounts:
- name: docker-certs
mountPath: /certs

@@ -0,0 +1,13 @@
{{- if .Values.registration.enabled }}
# Secret data.
# You will need to retrieve this from the web UI, and your Forgejo instance must be running v1.21+
# Alternatively, create this with
# kubectl create secret generic runner-secret --from-literal=token=your_offline_token_here
apiVersion: v1
stringData:
token: {{ .Values.registration.token }}
kind: Secret
metadata:
name: {{ include "forgejo-runner.fullname" . }}-token
namespace: {{ .Release.Namespace }}
{{- end }}

@@ -0,0 +1,45 @@
replicaCount: 2
image:
repository: code.forgejo.org/forgejo/runner
pullPolicy: IfNotPresent
tag: "3.5.1"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
registration:
enabled: false
token: ""
forgejoUrl: https://forgejo-domain

@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: core
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
name: in-cluster
namespace: argocd
source:
path: stacks/core
repoURL: 'https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot'
targetRevision: HEAD
project: default
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: monitoring
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
name: in-cluster
namespace: argocd
source:
path: stacks/monitoring
repoURL: 'https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot'
targetRevision: HEAD
project: default
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ref-implementation
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
name: in-cluster
namespace: argocd
source:
path: stacks/ref-implementation
repoURL: 'https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot'
targetRevision: HEAD
project: default
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

@@ -0,0 +1,24 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: second-cluster
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
name: in-cluster
namespace: argocd
source:
path: stacks/second-cluster
repoURL: 'https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot'
targetRevision: HEAD
project: default
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

@@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: argocd
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: argocd
sources:
- repoURL: https://github.com/argoproj/argo-helm
path: charts/argo-cd
targetRevision: argo-cd-7.7.5
helm:
valueFiles:
- $values/stacks/core/argocd/values.yaml
- repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot
targetRevision: HEAD
ref: values

@@ -0,0 +1,44 @@
global:
domain: cnoe.localtest.me
configs:
params:
server.insecure: true
server.basehref: /argocd
cm:
application.resourceTrackingMethod: annotation
timeout.reconciliation: 60s
resource.exclusions: |
- apiGroups:
- "*"
kinds:
- ProviderConfigUsage
accounts.provider-argocd: apiKey
rbac:
policy.csv: 'g, provider-argocd, role:admin'
tls:
certificates:
notifications:
enabled: false
dex:
enabled: false
server:
ingress:
enabled: true
ingressClassName: nginx
annotations:
nginx.ingress.kubernetes.io/backend-protocol: HTTP
nginx.ingress.kubernetes.io/rewrite-target: /$2
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
path: /argocd(/|$)(.*)
pathType: ImplementationSpecific
extraTls:
- hosts:
- cnoe.localtest.me
secretName: argocd-net-tls

@@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: crossplane-compositions
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: crossplane-system
source:
path: stacks/core/crossplane-compositions
repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot.git
targetRevision: HEAD
directory:
recurse: true

@@ -0,0 +1,397 @@
apiVersion: apiextensions.crossplane.io/v1
kind: Composition
metadata:
name: edfbuilders.edfbuilder.crossplane.io
spec:
writeConnectionSecretsToNamespace: crossplane-system
compositeTypeRef:
apiVersion: edfbuilder.crossplane.io/v1alpha1
kind: EDFBuilder
mode: Pipeline
pipeline:
- step: patch-and-transform
functionRef:
name: crossplane-contrib-function-patch-and-transform
input:
apiVersion: pt.fn.crossplane.io/v1beta1
kind: Resources
resources:
### shell provider config
- name: provider-shell
base:
apiVersion: shell.crossplane.io/v1alpha1
kind: ProviderConfig
spec:
credentials:
source: InjectedIdentity
patches:
- type: FromCompositeFieldPath
fromFieldPath: metadata.name
toFieldPath: metadata.name
readinessChecks:
- type: None
### bash-oneshot
- name: bash-oneshot
base:
apiVersion: provisioning.shell.crossplane.io/v1alpha1
kind: Bash
metadata:
name: bash-oneshot
spec:
forProvider:
script: |
# setup
DOMAIN=cnoe.localtest.me
#CLUSTER_NAME=$(openssl rand -hex 8)
CLUSTER_NAME=shoot
mkdir -p /tmp/rundir
export HOME=/tmp/rundir
cd
# get stacks folder
rm -Rf stacks &> /dev/null || true
git clone https://forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/richardrobertreitz/stacks.git
# workdir for template helm values files
rm -Rf work &> /dev/null || true
cp -r stacks/kind work
rm -Rf stacks
# create namespaces
echo create namespaces
kubectl create namespace argo
kubectl create namespace argocd
kubectl create namespace gitea
kubectl create namespace ingress-nginx
# create and upload self signed certs
echo create and upload self signed certs
mkdir -p tls
if [[ ! -f tls/$DOMAIN.key || ! -f tls/$DOMAIN.crt ]]; then
openssl req -x509 -newkey rsa:4096 -keyout tls/$DOMAIN.key -out tls/$DOMAIN.crt -sha256 -days 3650 -nodes -subj "/C=AB/ST=StateName/L=CityName/O=CompanyName/OU=CompanySectionName/CN=${DOMAIN}" -addext "subjectAltName=DNS:${DOMAIN},DNS:${DOMAIN}"
fi
if [[ ! -f tls/gitea.$DOMAIN.key || ! -f tls/gitea.$DOMAIN.crt ]]; then
openssl req -x509 -newkey rsa:4096 -keyout tls/gitea.$DOMAIN.key -out tls/gitea.$DOMAIN.crt -sha256 -days 3650 -nodes -subj "/C=AB/ST=StateName/L=CityName/O=CompanyName/OU=CompanySectionName/CN=gitea.${DOMAIN}" -addext "subjectAltName=DNS:gitea.${DOMAIN},DNS:gitea.${DOMAIN}"
fi
kubectl create secret tls -n argocd argocd-net-tls --key tls/$DOMAIN.key --cert tls/$DOMAIN.crt
kubectl create secret tls -n gitea forgejo-net-tls --key tls/gitea.$DOMAIN.key --cert tls/gitea.$DOMAIN.crt
# add gitea certificate into argocd helm values
yq e -i ".configs.tls.certificates.\"gitea.$DOMAIN\" = load_str(\"tls/gitea.$DOMAIN.crt\")" work/stacks/core/argocd/values.yaml
# create a random giteaAdmin password
echo create giteaAdmin password
kubectl create secret generic -n gitea gitea-credential --from-literal=username=giteaAdmin "--from-literal=password=$(openssl rand -base64 16)"
# patch coredns
echo patch coredns
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns-conf-custom
namespace: kube-system
data:
custom.conf: |
# insert custom rules here
EOF
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns-conf-default
namespace: kube-system
data:
default.conf: |
# domain names resolve to the ingress IP, e.g. gitea.cnoe.localtest.me becomes ingress-nginx-controller.ingress-nginx.svc.cluster.local
rewrite name exact gitea.cnoe.localtest.me ingress-nginx-controller.ingress-nginx.svc.cluster.local
rewrite name exact cnoe.localtest.me ingress-nginx-controller.ingress-nginx.svc.cluster.local
EOF
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
import ../coredns-configs/*.conf
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
EOF
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: kube-dns
name: coredns
namespace: kube-system
spec:
progressDeadlineSeconds: 600
replicas: 2
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- kube-dns
topologyKey: kubernetes.io/hostname
weight: 100
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: registry.k8s.io/coredns/coredns:v1.11.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /ready
port: 8181
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
- mountPath: /etc/coredns-configs
name: custom-configs
readOnly: true
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: coredns
serviceAccountName: coredns
terminationGracePeriodSeconds: 30
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
volumes:
- configMap:
defaultMode: 420
items:
- key: Corefile
path: Corefile
name: coredns
name: config-volume
- name: custom-configs
projected:
sources:
- configMap:
name: coredns-conf-custom
- configMap:
name: coredns-conf-default
EOF
kubectl rollout restart deployment/coredns -n kube-system
kubectl rollout status deployment/coredns -n kube-system --timeout=90000s
# install ingress-nginx
echo install ingress-nginx
rm -Rf ingress-nginx &> /dev/null
git clone https://github.com/kubernetes/ingress-nginx
cd ingress-nginx
git checkout helm-chart-4.11.3
cd ..
helm dependency update ./ingress-nginx/charts/ingress-nginx/
helm dependency build ./ingress-nginx/charts/ingress-nginx/
helm install -n ingress-nginx -f work/stacks/core/ingress-nginx/values.yaml ingress-nginx ./ingress-nginx/charts/ingress-nginx
rm -Rf ingress-nginx
# wait for ingress
sleep 5
kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=90000s
# install argocd
echo install argocd
rm -Rf argo-helm &> /dev/null
git clone https://github.com/argoproj/argo-helm
cd argo-helm
git checkout argo-cd-7.7.5
cd ..
helm dependency update ./argo-helm/charts/argo-cd/
helm dependency build ./argo-helm/charts/argo-cd/
helm install -n argocd -f work/stacks/core/argocd/values.yaml argocd ./argo-helm/charts/argo-cd
rm -Rf argo-helm
# install forgejo
echo install forgejo
rm -Rf forgejo-helm &> /dev/null
git clone https://code.forgejo.org/forgejo-helm/forgejo-helm.git
cd forgejo-helm
git checkout v10.1.1
cd ..
helm dependency build ./forgejo-helm/
helm install -n gitea -f work/stacks/core/forgejo/values.yaml forgejo ./forgejo-helm
rm -Rf forgejo-helm
# wait for argocd
echo wait for argocd
HOST=$(kubectl get ingress -n argocd argocd-server -o yaml | yq -r .status.loadBalancer.ingress\[0\].hostname)
while [[ "$HOST" == "null" ]]
do
sleep 1
HOST=$(kubectl get ingress -n argocd argocd-server -o yaml | yq -r .status.loadBalancer.ingress\[0\].hostname)
done
# wait for forgejo
echo wait for forgejo
HOST=$(kubectl get ingress -n gitea forgejo -o yaml | yq -r .status.loadBalancer.ingress\[0\].hostname)
while [[ "$HOST" == "null" ]]
do
sleep 1
HOST=$(kubectl get ingress -n gitea forgejo -o yaml | yq -r .status.loadBalancer.ingress\[0\].hostname)
done
until curl -k --output /dev/null --silent --head --fail https://gitea.${DOMAIN}; do
sleep 1
done
# create the target git repository
GIT_USERNAME=giteaAdmin
GIT_PASSWORD=$(kubectl get secret -n gitea gitea-credential --output jsonpath="{.data.password}" | base64 --decode)
GIT_TOKEN=$(curl -sk -H "Content-Type: application/json" -d '{"name":"idpbuilder","scopes":["read:user","write:user","read:repository","write:repository","read:admin","write:admin"]}' -u $GIT_USERNAME:$GIT_PASSWORD https://gitea.$DOMAIN/api/v1/users/$GIT_USERNAME/tokens | jq -r .sha1)
curl -ks -X POST -H 'Content-Type: application/json' -d "{\"name\":\"edfbuilder-$CLUSTER_NAME\"}" "https://gitea.$DOMAIN/api/v1/user/repos?token=$GIT_TOKEN"
# create and apply a forgejo runner token
FORGEJO_RUNNER_TOKEN="$(curl -ks -H 'Content-Type: application/json' "https://gitea.$DOMAIN/api/v1/admin/runners/registration-token?token=$GIT_TOKEN" | jq -r .token)"
kubectl create secret generic -n gitea forgejo-runner-token "--from-literal=token=$FORGEJO_RUNNER_TOKEN"
echo repo created
git config --global user.email "bot@undefined.com"
git config --global user.name "Bot"
# upload templated deployment to git repository
cd work/
git init
git checkout -b main
git add -A
git commit -m "initial commit"
git remote add origin https://$GIT_USERNAME:${GIT_TOKEN}@gitea.$DOMAIN/giteaAdmin/edfbuilder-$CLUSTER_NAME.git
GIT_SSL_NO_VERIFY=true git push -u origin main
cd ..
# upload forgejo docker registry credentials for use in argo-workflows
cat <<EOF | kubectl create secret generic -n argo my-docker-secret --from-file=config.json=/dev/stdin
{
"auths": {
"https://gitea.cnoe.localtest.me": {
"auth": "$(echo -n giteaAdmin:$GIT_PASSWORD | base64)"
}
}
}
EOF
argocd login --grpc-web-root-path argocd cnoe.localtest.me --insecure --username admin --password "$(kubectl get secret -n argocd argocd-initial-admin-secret --output jsonpath="{.data.password}" | base64 --decode)"
# let argocd takeover core stack and install other stacks
kubectl apply -f work/registry/core.yaml
kubectl apply -f work/registry/ref-implementation.yaml
# remove templated deployment
rm -Rf work
# core packages installed, print passwords
echo done
echo
echo -n argocd admin password:\ ; kubectl get secret -n argocd argocd-initial-admin-secret --output jsonpath="{.data.password}" | base64 --decode; echo
echo https://$DOMAIN/argocd
echo
echo forgejo $GIT_USERNAME password: $GIT_PASSWORD token: $GIT_TOKEN
echo https://gitea.$DOMAIN
echo
echo execute ./get-passwords.sh to see all available passwords
echo
# done
exit 0
patches:
- type: FromCompositeFieldPath
fromFieldPath: metadata.name
toFieldPath: spec.providerConfigRef.name

@@ -0,0 +1,30 @@
apiVersion: apiextensions.crossplane.io/v1
kind: CompositeResourceDefinition
metadata:
name: edfbuilders.edfbuilder.crossplane.io
spec:
connectionSecretKeys:
- kubeconfig
group: edfbuilder.crossplane.io
names:
kind: EDFBuilder
listKind: EDFBuilderList
plural: edfbuilders
singular: edfbuilders
versions:
- name: v1alpha1
served: true
referenceable: true
schema:
openAPIV3Schema:
description: An EDFBuilder is a composite resource that represents a Kubernetes cluster with edfbuilder installed
type: object
properties:
spec:
type: object
properties:
repoURL:
type: string
description: URL to ArgoCD stack of stacks repo
required:
- repoURL

@@ -0,0 +1,23 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: crossplane-providers
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: crossplane-system
source:
path: stacks/core/crossplane-providers
repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot.git
targetRevision: HEAD

@@ -0,0 +1,9 @@
apiVersion: pkg.crossplane.io/v1
kind: Function
metadata:
name: crossplane-contrib-function-patch-and-transform
spec:
package: xpkg.upbound.io/crossplane-contrib/function-patch-and-transform:v0.7.0
packagePullPolicy: IfNotPresent # Only download the package if it isn't in the cache.
revisionActivationPolicy: Automatic # Otherwise our Function never gets activated & healthy
revisionHistoryLimit: 1

@@ -0,0 +1,14 @@
apiVersion: argocd.crossplane.io/v1alpha1
kind: ProviderConfig
metadata:
name: argocd-provider
spec:
serverAddr: argocd-server.argocd.svc.cluster.local:80
insecure: true
plainText: true
credentials:
source: Secret
secretRef:
namespace: crossplane-system
name: argocd-credentials
key: authToken

@@ -0,0 +1,9 @@
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
name: provider-argocd
spec:
package: xpkg.upbound.io/crossplane-contrib/provider-argocd:v0.9.1
packagePullPolicy: IfNotPresent # Only download the package if it isn't in the cache.
revisionActivationPolicy: Automatic # Otherwise our Provider never gets activated & healthy
revisionHistoryLimit: 1

@@ -0,0 +1,14 @@
apiVersion: kind.crossplane.io/v1alpha1
kind: ProviderConfig
metadata:
name: kind-provider
spec:
credentials:
source: Secret
secretRef:
namespace: crossplane-system
name: kind-credentials
key: credentials
endpoint:
# the url is managed by crossplane-edfbuilder
url: https://DOCKER_HOST:SERVER_PORT/api/v1/kindserver

@@ -0,0 +1,9 @@
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
name: richardrobertreitz-provider-kind
spec:
package: forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/richardrobertreitz/provider-kind:v0.1.0
packagePullPolicy: IfNotPresent # Only download the package if it isn't in the cache.
revisionActivationPolicy: Automatic # Otherwise our Provider never gets activated & healthy
revisionHistoryLimit: 1

@@ -0,0 +1,9 @@
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
name: richardrobertreitz-provider-shell
spec:
package: forgejo.edf-bootstrap.cx.fg1.ffm.osc.live/richardrobertreitz/provider-shell:v0.1.0
packagePullPolicy: IfNotPresent # Only download the package if it isn't in the cache.
revisionActivationPolicy: Automatic # Otherwise our Provider never gets activated & healthy
revisionHistoryLimit: 1

@@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: crossplane
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: crossplane-system
source:
chart: crossplane
repoURL: https://charts.crossplane.io/stable
targetRevision: 1.18.0
helm:
releaseName: crossplane

@@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: forgejo-runner
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: gitea
sources:
- repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot
path: forgejo-runner
targetRevision: HEAD
helm:
valueFiles:
- $values/stacks/core/forgejo-runner/values.yaml
- repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot
targetRevision: HEAD
ref: values

@@ -0,0 +1,3 @@
replicaCount: 1
forgejoUrl: http://forgejo-http.gitea.svc.cluster.local:3000

@@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: forgejo
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: gitea
sources:
- repoURL: https://code.forgejo.org/forgejo-helm/forgejo-helm.git
path: .
targetRevision: v10.1.1
helm:
valueFiles:
- $values/stacks/core/forgejo/values.yaml
- repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot
targetRevision: HEAD
ref: values

@@ -0,0 +1,72 @@
redis-cluster:
enabled: false
postgresql:
enabled: false
postgresql-ha:
enabled: false
persistence:
enabled: true
size: 5Gi
test:
enabled: false
gitea:
admin:
existingSecret: gitea-credential
config:
database:
DB_TYPE: sqlite3
session:
PROVIDER: memory
cache:
ADAPTER: memory
queue:
TYPE: level
server:
DOMAIN: 'gitea.cnoe.localtest.me'
ROOT_URL: 'https://gitea.cnoe.localtest.me:443'
service:
ssh:
type: NodePort
nodePort: 32222
externalTrafficPolicy: Local
ingress:
# NOTE: The ingress is generated in a later step for the path-based routing feature. See: hack/argo-cd/generate-manifests.sh
enabled: true
className: nginx
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: 512m
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
hosts:
- host: gitea.cnoe.localtest.me
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- gitea.cnoe.localtest.me
secretName: forgejo-net-tls
image:
pullPolicy: "IfNotPresent"
# Overrides the image tag whose default is the chart appVersion.
#tag: "8.0.3"
# Adds -rootless suffix to image name
rootless: true
forgejo:
runner:
enabled: true
image:
tag: latest
# replicas: 3
config:
runner:
labels:
- docker:docker://node:16-bullseye
- self-hosted:docker://ghcr.io/catthehacker/ubuntu:act-22.04
- ubuntu-22.04:docker://ghcr.io/catthehacker/ubuntu:act-22.04

@@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ingress-nginx
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: ingress-nginx
sources:
- repoURL: https://github.com/kubernetes/ingress-nginx
path: charts/ingress-nginx
targetRevision: helm-chart-4.11.3
helm:
valueFiles:
- $values/stacks/core/ingress-nginx/values.yaml
- repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot
targetRevision: HEAD
ref: values

@@ -0,0 +1,36 @@
controller:
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
hostPort:
enabled: true
terminationGracePeriodSeconds: 0
service:
type: NodePort
watchIngressWithoutClass: true
nodeSelector:
ingress-ready: "true"
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Equal"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/control-plane"
operator: "Equal"
effect: "NoSchedule"
publishService:
enabled: false
extraArgs:
publish-status-address: localhost
# added for idpbuilder
enable-ssl-passthrough: ""
# added for idpbuilder
allowSnippetAnnotations: true
# added for idpbuilder
config:
proxy-buffer-size: 32k
use-forwarded-headers: "true"

@@ -0,0 +1,30 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kube-prometheus-stack
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
- ServerSideApply=true
destination:
name: in-cluster
namespace: monitoring
sources:
- repoURL: https://github.com/prometheus-community/helm-charts
path: charts/kube-prometheus-stack
targetRevision: HEAD
helm:
valueFiles:
- $values/stacks/monitoring/kube-prometheus/values.yaml
- repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot
targetRevision: HEAD
ref: values

@@ -0,0 +1,20 @@
grafana:
namespaceOverride: "monitoring"
admin:
existingSecret: "kube-prometheus-stack-grafana-admin-password"
userKey: admin-user
passwordKey: admin-password
grafana.ini:
server:
domain: cnoe.localtest.me
root_url: "%(protocol)s://%(domain)s/grafana"
serve_from_sub_path: true
ingress:
enabled: true
ingressClassName: nginx
hosts:
- cnoe.localtest.me
path: /grafana

@@ -0,0 +1,146 @@
# Reference implementation
This example creates a local version of the CNOE reference implementation.
## Prerequisites
Ensure you have the following tools installed on your computer.
**Required**
- [idpbuilder](https://github.com/cnoe-io/idpbuilder/releases/latest): version `0.3.0` or later
- [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl): version `1.27` or later
- Your computer should have at least 6 GB RAM allocated to Docker. If you are on Docker Desktop, see [this guide](https://docs.docker.com/desktop/settings/mac/).
**Optional**
- AWS credentials: access key and secret key, if you want to create AWS resources in one of the examples below.
## Installation
**_NOTE:_**
- If you'd like to run this in your web browser through Codespaces, please follow [the instructions here](./codespaces.md) to install instead.
- _This example assumes that you run the reference implementation with the default port configuration of 8443 for the idpBuilder.
If you happen to configure a different host or port for the idpBuilder, the manifests in the reference example need to be updated
and configured with the new host and port. You can use [replace.sh](replace.sh) to change the port as desired prior to applying the manifests as instructed below._
```bash
idpbuilder create --use-path-routing \
--package https://github.com/cnoe-io/stacks//ref-implementation
```
This will take ~6 minutes for everything to come up. To track the progress, you can go to the [ArgoCD UI](https://cnoe.localtest.me:8443/argocd/applications).
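If you prefer the terminal, you can watch the Argo CD `Application` resources converge instead; a minimal sketch, assuming `kubectl` is pointed at the idpbuilder cluster:

```bash
# Each Application should eventually report Synced / Healthy
kubectl get applications -n argocd -w
```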
### What was installed?
1. **Argo Workflows** to enable workflow orchestrations.
1. **Backstage** as the UI for software catalog and templating. Source is available [here](https://github.com/cnoe-io/backstage-app).
1. **External Secrets** to generate secrets and coordinate secrets between applications.
1. **Keycloak** as the identity provider for applications.
1. **Spark Operator** to demonstrate an example Spark workload through Backstage.
If you don't want to install a package above, you can remove the ArgoCD Application file corresponding to the package you want to remove.
For example, if you want to remove Spark Operator, you can delete [this file](./spark-operator.yaml).
The only package that cannot be removed this way is Keycloak because other packages rely on it.
#### Accessing UIs
- Argo CD: https://cnoe.localtest.me:8443/argocd
- Argo Workflows: https://cnoe.localtest.me:8443/argo-workflows
- Backstage: https://cnoe.localtest.me:8443/
- Gitea: https://cnoe.localtest.me:8443/gitea
- Keycloak: https://cnoe.localtest.me:8443/keycloak/admin/master/console/
# Using it
For this example, we will walk through a few demonstrations. Once the applications are ready, go to the [Backstage URL](https://cnoe.localtest.me:8443).
Click on the Sign-In button; you will be asked to log in to the Keycloak instance. There are two users set up in this
configuration, and their passwords can be retrieved with the following command:
```bash
idpbuilder get secrets
```
Use the username **`user1`** and the password given by the `USER_PASSWORD` field to log in to the Backstage instance.
`user1` is an admin user who has access to everything in the cluster, while `user2` is a regular user with limited access.
Both users use the same password retrieved above.
If you want to create a new user or change existing users:
1. Go to the [Keycloak UI](https://cnoe.localtest.me:8443/keycloak/admin/master/console/).
Log in with the username `cnoe-admin`. The password is the `KEYCLOAK_ADMIN_PASSWORD` value from the command above.
2. Select `cnoe` from the realms drop-down menu.
3. Select the users tab.
## Basic Deployment
Let's start by deploying a simple application to the cluster through Backstage.
Click on the `Create...` button on the left, then select the `Create a Basic Deployment` template.
![img.png](images/backstage-templates.png)
In the next screen, type `demo` for the name field, then click Review, then Create.
Once the steps complete, click the Open In Catalog button to go to the entity page.
![img.png](images/basic-template-flow.png)
In the demo entity page, you will notice an ArgoCD overview card associated with this entity.
You can click on the ArgoCD Application name to see more details.
![img.png](images/demo-entity.png)
### What just happened?
1. Backstage created [a git repository](https://cnoe.localtest.me:8443/gitea/giteaAdmin/demo), then pushed templated contents to it.
2. Backstage created [an ArgoCD Application](https://cnoe.localtest.me:8443/argocd/applications/argocd/demo?) and pointed it to the git repository.
3. Backstage registered the application as [a component](https://cnoe.localtest.me:8443/gitea/giteaAdmin/demo/src/branch/main/catalog-info.yaml) in Backstage.
4. ArgoCD deployed the manifests stored in the repo to the cluster.
5. Backstage retrieved application health from the ArgoCD API, then displayed it.
![image.png](images/basic-deployment.png)
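You can verify the same from the command line; a sketch, assuming you are logged in with the `argocd` CLI and used the name `demo` above:

```bash
# Shows the sync and health status of the application created by the template
argocd app get demo
```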
## Argo Workflows and Spark Operator
In this example, we will deploy a simple Apache Spark job through Argo Workflows.
Click on the `Create...` button on the left, then select the `Basic Argo Workflow with a Spark Job` template.
![img.png](images/backstage-templates-spark.png)
Type `demo2` for the name field, then click Create. You will notice that the Backstage templating steps are very similar to the basic example above.
Click on the Open In Catalog button to go to the entity page.
![img.png](images/demo2-entity.png)
The deployment process is the same as in the first example. Instead of deploying a pod, we deployed a workflow to create a Spark job.
In the entity page, there is a card for Argo Workflows, and it should say running or succeeded.
You can click the name in the card to go to the Argo Workflows UI to view more details about this workflow run.
When prompted to log in, click the login button under single sign-on. Argo Workflows is configured to use SSO with Keycloak, allowing you to log in with the same credentials as Backstage.
Note that workflows are not usually deployed this way; this is just an example to show how you can integrate workflows, Backstage, and Spark.
Back in the entity page, you can view more details about Spark jobs by navigating to the Spark tab.
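If you'd rather inspect the workflow from the terminal, here is a sketch assuming `kubectl` access (the `Workflow` resources live in the `argo` namespace):

```bash
# Lists workflow runs and their phase (Running, Succeeded, ...)
kubectl get workflows -n argo
```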
## Application with cloud resources
To deploy cloud resources, you can follow any of the instructions below:
- [Cloud resource deployments via Crossplane](../crossplane-integrations/)
- [Cloud resource deployments via Terraform](../terraform-integrations/)
## Notes
- In these examples, we have used the pattern of creating a new repository for every app, then having ArgoCD deploy it.
This is done for convenience and demonstration purposes only. There are alternative actions that you can use.
For example, you can create a PR to an existing repository, create a repository but not deploy them yet, etc.
- If Backstage's pipelining and templating mechanisms are too simple, you can use more advanced workflow engines like Tekton or Argo Workflows.
You can invoke them in Backstage templates, then track progress similarly to how it was described above.

@@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: argo-workflows
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot
targetRevision: HEAD
path: "stacks/ref-implementation/argo-workflows/manifests/dev"
destination:
server: "https://kubernetes.default.svc"
namespace: argo
syncPolicy:
syncOptions:
- CreateNamespace=true
automated:
selfHeal: true
retry:
limit: -1

File diff suppressed because it is too large.

@@ -0,0 +1,2 @@
resources:
- install.yaml

@@ -0,0 +1,20 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: keycloak-oidc
namespace: argo
spec:
secretStoreRef:
name: keycloak
kind: ClusterSecretStore
target:
name: keycloak-oidc
data:
- secretKey: client-id
remoteRef:
key: keycloak-clients
property: ARGO_WORKFLOWS_CLIENT_ID
- secretKey: secret-key
remoteRef:
key: keycloak-clients
property: ARGO_WORKFLOWS_CLIENT_SECRET

@@ -0,0 +1,31 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argo-workflows-ingress
namespace: argo
annotations:
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$2
spec:
ingressClassName: "nginx"
rules:
- host: localhost
http:
paths:
- path: /argo-workflows(/|$)(.*)
pathType: ImplementationSpecific
backend:
service:
name: argo-server
port:
name: web
- host: cnoe.localtest.me
http:
paths:
- path: /argo-workflows(/|$)(.*)
pathType: ImplementationSpecific
backend:
service:
name: argo-server
port:
name: web

@@ -0,0 +1,8 @@
resources:
- ../base
- external-secret.yaml
- ingress.yaml
- sa-admin.yaml
patches:
- path: patches/cm-argo-workflows.yaml
- path: patches/deployment-argo-server.yaml

@@ -0,0 +1,26 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: workflow-controller-configmap
namespace: argo
data:
config: |
sso:
insecureSkipVerify: true
issuer: https://cnoe.localtest.me/keycloak/realms/cnoe
clientId:
name: keycloak-oidc
key: client-id
clientSecret:
name: keycloak-oidc
key: secret-key
redirectUrl: https://cnoe.localtest.me:443/argo-workflows/oauth2/callback
rbac:
enabled: true
scopes:
- openid
- profile
- email
- groups
nodeEvents:
enabled: false

@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: argo-server
namespace: argo
annotations:
argocd.argoproj.io/sync-wave: "20"
spec:
template:
spec:
containers:
- name: argo-server
readinessProbe:
httpGet:
path: /
port: 2746
scheme: HTTP
env:
- name: BASE_HREF
value: "/argo-workflows/"
args:
- server
- --configmap=workflow-controller-configmap
- --auth-mode=client
- --auth-mode=sso
- "--secure=false"
- "--loglevel"
- "info"
- "--log-format"
- "text"

@@ -0,0 +1,32 @@
# Used by users in the admin group
# TODO Need to tighten up permissions.
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin
namespace: argo
annotations:
workflows.argoproj.io/rbac-rule: "'admin' in groups"
workflows.argoproj.io/rbac-rule-precedence: "10"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: argo-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin
namespace: argo
---
apiVersion: v1
kind: Secret
metadata:
name: admin.service-account-token
annotations:
kubernetes.io/service-account.name: admin
namespace: argo
type: kubernetes.io/service-account-token

@@ -0,0 +1,27 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: backstage-templates
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot
targetRevision: HEAD
path: "stacks/ref-implementation/backstage-templates/entities"
directory:
exclude: 'catalog-info.yaml'
destination:
server: "https://kubernetes.default.svc"
namespace: backstage
syncPolicy:
syncOptions:
- CreateNamespace=true
automated:
selfHeal: true
retry:
limit: -1

@@ -0,0 +1,48 @@
apiVersion: backstage.io/v1alpha1
kind: Resource
metadata:
name: ${{values.name}}-bucket
description: Stores things
annotations:
argocd/app-name: ${{values.name | dump}}
spec:
type: s3-bucket
owner: guests
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: ${{values.name | dump}}
description: This is for testing purposes
annotations:
backstage.io/techdocs-ref: dir:.
backstage.io/kubernetes-label-selector: 'entity-id=${{values.name}}'
backstage.io/kubernetes-namespace: default
argocd/app-name: ${{values.name | dump}}
links:
- url: https://gitea.cnoe.localtest.me:443
title: Repo URL
icon: github
spec:
owner: guests
lifecycle: experimental
type: service
system: ${{values.name | dump}}
dependsOn:
- resource:default/${{values.name}}-bucket
---
apiVersion: backstage.io/v1alpha1
kind: System
metadata:
name: ${{values.name | dump}}
description: An example system for demonstration purposes
annotations:
backstage.io/techdocs-ref: dir:.
links:
- url: https://github.com/cnoe-io/stacks/tree/main/ref-implementation
title: CNOE Repo
icon: github
spec:
owner: guests
lifecycle: experimental
type: service

@@ -0,0 +1,46 @@
[![Codespell][codespell-badge]][codespell-link]
[![E2E][e2e-badge]][e2e-link]
[![Go Report Card][report-badge]][report-link]
[![Commit Activity][commit-activity-badge]][commit-activity-link]
# IDP Builder
Internal development platform binary launcher.
> **WORK IN PROGRESS**: This tool is in a pre-release stage and is under active development.
## About
Spin up a complete internal developer platform using industry-standard technologies like Kubernetes, Argo, and Backstage, with only Docker required as a dependency.
This can be useful in several ways:
* Create a single binary which can demonstrate an IDP reference implementation.
* Use within CI to perform integration testing.
* Use as a local development environment for platform engineers.
## Getting Started
Check out our [documentation website](https://cnoe.io/docs/reference-implementation/installations/idpbuilder) for getting started with idpbuilder.
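The short version, assuming Docker is running and the binary is on your PATH:

```bash
# Creates a local Kubernetes cluster preloaded with the core platform components
idpbuilder create
```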
## Community
- If you have questions or concerns about this tool, please feel free to reach out to us on the [CNCF Slack Channel](https://cloud-native.slack.com/archives/C05TN9WFN5S).
- You can also join our community meetings to meet the team and ask any questions. Check out [this calendar](https://calendar.google.com/calendar/embed?src=064a2adfce866ccb02e61663a09f99147f22f06374e7a8994066bdc81e066986%40group.calendar.google.com&ctz=America%2FLos_Angeles) for more information.
## Contribution
Check out the [contribution doc](./CONTRIBUTING.md) for contribution guidelines and more information on how to set up your local environment.
<!-- JUST BADGES & LINKS -->
[codespell-badge]: https://github.com/cnoe-io/idpbuilder/actions/workflows/codespell.yaml/badge.svg
[codespell-link]: https://github.com/cnoe-io/idpbuilder/actions/workflows/codespell.yaml
[e2e-badge]: https://github.com/cnoe-io/idpbuilder/actions/workflows/e2e.yaml/badge.svg
[e2e-link]: https://github.com/cnoe-io/idpbuilder/actions/workflows/e2e.yaml
[report-badge]: https://goreportcard.com/badge/github.com/cnoe-io/idpbuilder
[report-link]: https://goreportcard.com/report/github.com/cnoe-io/idpbuilder
[commit-activity-badge]: https://img.shields.io/github/commit-activity/m/cnoe-io/idpbuilder
[commit-activity-link]: https://github.com/cnoe-io/idpbuilder/pulse

@@ -0,0 +1,16 @@
![cnoe logo](./images/cnoe-logo.png)
# Example Basic Application
Thanks for trying out this demo! In this example, we deployed a simple application with an S3 bucket using Crossplane.
### idpbuilder
Check out the idpbuilder website: https://cnoe.io/docs/reference-implementation/installations/idpbuilder
Check out the idpbuilder repository: https://github.com/cnoe-io/idpbuilder
## Crossplane
Check out the Crossplane website: https://www.crossplane.io/

@@ -0,0 +1,3 @@
module ${{ values.name }}
go 1.19

@@ -0,0 +1,3 @@
resources:
- nginx.yaml
- ${{ values.name }}.yaml

@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
labels:
app: nginx
spec:
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.14.2
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx
labels:
app: nginx
spec:
ports:
- port: 80
targetPort: 80
selector:
app: nginx

@@ -0,0 +1,35 @@
{%- if values.awsResources %}
resources:
{%- if 'Bucket' in values.awsResources %}
- ../base/
{%- endif %}
{%- if 'Table' in values.awsResources %}
- ../base/table.yaml
{%- endif %}
{%- endif %}
namespace: default
patches:
- target:
kind: Deployment
patch: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: not-used
labels:
backstage.io/kubernetes-id: ${{values.name}}
spec:
template:
metadata:
labels:
backstage.io/kubernetes-id: ${{values.name}}
- target:
kind: Service
patch: |
apiVersion: v1
kind: Service
metadata:
name: not-used
labels:
backstage.io/kubernetes-id: ${{values.name}}

@@ -0,0 +1,5 @@
package main

func main() {
}

@@ -0,0 +1,6 @@
site_name: 'Argo Spark Example'
nav:
- Home: index.md
- idpBuilder: idpbuilder.md
plugins:
- techdocs-core

@@ -0,0 +1,126 @@
apiVersion: scaffolder.backstage.io/v1beta3
kind: Template
metadata:
description: Adds a Go application with AWS resources
name: app-with-aws-resources
title: Add a Go App with AWS resources
spec:
owner: guests
type: service
parameters:
- properties:
name:
title: Application Name
type: string
description: Unique name of the component
ui:autofocus: true
labels:
title: Labels
type: object
additionalProperties:
type: string
description: Labels to apply to the application
ui:autofocus: true
required:
- name
title: Choose your repository location
- description: Configure your bucket
properties:
apiVersion:
default: awsblueprints.io/v1alpha1
description: APIVersion for the resource
type: string
kind:
default: ObjectStorage
description: Kind for the resource
type: string
config:
description: ObjectStorageSpec defines the desired state of ObjectStorage
properties:
resourceConfig:
description: ResourceConfig defines general properties of this AWS resource.
properties:
deletionPolicy:
description: Defaults to Delete
enum:
- Delete
- Orphan
type: string
region:
type: string
providerConfigName:
type: string
default: default
tags:
items:
properties:
key:
type: string
value:
type: string
required:
- key
- value
type: object
type: array
required:
- region
type: object
required:
- resourceConfig
title: Bucket configuration options
type: object
steps:
- id: template
name: Generating component
action: fetch:template
input:
url: ./skeleton
values:
name: ${{parameters.name}}
- action: roadiehq:utils:serialize:yaml
id: serialize
input:
data:
apiVersion: awsblueprints.io/v1alpha1
kind: ${{ parameters.kind }}
metadata:
name: ${{ parameters.name }}
spec: ${{ parameters.config }}
name: serialize
- action: roadiehq:utils:fs:write
id: write
input:
content: ${{ steps['serialize'].output.serialized }}
path: kustomize/base/${{ parameters.name }}.yaml
name: write-to-file
- id: publish
name: Publishing to a gitea git repository
action: publish:gitea
input:
description: This is an example app
# Hard-coded value, for demo purposes only.
repoUrl: gitea.cnoe.localtest.me:443/?repo=${{parameters.name}}
defaultBranch: main
- id: create-argocd-app
name: Create ArgoCD App
action: cnoe:create-argocd-app
input:
appName: ${{parameters.name}}
appNamespace: default
argoInstance: in-cluster
projectName: default
# necessary until we generate our own cert
repoUrl: https://gitea.cnoe.localtest.me:443/giteaAdmin/${{parameters.name}}
path: "kustomize/base"
- id: register
name: Register
action: catalog:register
input:
repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }}
catalogInfoPath: 'catalog-info.yaml'
output:
links:
- title: Open in catalog
icon: catalog
entityRef: ${{ steps['register'].output.entityRef }}

@@ -0,0 +1,40 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: ${{values.name | dump}}
description: This is an example Backstage component representing the use of Argo Workflows and Spark Operator.
annotations:
backstage.io/techdocs-ref: dir:.
backstage.io/kubernetes-label-selector: 'entity-id=${{values.name}}'
backstage.io/kubernetes-namespace: argo
argocd/app-name: ${{values.name | dump}}
argo-workflows.cnoe.io/label-selector: env=dev,entity-id=${{values.name}}
argo-workflows.cnoe.io/cluster-name: local
apache-spark.cnoe.io/label-selector: env=dev,entity-id=${{values.name}}
apache-spark.cnoe.io/cluster-name: local
links:
- url: https://gitea.cnoe.localtest.me:443
title: Repo URL
icon: github
spec:
owner: guests
lifecycle: experimental
type: service
system: ${{values.name | dump}}
---
apiVersion: backstage.io/v1alpha1
kind: System
metadata:
name: ${{values.name | dump}}
description: An example system for demonstration purposes
annotations:
backstage.io/techdocs-ref: dir:.
links:
- url: https://github.com/cnoe-io/stacks/tree/main/ref-implementation
title: CNOE Repo
icon: github
spec:
owner: guests
lifecycle: experimental
type: service

@@ -0,0 +1,160 @@
<!-- markdownlint-disable-next-line MD041 -->
[![Security Status](https://github.com/argoproj/argo-workflows/actions/workflows/snyk.yml/badge.svg?branch=main)](https://github.com/argoproj/argo-workflows/actions/workflows/snyk.yml?query=branch%3Amain)
[![OpenSSF Best Practices](https://bestpractices.coreinfrastructure.org/projects/3830/badge)](https://bestpractices.coreinfrastructure.org/projects/3830)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-workflows/badge)](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-workflows)
[![FOSSA License Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fargoproj%2Fargo-workflows.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fargoproj%2Fargo-workflows?ref=badge_shield)
[![Slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack)
[![Twitter Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://twitter.com/argoproj)
[![LinkedIn](https://img.shields.io/badge/LinkedIn-argoproj-blue.svg?logo=linkedin)](https://www.linkedin.com/company/argoproj/)
[![Release Version](https://img.shields.io/github/v/release/argoproj/argo-workflows?label=argo-workflows)](https://github.com/argoproj/argo-workflows/releases/latest)
[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-workflows)](https://artifacthub.io/packages/helm/argo/argo-workflows)
## What is Argo Workflows?
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes.
Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).
* Define workflows where each step is a container.
* Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a directed acyclic graph (DAG).
* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo Workflows on Kubernetes.
Argo is a [Cloud Native Computing Foundation (CNCF)](https://cncf.io/) graduated project.
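As a quick illustration of "each step is a container", here is a minimal hello-world `Workflow`; a sketch, assuming a cluster with Argo Workflows installed in the `argo` namespace:

```bash
# kubectl create (not apply), because the manifest uses generateName
kubectl create -n argo -f - <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: hello-world-
spec:
  entrypoint: main
  templates:
    - name: main
      container:
        image: busybox
        command: [echo, "hello world"]
EOF
```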
## Use Cases
* [Machine Learning pipelines](use-cases/machine-learning.md)
* [Data and batch processing](use-cases/data-processing.md)
* [Infrastructure automation](use-cases/infrastructure-automation.md)
* [CI/CD](use-cases/ci-cd.md)
* [Other use cases](use-cases/other.md)
## Why Argo Workflows?
* Argo Workflows is the most popular workflow execution engine for Kubernetes.
* Light-weight, scalable, and easier to use.
* Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based environments.
* Cloud agnostic and can run on any Kubernetes cluster.
[Read what people said in our latest survey](https://blog.argoproj.io/argo-workflows-events-2023-user-survey-results-82c53bc30543)
## Try Argo Workflows
You can try Argo Workflows via one of the following:
1. [Interactive Training Material](https://killercoda.com/argoproj/course/argo-workflows/)
1. [Access the demo environment](https://workflows.apps.argoproj.io/workflows/argo)
![Screenshot](assets/screenshot.png)
## Who uses Argo Workflows?
[About 200+ organizations are officially using Argo Workflows](https://github.com/argoproj/argo-workflows/blob/main/USERS.md)
## Ecosystem
Just some of the projects that use or rely on Argo Workflows (complete list [here](https://github.com/akuity/awesome-argo#ecosystem-projects)):
* [Argo Events](https://github.com/argoproj/argo-events)
* [Couler](https://github.com/couler-proj/couler)
* [Hera](https://github.com/argoproj-labs/hera-workflows)
* [Katib](https://github.com/kubeflow/katib)
* [Kedro](https://kedro.readthedocs.io/en/stable/)
* [Kubeflow Pipelines](https://github.com/kubeflow/pipelines)
* [Netflix Metaflow](https://metaflow.org)
* [Onepanel](https://github.com/onepanelio/onepanel)
* [Orchest](https://github.com/orchest/orchest/)
* [Piper](https://github.com/quickube/piper)
* [Ploomber](https://github.com/ploomber/ploomber)
* [Seldon](https://github.com/SeldonIO/seldon-core)
* [SQLFlow](https://github.com/sql-machine-learning/sqlflow)
## Client Libraries
Check out our [Java, Golang and Python clients](client-libraries.md).
## Quickstart
* [Get started here](quick-start.md)
* [Walk-through examples](walk-through/index.md)
## Documentation
You're here!
## Features
An incomplete list of features Argo Workflows provides:
* UI to visualize and manage Workflows
* Artifact support (S3, Artifactory, Alibaba Cloud OSS, Azure Blob Storage, HTTP, Git, GCS, raw)
* Workflow templating to store commonly used Workflows in the cluster
* Archiving Workflows after executing for later access
* Scheduled workflows using cron
* Server interface with REST API (HTTP and GRPC)
* DAG or Steps based declaration of workflows
* Step level input & outputs (artifacts/parameters)
* Loops
* Parameterization
* Conditionals
* Timeouts (step & workflow level)
* Retry (step & workflow level)
* Resubmit (memoized)
* Suspend & Resume
* Cancellation
* K8s resource orchestration
* Exit Hooks (notifications, cleanup)
* Garbage collection of completed workflows
* Scheduling (affinity/tolerations/node selectors)
* Volumes (ephemeral/existing)
* Parallelism limits
* Daemoned steps
* DinD (docker-in-docker)
* Script steps
* Event emission
* Prometheus metrics
* Multiple executors
* Multiple pod and workflow garbage collection strategies
* Automatically calculated resource usage per step
* Java/Golang/Python SDKs
* Pod Disruption Budget support
* Single-sign on (OAuth2/OIDC)
* Webhook triggering
* CLI
* Out-of-the-box and custom Prometheus metrics
* Windows container support
* Embedded widgets
* Multiplex log viewer
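As a concrete sketch of the cron scheduling feature noted above, a `CronWorkflow` wraps an ordinary workflow spec in a schedule (names, namespace, and image here are illustrative assumptions):
```bash
# Sketch: run a one-step workflow every ten minutes.
kubectl create -n argo -f - <<'EOF'
apiVersion: argoproj.io/v1alpha1
kind: CronWorkflow
metadata:
  name: hello-cron
spec:
  schedule: "*/10 * * * *"
  workflowSpec:
    entrypoint: main
    templates:
      - name: main
        container:
          image: busybox
          command: [echo, "scheduled hello"]
EOF
```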
## Community Meetings
We host monthly community meetings where we and the community showcase demos and discuss the current and future state of the project. Feel free to join us!
For Community Meeting information, minutes and recordings, please [see here](https://bit.ly/argo-wf-cmty-mtng).
Participation in Argo Workflows is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
## Community Blogs and Presentations
* [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo)
* [Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts](https://youtu.be/XNXJtxkUKeY)
* [Argo Workflows and Pipelines - CI/CD, Machine Learning, and Other Kubernetes Workflows](https://youtu.be/UMaivwrAyTA)
* [Argo Ansible role: Provisioning Argo Workflows on OpenShift](https://medium.com/@marekermk/provisioning-argo-on-openshift-with-ansible-and-kustomize-340a1fda8b50)
* [Argo Workflows vs Apache Airflow](http://bit.ly/30YNIvT)
* [CI/CD with Argo on Kubernetes](https://medium.com/@bouwe.ceunen/ci-cd-with-argo-on-kubernetes-28c1a99616a9)
* [Define Your CI/CD Pipeline with Argo Workflows](https://haque-zubair.medium.com/define-your-ci-cd-pipeline-with-argo-workflows-25aefb02fa63)
* [Distributed Machine Learning Patterns from Manning Publication](https://github.com/terrytangyuan/distributed-ml-patterns)
* [Running Argo Workflows Across Multiple Kubernetes Clusters](https://admiralty.io/blog/running-argo-workflows-across-multiple-kubernetes-clusters/)
* [Open Source Model Management Roundup: Polyaxon, Argo, and Seldon](https://www.anaconda.com/blog/developer-blog/open-source-model-management-roundup-polyaxon-argo-and-seldon/)
* [Producing 200 OpenStreetMap extracts in 35 minutes using a scalable data workflow](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/)
* [Argo integration review](http://dev.matt.hillsdon.net/2018/03/24/argo-integration-review.html)
* TGI Kubernetes with Joe Beda: [Argo workflow system](https://www.youtube.com/watch?v=M_rxPPLG8pU&start=859)
## Project Resources
* [Argo Project GitHub organization](https://github.com/argoproj)
* [Argo Website](https://argoproj.github.io/)
* [Argo Slack](https://argoproj.github.io/community/join-slack)
## Security
See [Security](security.md).

View file

@ -0,0 +1,9 @@
![cnoe logo](./images/cnoe-logo.png)
# Example Spark Application
Thanks for trying out this demo! In this example, we deployed a simple Apache Spark job through Argo Workflows.
To learn more about the Spark Operator, check out [this link](https://github.com/kubeflow/spark-operator).
To learn more about Argo Workflows, see [this link](https://argoproj.github.io/workflows/).

View file

@ -0,0 +1,86 @@
# Kubeflow Spark Operator
[![Go Report Card](https://goreportcard.com/badge/github.com/kubeflow/spark-operator)](https://goreportcard.com/report/github.com/kubeflow/spark-operator)
## What is Spark Operator?
The Kubernetes Operator for Apache Spark aims to make specifying and running [Spark](https://github.com/apache/spark) applications as easy and idiomatic as running other workloads on Kubernetes. It uses
[Kubernetes custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) for specifying, running, and surfacing status of Spark applications.
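For illustration, here is a condensed `SparkApplication` along the lines of the one used elsewhere in this stack (the name, namespace, and `spark` service account are assumptions for this sketch):
```bash
kubectl apply -f - <<'EOF'
apiVersion: sparkoperator.k8s.io/v1beta2
kind: SparkApplication
metadata:
  name: spark-pi
  namespace: default
spec:
  type: Scala
  mode: cluster
  image: docker.io/apache/spark:v3.1.3
  mainClass: org.apache.spark.examples.SparkPi
  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.1.3.jar
  sparkVersion: "3.1.1"
  restartPolicy:
    type: Never
  driver:
    cores: 1
    memory: 512m
    serviceAccount: spark
  executor:
    cores: 1
    instances: 1
    memory: 512m
EOF
```
The operator watches for such objects and runs `spark-submit` on your behalf.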
## Overview
For a complete reference of the custom resource definitions, please refer to the [API Definition](docs/api-docs.md). For details on its design, please refer to the [Architecture](https://www.kubeflow.org/docs/components/spark-operator/overview/#architecture). It requires Spark 2.3 or above, which supports Kubernetes as a native scheduler backend.
The Kubernetes Operator for Apache Spark currently supports the following list of features:
* Supports Spark 2.3 and up.
* Enables declarative application specification and management of applications through custom resources.
* Automatically runs `spark-submit` on behalf of users for each `SparkApplication` eligible for submission.
* Provides native [cron](https://en.wikipedia.org/wiki/Cron) support for running scheduled applications.
* Supports customization of Spark pods beyond what Spark natively supports, via a mutating admission webhook, e.g., mounting ConfigMaps and volumes, and setting pod affinity/anti-affinity.
* Supports automatic re-submission of applications when the `SparkApplication` specification is updated.
* Supports automatic application restart with a configurable restart policy.
* Supports automatic retries of failed submissions with optional linear back-off.
* Supports mounting local Hadoop configuration as a Kubernetes ConfigMap automatically via `sparkctl`.
* Supports automatically staging local application dependencies to Google Cloud Storage (GCS) via `sparkctl`.
* Supports collecting and exporting application-level metrics and driver/executor metrics to Prometheus.
## Project Status
**Project status:** *beta*
**Current API version:** *`v1beta2`*
**If you are currently using the `v1beta1` version of the APIs in your manifests, please update them to use the `v1beta2` version by changing `apiVersion: "sparkoperator.k8s.io/<version>"` to `apiVersion: "sparkoperator.k8s.io/v1beta2"`. You will also need to delete the previous version of the CustomResourceDefinitions named `sparkapplications.sparkoperator.k8s.io` and `scheduledsparkapplications.sparkoperator.k8s.io`, and replace them with the `v1beta2` version either by installing the latest version of the operator or by running `kubectl create -f config/crd/bases`.**
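Concretely, the migration above boils down to the following commands (a sketch of the steps named in the paragraph, run from a checkout of the operator repository):
```bash
# Delete the previous (v1beta1) CustomResourceDefinitions...
kubectl delete crd sparkapplications.sparkoperator.k8s.io
kubectl delete crd scheduledsparkapplications.sparkoperator.k8s.io
# ...then recreate them at v1beta2:
kubectl create -f config/crd/bases
```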
## Prerequisites
* Version >= 1.13 of Kubernetes to use the [`subresource` support for CustomResourceDefinitions](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#subresources), which became beta in 1.13 and is enabled by default in 1.13 and higher.
* Version >= 1.16 of Kubernetes to use the `MutatingWebhook` and `ValidatingWebhook` of `apiVersion: admissionregistration.k8s.io/v1`.
## Getting Started
For getting started with Spark operator, please refer to [Getting Started](https://www.kubeflow.org/docs/components/spark-operator/getting-started/).
## User Guide
For detailed user guide and API documentation, please refer to [User Guide](https://www.kubeflow.org/docs/components/spark-operator/user-guide/) and [API Specification](docs/api-docs.md).
If you are running Spark operator on Google Kubernetes Engine (GKE) and want to use Google Cloud Storage (GCS) and/or BigQuery for reading/writing data, also refer to the [GCP guide](https://www.kubeflow.org/docs/components/spark-operator/user-guide/gcp/).
## Version Matrix
The following table lists the most recent few versions of the operator.
| Operator Version | API Version | Kubernetes Version | Base Spark Version |
| ------------- | ------------- | ------------- | ------------- |
| `v1beta2-1.6.x-3.5.0` | `v1beta2` | 1.16+ | `3.5.0` |
| `v1beta2-1.5.x-3.5.0` | `v1beta2` | 1.16+ | `3.5.0` |
| `v1beta2-1.4.x-3.5.0` | `v1beta2` | 1.16+ | `3.5.0` |
| `v1beta2-1.3.x-3.1.1` | `v1beta2` | 1.16+ | `3.1.1` |
| `v1beta2-1.2.3-3.1.1` | `v1beta2` | 1.13+ | `3.1.1` |
| `v1beta2-1.2.2-3.0.0` | `v1beta2` | 1.13+ | `3.0.0` |
| `v1beta2-1.2.1-3.0.0` | `v1beta2` | 1.13+ | `3.0.0` |
| `v1beta2-1.2.0-3.0.0` | `v1beta2` | 1.13+ | `3.0.0` |
| `v1beta2-1.1.x-2.4.5` | `v1beta2` | 1.13+ | `2.4.5` |
| `v1beta2-1.0.x-2.4.4` | `v1beta2` | 1.13+ | `2.4.4` |
## Developer Guide
For developing with Spark Operator, please refer to [Developer Guide](https://www.kubeflow.org/docs/components/spark-operator/developer-guide/).
## Contributor Guide
For contributing to Spark Operator, please refer to [Contributor Guide](CONTRIBUTING.md).
## Community
* Join the [CNCF Slack Channel](https://www.kubeflow.org/docs/about/community/#kubeflow-slack-channels) and then join `#kubeflow-spark-operator` Channel.
* Check out our blog post [Announcing the Kubeflow Spark Operator: Building a Stronger Spark on Kubernetes Community](https://blog.kubeflow.org/operators/2024/04/15/kubeflow-spark-operator.html).
* Join our monthly community meeting [Kubeflow Spark Operator Meeting Notes](https://bit.ly/3VGzP4n).
## Adopters
Check out [adopters of Spark Operator](ADOPTERS.md).

View file

@ -0,0 +1,109 @@
# apiVersion: argoproj.io/v1alpha1
# kind: Workflow
# metadata:
# name: ${{values.name}}
# namespace: argo
# labels:
# env: dev
# entity-id: ${{values.name}}
# spec:
# serviceAccountName: admin
# entrypoint: whalesay
# templates:
# - name: whalesay
# container:
# image: docker/whalesay:latest
# command: [cowsay]
# args: ["hello world"]
---
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
name: ${{values.name}}
namespace: argo
labels:
env: dev
entity-id: ${{values.name}}
spec:
serviceAccountName: admin
entrypoint: main
templates:
- name: main
steps:
- - name: spark-job
template: spark-job
- - name: wait
template: wait
arguments:
parameters:
- name: spark-job-name
value: '{{steps.spark-job.outputs.parameters.spark-job-name}}'
- name: wait
inputs:
parameters:
- name: spark-job-name
resource:
action: get
successCondition: status.applicationState.state == COMPLETED
failureCondition: status.applicationState.state == FAILED
manifest: |
apiVersion: "sparkoperator.k8s.io/v1beta2"
kind: SparkApplication
metadata:
name: {{inputs.parameters.spark-job-name}}
namespace: argo
- name: spark-job
outputs:
parameters:
- name: spark-job-name
valueFrom:
jsonPath: '{.metadata.name}'
resource:
action: create
setOwnerReference: true
manifest: |
apiVersion: "sparkoperator.k8s.io/v1beta2"
kind: SparkApplication
metadata:
name: spark-pi-${{values.name}}
namespace: argo
labels:
env: dev
entity-id: ${{values.name}}
spec:
type: Scala
mode: cluster
image: "docker.io/apache/spark:v3.1.3"
imagePullPolicy: IfNotPresent
mainClass: org.apache.spark.examples.SparkPi
mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.1.3.jar"
sparkVersion: "3.1.1"
restartPolicy:
type: Never
volumes:
- name: "test-volume"
hostPath:
path: "/tmp"
type: Directory
driver:
cores: 1
coreLimit: "1200m"
memory: "512m"
labels:
version: 3.1.1
serviceAccount: admin
volumeMounts:
- name: "test-volume"
mountPath: "/tmp"
executor:
cores: 1
instances: 1
memory: "512m"
labels:
version: 3.1.1
volumeMounts:
- name: "test-volume"
mountPath: "/tmp"

View file

@ -0,0 +1,8 @@
site_name: 'Argo Spark Example'
nav:
- Home: index.md
- Argo-Workflows: argo-workflows.md
- Apache Spark Operator: spark-operator.md
plugins:
- techdocs-core

View file

@ -0,0 +1,62 @@
apiVersion: scaffolder.backstage.io/v1beta3
kind: Template
metadata:
description: Creates a Basic Kubernetes Deployment
name: argo-workflows-basic
title: Basic Argo Workflow with a Spark Job
spec:
owner: guests
type: service
parameters:
- title: Configuration Options
required:
- name
properties:
name:
type: string
description: name of this application
mainApplicationFile:
type: string
default: 'local:///opt/spark/examples/jars/spark-examples_2.12-3.1.3.jar'
description: Path to the main application file
steps:
- id: template
name: Generating component
action: fetch:template
input:
url: ./skeleton
values:
name: ${{parameters.name}}
- id: publish
name: Publishing to a gitea git repository
action: publish:gitea
input:
description: This is an example app
        # Hard-coded value for demo purposes only.
repoUrl: gitea.cnoe.localtest.me:443/?repo=${{parameters.name}}
defaultBranch: main
- id: create-argocd-app
name: Create ArgoCD App
action: cnoe:create-argocd-app
input:
appName: ${{parameters.name}}
appNamespace: ${{parameters.name}}
argoInstance: in-cluster
projectName: default
# necessary until we generate our own cert
repoUrl: https://gitea.cnoe.localtest.me:443/giteaAdmin/${{parameters.name}}
path: "manifests"
- id: register
name: Register
action: catalog:register
input:
repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }}
catalogInfoPath: 'catalog-info.yaml'
output:
links:
- title: Open in catalog
icon: catalog
entityRef: ${{ steps['register'].output.entityRef }}

View file

@ -0,0 +1,6 @@
site_name: 'Argo Spark Example'
nav:
- Home: index.md
- idpBuilder: idpbuilder.md
plugins:
- techdocs-core

View file

@ -0,0 +1,36 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: ${{values.name | dump}}
description: This is a basic example application
annotations:
backstage.io/techdocs-ref: dir:.
backstage.io/kubernetes-label-selector: 'entity-id=${{values.name}}'
backstage.io/kubernetes-namespace: default
argocd/app-name: ${{values.name | dump}}
links:
- url: https://gitea.cnoe.localtest.me:443
title: Repo URL
icon: github
spec:
owner: guests
lifecycle: experimental
type: service
system: ${{values.name | dump}}
---
apiVersion: backstage.io/v1alpha1
kind: System
metadata:
name: ${{values.name | dump}}
description: An example system for demonstration purposes
annotations:
backstage.io/techdocs-ref: dir:.
links:
- url: https://github.com/cnoe-io/stacks/tree/main/ref-implementation
title: CNOE Repo
icon: github
spec:
owner: guests
lifecycle: experimental
type: service

View file

@ -0,0 +1,46 @@
[![Codespell][codespell-badge]][codespell-link]
[![E2E][e2e-badge]][e2e-link]
[![Go Report Card][report-badge]][report-link]
[![Commit Activity][commit-activity-badge]][commit-activity-link]
# IDP Builder
Internal development platform binary launcher.
> **WORK IN PROGRESS**: This tool is in a pre-release stage and is under active development.
## About
Spin up a complete internal developer platform using industry-standard technologies like Kubernetes, Argo, and Backstage, with Docker as the only dependency.
This can be useful in several ways:
* Create a single binary which can demonstrate an IDP reference implementation.
* Use within CI to perform integration testing.
* Use as a local development environment for platform engineers.
## Getting Started
Check out our [documentation website](https://cnoe.io/docs/reference-implementation/installations/idpbuilder) to get started with idpbuilder.
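As a quick sketch, assuming the binary is on your `PATH` and Docker is running:
```bash
# Spin up a local IDP with the defaults:
idpbuilder create
# Or, from a checkout of the stacks repository, bring up the reference
# implementation with path-based routing (these are the flags used by the
# Codespaces guide in this repository):
idpbuilder create --use-path-routing --package ref-implementation
```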
## Community
- If you have questions or concerns about this tool, please feel free to reach out to us on the [CNCF Slack Channel](https://cloud-native.slack.com/archives/C05TN9WFN5S).
- You can also join our community meetings to meet the team and ask any questions. Check out [this calendar](https://calendar.google.com/calendar/embed?src=064a2adfce866ccb02e61663a09f99147f22f06374e7a8994066bdc81e066986%40group.calendar.google.com&ctz=America%2FLos_Angeles) for more information.
## Contribution
Check out the [contribution doc](./CONTRIBUTING.md) for contribution guidelines and more information on how to set up your local environment.
<!-- JUST BADGES & LINKS -->
[codespell-badge]: https://github.com/cnoe-io/idpbuilder/actions/workflows/codespell.yaml/badge.svg
[codespell-link]: https://github.com/cnoe-io/idpbuilder/actions/workflows/codespell.yaml
[e2e-badge]: https://github.com/cnoe-io/idpbuilder/actions/workflows/e2e.yaml/badge.svg
[e2e-link]: https://github.com/cnoe-io/idpbuilder/actions/workflows/e2e.yaml
[report-badge]: https://goreportcard.com/badge/github.com/cnoe-io/idpbuilder
[report-link]: https://goreportcard.com/report/github.com/cnoe-io/idpbuilder
[commit-activity-badge]: https://img.shields.io/github/commit-activity/m/cnoe-io/idpbuilder
[commit-activity-link]: https://github.com/cnoe-io/idpbuilder/pulse

Binary file not shown.

After

Width:  |  Height:  |  Size: 57 KiB

View file

@ -0,0 +1,11 @@
![cnoe logo](./images/cnoe-logo.png)
# Example Basic Application
Thanks for trying out this demo! In this example, we deployed a simple application.
### idpbuilder
Check out the idpbuilder website: https://cnoe.io/docs/reference-implementation/installations/idpbuilder
Check out the idpbuilder repository: https://github.com/cnoe-io/idpbuilder

View file

@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ${{values.name | dump}}
namespace: default
labels:
entity-id: ${{values.name}}
app: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
entity-id: ${{values.name}}
spec:
containers:
- name: nginx
image: nginx:1.14.2
ports:
- containerPort: 80

View file

@ -0,0 +1,58 @@
apiVersion: scaffolder.backstage.io/v1beta3
kind: Template
metadata:
description: Creates a Basic Kubernetes Deployment
name: basic
title: Create a Basic Deployment
spec:
owner: guests
type: service
parameters:
- title: Configuration Options
required:
- name
properties:
name:
type: string
description: name of this application
steps:
- id: template
name: Generating component
action: fetch:template
input:
url: ./skeleton
values:
name: ${{parameters.name}}
- id: publish
name: Publishing to a gitea git repository
action: publish:gitea
input:
description: This is an example app
        # Hard-coded value for demo purposes only.
repoUrl: gitea.cnoe.localtest.me:443/?repo=${{parameters.name}}
defaultBranch: main
- id: create-argocd-app
name: Create ArgoCD App
action: cnoe:create-argocd-app
input:
appName: ${{parameters.name}}
appNamespace: ${{parameters.name}}
argoInstance: in-cluster
projectName: default
# necessary until we generate our own cert
repoUrl: https://gitea.cnoe.localtest.me:443/giteaAdmin/${{parameters.name}}
path: "manifests"
- id: register
name: Register
action: catalog:register
input:
repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }}
catalogInfoPath: 'catalog-info.yaml'
output:
links:
- title: Open in catalog
icon: catalog
entityRef: ${{ steps['register'].output.entityRef }}

View file

@ -0,0 +1,20 @@
apiVersion: backstage.io/v1alpha1
kind: Location
metadata:
name: basic-example-templates
description: A collection of example templates
spec:
targets:
- ./basic/template.yaml
- ./argo-workflows/template.yaml
- ./app-with-bucket/template.yaml
- ./demo-go-hello-world/template.yaml
---
apiVersion: backstage.io/v1alpha1
kind: Location
metadata:
name: basic-organization
description: Basic organization data
spec:
targets:
- ./organization/guests.yaml

View file

@ -0,0 +1,35 @@
---
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: ${{ values.name }}
  description: This is a Backstage component created from the custom template that creates a Hello-World example
annotations:
backstage.io/techdocs-ref: dir:.
backstage.io/kubernetes-label-selector: 'entity-id=${{ values.name }}'
backstage.io/kubernetes-namespace: gitea
links:
- url: https://gitea.cnoe.localtest.me:443
title: Repo URL
icon: git
spec:
owner: guests
lifecycle: experimental
type: service
system: ${{ values.name | dump }}
---
apiVersion: backstage.io/v1alpha1
kind: System
metadata:
name: ${{ values.name | dump }}
description: A system for managing services created from the Gitea template.
annotations:
backstage.io/techdocs-ref: dir:.
links:
- url: https://gitea.cnoe.localtest.me:443
title: Gitea Repo
icon: git
spec:
owner: guests
lifecycle: experimental
type: service

View file

@ -0,0 +1,20 @@
package main
import (
"fmt"
"net/http"
)
func helloHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Hello World")
}
func main() {
http.HandleFunc("/", helloHandler)
fmt.Println("Server is running on port 8081...")
err := http.ListenAndServe(":8081", nil)
if err != nil {
fmt.Println("Error starting the server:", err)
}
}
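// To try this skeleton locally (assumption: a Go toolchain is installed):
//
//	go run main.go
//	curl http://localhost:8081/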

View file

@ -0,0 +1,54 @@
apiVersion: scaffolder.backstage.io/v1beta3
kind: Template
metadata:
name: example-go-template
title: Example Go template
description: An example template for the scaffolder that creates a simple go service
spec:
owner: user:guest
type: service
parameters:
- title: Fill in some steps
required:
- name
properties:
name:
title: Name
type: string
description: Unique name of the component
ui:autofocus: true
steps:
- id: fetch-template
name: Fetch Template
action: fetch:template
input:
url: ./skeleton
targetPath: ./skeleton
values:
name: ${{ parameters.name }}
- id: publish
name: Publish to Gitea
action: publish:gitea
input:
repoUrl: gitea.cnoe.localtest.me:443/?repo=${{parameters.name}}
description: This is the repository for ${{ parameters.name }}
sourcePath: ./skeleton
defaultBranch: main
- id: register
name: Register in Catalog
action: catalog:register
input:
repoContentsUrl: ${{ steps['publish'].output.repoContentsUrl }}
catalogInfoPath: 'catalog-info.yaml'
output:
links:
- title: Repository
url: ${{ steps['publish'].output.remoteUrl }}
- title: Open in Catalog
icon: catalog
entityRef: ${{ steps['register'].output.entityRef }}

View file

@ -0,0 +1,15 @@
---
apiVersion: backstage.io/v1alpha1
kind: User
metadata:
name: guest
spec:
memberOf: [guests]
---
apiVersion: backstage.io/v1alpha1
kind: Group
metadata:
name: guests
spec:
type: team
children: []

View file

@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: backstage
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot
targetRevision: HEAD
path: "stacks/ref-implementation/backstage/manifests"
destination:
server: "https://kubernetes.default.svc"
namespace: backstage
syncPolicy:
syncOptions:
- CreateNamespace=true
automated:
selfHeal: true
retry:
limit: -1

View file

@ -0,0 +1,77 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: eso-store
namespace: argocd
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: eso-store
namespace: argocd
rules:
- apiGroups: [""]
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- authorization.k8s.io
resources:
- selfsubjectrulesreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: eso-store
namespace: argocd
subjects:
- kind: ServiceAccount
name: eso-store
namespace: argocd
roleRef:
kind: Role
name: eso-store
apiGroup: rbac.authorization.k8s.io
---
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: argocd
spec:
provider:
kubernetes:
remoteNamespace: argocd
server:
caProvider:
type: ConfigMap
name: kube-root-ca.crt
namespace: argocd
key: ca.crt
auth:
serviceAccount:
name: eso-store
namespace: argocd
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: argocd-credentials
namespace: backstage
spec:
secretStoreRef:
name: argocd
kind: ClusterSecretStore
refreshInterval: "0"
target:
name: argocd-credentials
data:
- secretKey: ARGOCD_ADMIN_PASSWORD
remoteRef:
key: argocd-initial-admin-secret
property: password

View file

@ -0,0 +1,455 @@
apiVersion: v1
kind: Namespace
metadata:
name: backstage
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: backstage
namespace: backstage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: backstage-argo-workflows
rules:
- apiGroups:
- argoproj.io
resources:
- workflows
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: read-all
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: backstage-argo-workflows
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: backstage-argo-workflows
subjects:
- kind: ServiceAccount
name: backstage
namespace: backstage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: backstage-read-all
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: read-all
subjects:
- kind: ServiceAccount
name: backstage
namespace: backstage
---
apiVersion: v1
kind: ConfigMap
metadata:
name: backstage-config
namespace: backstage
data:
app-config.yaml: |
app:
title: CNOE Backstage
baseUrl: https://cnoe.localtest.me:443
organization:
name: CNOE
backend:
# Used for enabling authentication, secret is shared by all backend plugins
# See https://backstage.io/docs/tutorials/backend-to-backend-auth for
# information on the format
# auth:
# keys:
# - secret: ${BACKEND_SECRET}
baseUrl: https://cnoe.localtest.me:443
listen:
port: 7007
# Uncomment the following host directive to bind to specific interfaces
# host: 127.0.0.1
csp:
connect-src: ["'self'", 'http:', 'https:']
# Content-Security-Policy directives follow the Helmet format: https://helmetjs.github.io/#reference
# Default Helmet Content-Security-Policy values can be removed by setting the key to false
cors:
origin: https://cnoe.localtest.me:443
methods: [GET, HEAD, PATCH, POST, PUT, DELETE]
credentials: true
database:
client: pg
connection:
host: ${POSTGRES_HOST}
port: ${POSTGRES_PORT}
user: ${POSTGRES_USER}
password: ${POSTGRES_PASSWORD}
cache:
store: memory
# workingDirectory: /tmp # Use this to configure a working directory for the scaffolder, defaults to the OS temp-dir
integrations:
gitea:
- baseUrl: https://gitea.cnoe.localtest.me:443
host: gitea.cnoe.localtest.me:443
username: ${GITEA_USERNAME}
password: ${GITEA_PASSWORD}
- baseUrl: https://gitea.cnoe.localtest.me
host: gitea.cnoe.localtest.me
username: ${GITEA_USERNAME}
password: ${GITEA_PASSWORD}
# github:
# - host: github.com
# apps:
# - $include: github-integration.yaml
# - host: github.com
# # This is a Personal Access Token or PAT from GitHub. You can find out how to generate this token, and more information
# # about setting up the GitHub integration here: https://backstage.io/docs/getting-started/configuration#setting-up-a-github-integration
# token: ${GITHUB_TOKEN}
### Example for how to add your GitHub Enterprise instance using the API:
# - host: ghe.example.net
# apiBaseUrl: https://ghe.example.net/api/v3
# token: ${GHE_TOKEN}
# Reference documentation http://backstage.io/docs/features/techdocs/configuration
# Note: After experimenting with basic setup, use CI/CD to generate docs
# and an external cloud storage when deploying TechDocs for production use-case.
# https://backstage.io/docs/features/techdocs/how-to-guides#how-to-migrate-from-techdocs-basic-to-recommended-deployment-approach
techdocs:
builder: 'local' # Alternatives - 'external'
generator:
runIn: 'local'
publisher:
type: 'local' # Alternatives - 'googleGcs' or 'awsS3'. Read documentation for using alternatives.
auth:
environment: development
session:
secret: MW2sV-sIPngEl26vAzatV-6VqfsgAx4bPIz7PuE_2Lk=
providers:
keycloak-oidc:
development:
metadataUrl: ${KEYCLOAK_NAME_METADATA}
clientId: backstage
clientSecret: ${KEYCLOAK_CLIENT_SECRET}
prompt: auto
scaffolder:
# see https://backstage.io/docs/features/software-templates/configuration for software template options
defaultAuthor:
name: backstage-scaffolder
email: noreply
defaultCommitMessage: "backstage scaffolder"
catalog:
import:
entityFilename: catalog-info.yaml
pullRequestBranchName: backstage-integration
rules:
- allow: [Component, System, API, Resource, Location, Template]
locations:
# Examples from a public GitHub repository.
- type: url
target: https://gitea.cnoe.localtest.me:443/giteaAdmin/edfbuilder-shoot/raw/branch/main/stacks/ref-implementation/backstage-templates/entities/catalog-info.yaml
rules:
- allow: [Component, System, API, Resource, Location, Template, User, Group]
kubernetes:
serviceLocatorMethod:
type: 'multiTenant'
clusterLocatorMethods:
- $include: k8s-config.yaml
argocd:
username: admin
password: ${ARGOCD_ADMIN_PASSWORD}
appLocatorMethods:
- type: 'config'
instances:
- name: in-cluster
url: https://cnoe.localtest.me:443/argocd
username: admin
password: ${ARGOCD_ADMIN_PASSWORD}
argoWorkflows:
baseUrl: ${ARGO_WORKFLOWS_URL}
---
apiVersion: v1
kind: Secret
metadata:
name: k8s-config
namespace: backstage
stringData:
k8s-config.yaml: "type: 'config'\nclusters:\n - url: https://kubernetes.default.svc.cluster.local\n
\ name: local\n authProvider: 'serviceAccount'\n skipTLSVerify: true\n
\ skipMetricsLookup: true\n serviceAccountToken: \n $file: /var/run/secrets/kubernetes.io/serviceaccount/token\n
\ caData: \n $file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n"
---
apiVersion: v1
kind: Service
metadata:
name: backstage
namespace: backstage
spec:
ports:
- name: http
port: 7007
targetPort: http
selector:
app: backstage
---
apiVersion: v1
kind: Service
metadata:
labels:
app: postgresql
name: postgresql
namespace: backstage
spec:
clusterIP: None
ports:
- name: postgres
port: 5432
selector:
app: postgresql
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: backstage
namespace: backstage
annotations:
argocd.argoproj.io/sync-wave: "20"
spec:
replicas: 1
selector:
matchLabels:
app: backstage
template:
metadata:
labels:
app: backstage
spec:
containers:
- command:
- node
- packages/backend
- --config
- config/app-config.yaml
env:
- name: LOG_LEVEL
value: debug
- name: NODE_TLS_REJECT_UNAUTHORIZED
value: "0"
envFrom:
- secretRef:
name: backstage-env-vars
- secretRef:
name: gitea-credentials
- secretRef:
name: argocd-credentials
image: ghcr.io/cnoe-io/backstage-app:9232d633b2698fffa6d0a73b715e06640d170162
name: backstage
ports:
- containerPort: 7007
name: http
volumeMounts:
- mountPath: /app/config
name: backstage-config
readOnly: true
serviceAccountName: backstage
volumes:
- name: backstage-config
projected:
sources:
- configMap:
items:
- key: app-config.yaml
path: app-config.yaml
name: backstage-config
- secret:
items:
- key: k8s-config.yaml
path: k8s-config.yaml
name: k8s-config
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app: postgresql
name: postgresql
namespace: backstage
annotations:
argocd.argoproj.io/sync-wave: "10"
spec:
replicas: 1
selector:
matchLabels:
app: postgresql
serviceName: service-postgresql
template:
metadata:
labels:
app: postgresql
spec:
containers:
- env:
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
name: backstage-env-vars
key: POSTGRES_DB
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: backstage-env-vars
key: POSTGRES_USER
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: backstage-env-vars
key: POSTGRES_PASSWORD
image: docker.io/library/postgres:15.3-alpine3.18
name: postgres
ports:
- containerPort: 5432
name: postgresdb
resources:
limits:
memory: 500Mi
requests:
cpu: 100m
memory: 300Mi
volumeMounts:
- name: data
mountPath: /var/lib/postgresql/data
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: "500Mi"
---
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
metadata:
name: backstage
namespace: backstage
spec:
length: 36
digits: 5
symbols: 5
symbolCharacters: "/-+"
noUpper: false
allowRepeat: true
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: backstage-oidc
namespace: backstage
spec:
secretStoreRef:
name: keycloak
kind: ClusterSecretStore
refreshInterval: "0"
target:
name: backstage-env-vars
template:
engineVersion: v2
data:
BACKSTAGE_FRONTEND_URL: https://cnoe.localtest.me:443/backstage
POSTGRES_HOST: postgresql.backstage.svc.cluster.local
POSTGRES_PORT: '5432'
POSTGRES_DB: backstage
POSTGRES_USER: backstage
POSTGRES_PASSWORD: "{{.POSTGRES_PASSWORD}}"
ARGO_WORKFLOWS_URL: https://cnoe.localtest.me:443/argo-workflows
KEYCLOAK_NAME_METADATA: https://cnoe.localtest.me:443/keycloak/realms/cnoe/.well-known/openid-configuration
KEYCLOAK_CLIENT_SECRET: "{{.BACKSTAGE_CLIENT_SECRET}}"
ARGOCD_AUTH_TOKEN: "argocd.token={{.ARGOCD_SESSION_TOKEN}}"
ARGO_CD_URL: 'https://argocd-server.argocd.svc.cluster.local/api/v1/'
data:
- secretKey: ARGOCD_SESSION_TOKEN
remoteRef:
key: keycloak-clients
property: ARGOCD_SESSION_TOKEN
- secretKey: BACKSTAGE_CLIENT_SECRET
remoteRef:
key: keycloak-clients
property: BACKSTAGE_CLIENT_SECRET
dataFrom:
- sourceRef:
generatorRef:
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
name: backstage
rewrite:
- transform:
template: "POSTGRES_PASSWORD"
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: gitea-credentials
namespace: backstage
spec:
secretStoreRef:
name: gitea
kind: ClusterSecretStore
refreshInterval: "0"
target:
name: gitea-credentials
data:
- secretKey: GITEA_USERNAME
remoteRef:
key: gitea-credential
property: username
- secretKey: GITEA_PASSWORD
remoteRef:
key: gitea-credential
property: password
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: backstage
namespace: backstage
spec:
ingressClassName: "nginx"
rules:
- host: localhost
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: backstage
port:
name: http
- host: cnoe.localtest.me
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: backstage
port:
name: http

View file

@ -0,0 +1,73 @@
## Running idpbuilder in Codespaces in Browser
**_NOTE:_** __The steps described below apply to running this implementation in Codespaces in **web browsers** (e.g. Firefox and Chrome).
If you are using Codespaces with the GitHub CLI, the steps described here do not apply to you.__
Let's create an instance of Codespaces.
![img.png](images/codespaces-create.png)
It may take a few minutes for it to be ready. Once it's ready, you can either get the latest release of idpbuilder or build it from the main branch.
- Get the latest release:
```bash
version=$(curl -Ls -o /dev/null -w %{url_effective} https://github.com/cnoe-io/idpbuilder/releases/latest)
version=${version##*/}
wget https://github.com/cnoe-io/idpbuilder/releases/download/${version}/idpbuilder-linux-amd64.tar.gz
tar xzf idpbuilder-linux-amd64.tar.gz
sudo mv ./idpbuilder /usr/local/bin/
```
- Alternatively, build from the main branch:
```bash
make build
sudo mv ./idpbuilder /usr/local/bin/
```
Codespaces assigns a random hostname to your specific instance, and you need to make sure it is reflected correctly.
The instance hostname is available as an environment variable (`CODESPACE_NAME`). Let's use it to set up our hostnames.
Run the following commands to update the hostname and port. The port is set to 443 because this is the port the browser uses to access your instance.
Clone the [stacks](https://github.com/cnoe-io/stacks) repo.
```bash
cd ref-implementation
./replace.sh ${CODESPACE_NAME}-8080.${GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN} 443
cd -
```
Now you are ready to run idpbuilder with reference implementation.
```bash
idpbuilder create --protocol http \
--host ${CODESPACE_NAME}-8080.${GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN} \
--port 8080 --use-path-routing --package ref-implementation
```
Once idpbuilder finishes bootstrapping, you should see port 8080 forwarded in the Ports tab within Codespaces.
![](images/port.png)
You may get a 404 page after clicking the forwarded address for port 8080. This is completely normal because Backstage may not be ready yet.
Give it a few more minutes and it should redirect you to a Backstage page.
### Accessing UIs
If you'd like to track the progress of the deployment, go to the `/argocd` path and log in with your ArgoCD credentials.
For example, run this command to get the URL for Argo CD:
```bash
echo https://${CODESPACE_NAME}-8080.${GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN}/argocd
```
From here on, you can follow the instructions in the [README](./README.md) file. The only difference is that the URL to access UIs is given by:
```bash
echo https://${CODESPACE_NAME}-8080.${GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN}
```
For example, if you need to access the Argo Workflows UI, instead of going to `https://cnoe.localtest.me:8443/argo`,
you go to `https://${CODESPACE_NAME}-8080.${GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN}/argo`

View file

@ -0,0 +1,23 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: external-secrets
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: external-secrets
server: "https://kubernetes.default.svc"
source:
repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot
targetRevision: HEAD
path: "stacks/ref-implementation/external-secrets/manifests"
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -0,0 +1,12 @@
#!/bin/bash
set -e
INSTALL_YAML="manifests/install.yaml"
CHART_VERSION="0.9.11"
echo "# EXTERNAL SECRETS INSTALL RESOURCES" >${INSTALL_YAML}
echo "# This file is auto-generated with 'ref-impelmentation/external-secrets/generate-manifests.sh'" >>${INSTALL_YAML}
helm repo add external-secrets --force-update https://charts.external-secrets.io
helm repo update
helm template --namespace external-secrets external-secrets external-secrets/external-secrets -f values.yaml --version ${CHART_VERSION} >>${INSTALL_YAML}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,23 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: keycloak
namespace: argocd
labels:
example: ref-implementation
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
namespace: keycloak
server: "https://kubernetes.default.svc"
source:
repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot
targetRevision: HEAD
path: "stacks/ref-implementation/keycloak/manifests"
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -0,0 +1,30 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: keycloak-ingress-localhost
namespace: keycloak
annotations:
argocd.argoproj.io/sync-wave: "100"
spec:
ingressClassName: "nginx"
rules:
- host: localhost
http:
paths:
- path: /keycloak
pathType: ImplementationSpecific
backend:
service:
name: keycloak
port:
name: http
- host: cnoe.localtest.me
http:
paths:
- path: /keycloak
pathType: ImplementationSpecific
backend:
service:
name: keycloak
port:
name: http

View file

@ -0,0 +1,162 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: keycloak
---
apiVersion: v1
kind: Service
metadata:
name: keycloak
labels:
app: keycloak
spec:
ports:
- name: http
port: 8080
targetPort: 8080
selector:
app: keycloak
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: keycloak
name: keycloak
namespace: keycloak
annotations:
argocd.argoproj.io/sync-wave: "10"
spec:
replicas: 1
selector:
matchLabels:
app: keycloak
template:
metadata:
labels:
app: keycloak
spec:
containers:
- args:
- start-dev
env:
- name: KEYCLOAK_ADMIN
value: cnoe-admin
- name: KEYCLOAK_LOGLEVEL
value: ALL
- name: QUARKUS_TRANSACTION_MANAGER_ENABLE_RECOVERY
value: 'true'
envFrom:
- secretRef:
name: keycloak-config
image: quay.io/keycloak/keycloak:22.0.3
name: keycloak
ports:
- containerPort: 8080
name: http
readinessProbe:
httpGet:
path: /keycloak/realms/master
port: 8080
volumeMounts:
- mountPath: /opt/keycloak/conf
name: keycloak-config
readOnly: true
volumes:
- configMap:
name: keycloak-config
name: keycloak-config
---
apiVersion: v1
data:
keycloak.conf: |
# Database
# The database vendor.
db=postgres
# The username of the database user.
db-url=jdbc:postgresql://postgresql.keycloak.svc.cluster.local:5432/postgres
# The proxy address forwarding mode if the server is behind a reverse proxy.
proxy=edge
# hostname configuration
hostname=cnoe.localtest.me
http-relative-path=keycloak
# the admin url requires its own configuration to reflect correct url
hostname-debug=true
# this should only be allowed in development. NEVER in production.
hostname-strict=false
hostname-strict-backchannel=false
kind: ConfigMap
metadata:
name: keycloak-config
namespace: keycloak
---
apiVersion: v1
kind: Service
metadata:
labels:
app: postgresql
name: postgresql
namespace: keycloak
spec:
clusterIP: None
ports:
- name: postgres
port: 5432
selector:
app: postgresql
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app: postgresql
name: postgresql
namespace: keycloak
spec:
replicas: 1
selector:
matchLabels:
app: postgresql
serviceName: service-postgresql
template:
metadata:
labels:
app: postgresql
spec:
containers:
- envFrom:
- secretRef:
name: keycloak-config
image: docker.io/library/postgres:15.3-alpine3.18
name: postgres
ports:
- containerPort: 5432
name: postgresdb
resources:
limits:
memory: 500Mi
requests:
cpu: 100m
memory: 300Mi
volumeMounts:
- name: data
mountPath: /var/lib/postgresql/data
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: "500Mi"

View file

@ -0,0 +1,366 @@
# Resources here are used to configure the Keycloak instance for SSO
apiVersion: v1
kind: ServiceAccount
metadata:
name: keycloak-config
namespace: keycloak
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: keycloak-config
namespace: keycloak
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "create", "update", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: keycloak-config
namespace: keycloak
subjects:
- kind: ServiceAccount
name: keycloak-config
namespace: keycloak
roleRef:
kind: Role
name: keycloak-config
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: keycloak-config
namespace: argocd
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: keycloak-config
namespace: argocd
subjects:
- kind: ServiceAccount
name: keycloak-config
namespace: keycloak
roleRef:
kind: Role
name: keycloak-config
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ConfigMap
metadata:
name: config-job
namespace: keycloak
data:
client-scope-groups-payload.json: |
{
"name": "groups",
"description": "groups a user belongs to",
"attributes": {
"consent.screen.text": "Access to groups a user belongs to.",
"display.on.consent.screen": "true",
"include.in.token.scope": "true",
"gui.order": ""
},
"type": "default",
"protocol": "openid-connect"
}
group-admin-payload.json: |
{"name":"admin"}
group-base-user-payload.json: |
{"name":"base-user"}
group-mapper-payload.json: |
{
"protocol": "openid-connect",
"protocolMapper": "oidc-group-membership-mapper",
"name": "groups",
"config": {
"claim.name": "groups",
"full.path": "false",
"id.token.claim": "true",
"access.token.claim": "true",
"userinfo.token.claim": "true"
}
}
realm-payload.json: |
{"realm":"cnoe","enabled":true}
user-password.json: |
{
"temporary": false,
"type": "password",
"value": "${USER1_PASSWORD}"
}
user-user1.json: |
{
"username": "user1",
"email": "",
"firstName": "user",
"lastName": "one",
"requiredActions": [],
"emailVerified": false,
"groups": [
"/admin"
],
"enabled": true
}
user-user2.json: |
{
"username": "user2",
"email": "",
"firstName": "user",
"lastName": "two",
"requiredActions": [],
"emailVerified": false,
"groups": [
"/base-user"
],
"enabled": true
}
argo-client-payload.json: |
{
"protocol": "openid-connect",
"clientId": "argo-workflows",
"name": "Argo Workflows Client",
"description": "Used for Argo Workflows SSO",
"publicClient": false,
"authorizationServicesEnabled": false,
"serviceAccountsEnabled": false,
"implicitFlowEnabled": false,
"directAccessGrantsEnabled": true,
"standardFlowEnabled": true,
"frontchannelLogout": true,
"attributes": {
"saml_idp_initiated_sso_url_name": "",
"oauth2.device.authorization.grant.enabled": false,
"oidc.ciba.grant.enabled": false
},
"alwaysDisplayInConsole": false,
"rootUrl": "",
"baseUrl": "",
"redirectUris": [
"https://cnoe.localtest.me:443/argo-workflows/oauth2/callback"
],
"webOrigins": [
"/*"
]
}
backstage-client-payload.json: |
{
"protocol": "openid-connect",
"clientId": "backstage",
"name": "Backstage Client",
"description": "Used for Backstage SSO",
"publicClient": false,
"authorizationServicesEnabled": false,
"serviceAccountsEnabled": false,
"implicitFlowEnabled": false,
"directAccessGrantsEnabled": true,
"standardFlowEnabled": true,
"frontchannelLogout": true,
"attributes": {
"saml_idp_initiated_sso_url_name": "",
"oauth2.device.authorization.grant.enabled": false,
"oidc.ciba.grant.enabled": false
},
"alwaysDisplayInConsole": false,
"rootUrl": "",
"baseUrl": "",
"redirectUris": [
"https://cnoe.localtest.me:443/api/auth/keycloak-oidc/handler/frame"
],
"webOrigins": [
"/*"
]
}
---
apiVersion: batch/v1
kind: Job
metadata:
name: config
namespace: keycloak
annotations:
argocd.argoproj.io/hook: PostSync
spec:
template:
metadata:
generateName: config
spec:
serviceAccountName: keycloak-config
restartPolicy: Never
volumes:
- name: keycloak-config
secret:
secretName: keycloak-config
- name: config-payloads
configMap:
name: config-job
containers:
- name: kubectl
image: docker.io/library/ubuntu:22.04
volumeMounts:
- name: keycloak-config
readOnly: true
mountPath: "/var/secrets/"
- name: config-payloads
readOnly: true
mountPath: "/var/config/"
command: ["/bin/bash", "-c"]
args:
- |
#! /bin/bash
set -ex -o pipefail
apt -qq update && apt -qq install curl jq -y
ADMIN_PASSWORD=$(cat /var/secrets/KEYCLOAK_ADMIN_PASSWORD)
USER1_PASSWORD=$(cat /var/secrets/USER_PASSWORD)
KEYCLOAK_URL=http://keycloak.keycloak.svc.cluster.local:8080/keycloak
KEYCLOAK_TOKEN=$(curl -sS --fail-with-body -X POST -H "Content-Type: application/x-www-form-urlencoded" \
--data-urlencode "username=cnoe-admin" \
--data-urlencode "password=${ADMIN_PASSWORD}" \
--data-urlencode "grant_type=password" \
--data-urlencode "client_id=admin-cli" \
${KEYCLOAK_URL}/realms/master/protocol/openid-connect/token | jq -e -r '.access_token')
set +e
curl --fail-with-body -H "Authorization: bearer ${KEYCLOAK_TOKEN}" "${KEYCLOAK_URL}/admin/realms/cnoe" &> /dev/null
if [ $? -eq 0 ]; then
exit 0
fi
set -e
curl -sS -LO "https://dl.k8s.io/release/v1.28.3//bin/linux/amd64/kubectl"
chmod +x kubectl
echo "creating cnoe realm and groups"
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/realm-payload.json \
${KEYCLOAK_URL}/admin/realms
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/client-scope-groups-payload.json \
${KEYCLOAK_URL}/admin/realms/cnoe/client-scopes
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/group-admin-payload.json \
${KEYCLOAK_URL}/admin/realms/cnoe/groups
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/group-base-user-payload.json \
${KEYCLOAK_URL}/admin/realms/cnoe/groups
# Create scope mapper
echo 'adding group claim to tokens'
CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X GET ${KEYCLOAK_URL}/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id')
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/group-mapper-payload.json \
${KEYCLOAK_URL}/admin/realms/cnoe/client-scopes/${CLIENT_SCOPE_GROUPS_ID}/protocol-mappers/models
echo "creating test users"
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/user-user1.json \
${KEYCLOAK_URL}/admin/realms/cnoe/users
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/user-user2.json \
${KEYCLOAK_URL}/admin/realms/cnoe/users
USER1ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" "${KEYCLOAK_URL}/admin/realms/cnoe/users?lastName=one" | jq -r '.[0].id')
USER2ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" "${KEYCLOAK_URL}/admin/realms/cnoe/users?lastName=two" | jq -r '.[0].id')
echo "setting user passwords"
jq -r --arg pass ${USER1_PASSWORD} '.value = $pass' /var/config/user-password.json > /tmp/user-password-to-be-applied.json
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X PUT --data @/tmp/user-password-to-be-applied.json \
${KEYCLOAK_URL}/admin/realms/cnoe/users/${USER1ID}/reset-password
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X PUT --data @/tmp/user-password-to-be-applied.json \
${KEYCLOAK_URL}/admin/realms/cnoe/users/${USER2ID}/reset-password
echo "creating Argo Workflows client"
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/argo-client-payload.json \
${KEYCLOAK_URL}/admin/realms/cnoe/clients
CLIENT_ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients | jq -e -r '.[] | select(.clientId == "argo-workflows") | .id')
CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id')
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X PUT ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID}/default-client-scopes/${CLIENT_SCOPE_GROUPS_ID}
ARGO_WORKFLOWS_CLIENT_SECRET=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID} | jq -e -r '.secret')
echo "creating Backstage client"
curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X POST --data @/var/config/backstage-client-payload.json \
${KEYCLOAK_URL}/admin/realms/cnoe/clients
CLIENT_ID=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients | jq -e -r '.[] | select(.clientId == "backstage") | .id')
CLIENT_SCOPE_GROUPS_ID=$(curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X GET ${KEYCLOAK_URL}/admin/realms/cnoe/client-scopes | jq -e -r '.[] | select(.name == "groups") | .id')
curl -sS -H "Content-Type: application/json" -H "Authorization: bearer ${KEYCLOAK_TOKEN}" -X PUT ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID}/default-client-scopes/${CLIENT_SCOPE_GROUPS_ID}
BACKSTAGE_CLIENT_SECRET=$(curl -sS -H "Content-Type: application/json" \
-H "Authorization: bearer ${KEYCLOAK_TOKEN}" \
-X GET ${KEYCLOAK_URL}/admin/realms/cnoe/clients/${CLIENT_ID} | jq -e -r '.secret')
ARGOCD_PASSWORD=$(./kubectl -n argocd get secret argocd-initial-admin-secret -o go-template='{{.data.password | base64decode }}')
ARGOCD_SESSION_TOKEN=$(curl -k -sS http://argocd-server.argocd.svc.cluster.local:443/api/v1/session -H 'Content-Type: application/json' -d "{\"username\":\"admin\",\"password\":\"${ARGOCD_PASSWORD}\"}" | jq -r .token)
echo \
"apiVersion: v1
kind: Secret
metadata:
name: keycloak-clients
namespace: keycloak
type: Opaque
stringData:
ARGO_WORKFLOWS_CLIENT_SECRET: ${ARGO_WORKFLOWS_CLIENT_SECRET}
ARGO_WORKFLOWS_CLIENT_ID: argo-workflows
ARGOCD_SESSION_TOKEN: ${ARGOCD_SESSION_TOKEN}
BACKSTAGE_CLIENT_SECRET: ${BACKSTAGE_CLIENT_SECRET}
BACKSTAGE_CLIENT_ID: backstage
" > /tmp/secret.yaml
./kubectl apply -f /tmp/secret.yaml

View file

@ -0,0 +1,179 @@
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
metadata:
name: keycloak
namespace: keycloak
spec:
length: 36
digits: 5
symbols: 5
symbolCharacters: "/-+"
noUpper: false
allowRepeat: true
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: keycloak-config
namespace: keycloak
spec:
refreshInterval: "0"
target:
name: keycloak-config
template:
metadata:
labels:
cnoe.io/cli-secret: "true"
cnoe.io/package-name: keycloak
engineVersion: v2
data:
KEYCLOAK_ADMIN_PASSWORD: "{{.KEYCLOAK_ADMIN_PASSWORD}}"
KC_DB_USERNAME: keycloak
KC_DB_PASSWORD: "{{.KC_DB_PASSWORD}}"
POSTGRES_DB: keycloak
POSTGRES_USER: keycloak
POSTGRES_PASSWORD: "{{.KC_DB_PASSWORD}}"
USER_PASSWORD: "{{.USER_PASSWORD}}"
dataFrom:
- sourceRef:
generatorRef:
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
name: keycloak
rewrite:
- transform:
template: "KEYCLOAK_ADMIN_PASSWORD"
- sourceRef:
generatorRef:
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
name: keycloak
rewrite:
- transform:
template: "KC_DB_PASSWORD"
- sourceRef:
generatorRef:
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
name: keycloak
rewrite:
- transform:
template: "USER_PASSWORD"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: eso-store
namespace: keycloak
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: keycloak
name: eso-store
rules:
- apiGroups: [""]
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- authorization.k8s.io
resources:
- selfsubjectrulesreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: eso-store
namespace: keycloak
subjects:
- kind: ServiceAccount
name: eso-store
namespace: keycloak
roleRef:
kind: Role
name: eso-store
apiGroup: rbac.authorization.k8s.io
---
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: keycloak
spec:
provider:
kubernetes:
remoteNamespace: keycloak
server:
caProvider:
type: ConfigMap
name: kube-root-ca.crt
namespace: keycloak
key: ca.crt
auth:
serviceAccount:
name: eso-store
namespace: keycloak
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: eso-store
namespace: gitea
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: eso-store
namespace: gitea
rules:
- apiGroups: [""]
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- authorization.k8s.io
resources:
- selfsubjectrulesreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: eso-store
namespace: gitea
subjects:
- kind: ServiceAccount
name: eso-store
namespace: gitea
roleRef:
kind: Role
name: eso-store
apiGroup: rbac.authorization.k8s.io
---
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: gitea
spec:
provider:
kubernetes:
remoteNamespace: gitea
server:
caProvider:
type: ConfigMap
name: kube-root-ca.crt
namespace: gitea
key: ca.crt
auth:
serviceAccount:
name: eso-store
namespace: gitea

View file

@ -0,0 +1,29 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: metric-server
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://kubernetes-sigs.github.io/metrics-server
targetRevision: 3.12.1
helm:
releaseName: metrics-server
values: |
args:
- --kubelet-insecure-tls #required for kind/minikube
chart: metrics-server
destination:
server: 'https://kubernetes.default.svc'
namespace: kube-system
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View file

@ -0,0 +1,36 @@
#!/bin/bash
# This script replaces the hostname and port used by this implementation.
# It is intended for use in environments such as Codespaces, where the external host and port need to be updated to access in-cluster resources.
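# Example invocation (from the Codespaces guide in this repository):
#   ./replace.sh ${CODESPACE_NAME}-8080.${GITHUB_CODESPACES_PORT_FORWARDING_DOMAIN} 443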
set -e
# Check that the new host and port are provided as arguments
if [ "$#" -ne 2 ]; then
  echo "Usage: $0 NEW_HOST NEW_PORT"
  exit 1
fi
# Assign the script arguments to NEW_HOST and NEW_PORT
NEW_HOST="$1"
NEW_PORT="$2"
# Base directory to start from, "." means the current directory
CURRENT_DIR=$(echo "${PWD##*/}")
if [[ ${CURRENT_DIR} != "ref-implementation" ]]; then
echo "please run this script from the ref-implementation directory"
exit 10
fi
BASE_DIRECTORY="."
# Find all .yaml files recursively starting from the base directory and, in place,
# replace port 8443 with the new port and cnoe.localtest.me with the new host
find "$BASE_DIRECTORY" -type f -name "*.yaml" -exec sed -i "s/8443/${NEW_PORT}/g" {} +
find "$BASE_DIRECTORY" -type f -name "*.yaml" -exec sed -i "s/cnoe\.localtest\.me/${NEW_HOST}/g" {} +
# Remove the hostname-port configuration if the new port is 443. Browsers strip 443, but Keycloak still expects 443 in the URL.
if [[ ${NEW_PORT} == "443" ]]; then
sed -i "/hostname-port/d" keycloak/manifests/install.yaml
sed -i "/hostname-admin/d" keycloak/manifests/install.yaml
sed -i '0,/:443/{s/:443//}' argo-workflows/manifests/dev/patches/cm-argo-workflows.yaml
fi
echo "Replacement complete."

View file

@ -0,0 +1,25 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: spark-operator
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
sources:
- repoURL: https://kubeflow.github.io/spark-operator
targetRevision: 1.1.27
helm:
releaseName: spark-operator
chart: spark-operator
destination:
server: "https://kubernetes.default.svc"
namespace: spark-operator
syncPolicy:
syncOptions:
- CreateNamespace=true
automated:
selfHeal: true

View file

@ -0,0 +1,23 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: create-new-cluster-guestbook
namespace: argocd
labels:
env: dev
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
destination:
name: in-cluster
namespace: crossplane-system
source:
path: stacks/second-cluster/create-new-cluster-guestbook
repoURL: https://gitea.cnoe.localtest.me/giteaAdmin/edfbuilder-shoot.git
targetRevision: HEAD

View file

@ -0,0 +1,16 @@
apiVersion: cluster.argocd.crossplane.io/v1alpha1
kind: Cluster
metadata:
name: argo-app-test
labels:
purpose: dev
spec:
forProvider:
config:
kubeconfigSecretRef:
key: kubeconfig
namespace: crossplane-system
name: argo-app-test-kubeconf
name: argo-app-test-cluster
providerConfigRef:
name: argocd-provider

View file

@ -0,0 +1,26 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: argo-second-cluster-demo
namespace: argocd
spec:
destination:
namespace: jojo
name: argo-app-test-cluster
project: default
source:
path: guestbook
repoURL: https://github.com/argoproj/argocd-example-apps.git
targetRevision: HEAD
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
backoff:
duration: 5s
factor: 2
maxDuration: 1m

View file

@ -0,0 +1,22 @@
apiVersion: container.kind.crossplane.io/v1alpha1
kind: KindCluster
metadata:
name: argo-app-test
spec:
forProvider:
kindConfig: |
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
kubeadmConfigPatches:
- |
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
providerConfigRef:
name: kind-provider
writeConnectionSecretToRef:
namespace: crossplane-system
name: argo-app-test-kubeconf