Compare commits
186 Commits
1e7ef34b5c...latest
| SHA1 | Author | Date | |
|---|---|---|---|
| 1c60146f33 | |||
| 23adf5a6be | |||
| 790d2152d3 | |||
| 3f156984d9 | |||
| d834e58879 | |||
| 3a4a28598a | |||
| cc00e8de8b | |||
| 6404f7772b | |||
| aee2d151a3 | |||
| 15dd965c7c | |||
| 04be76502d | |||
| 620b5ee9b1 | |||
| b3ce3d5067 | |||
| cefc5e5565 | |||
| e25b5947fc | |||
| cde2250d27 | |||
| 4946ee57c1 | |||
| a4ebfa259c | |||
| c447b1be48 | |||
| 35f6eaf618 | |||
| f6603d55a7 | |||
| 742396fd8e | |||
| fd004a7479 | |||
| 08331f6ae3 | |||
| 89173418cd | |||
| 69356bb160 | |||
| cf6f3546d4 | |||
| 89a5d04c42 | |||
| d0e9acf392 | |||
| 7f22d664bb | |||
| 624b2bb1b8 | |||
| 1105260935 | |||
| ef850cd4f1 | |||
| 333490d4c2 | |||
| 083891c884 | |||
| 655e0691c2 | |||
| 9d236e1f97 | |||
| c4d4098b99 | |||
| ff96741d23 | |||
| 9cfab300d0 | |||
| c655dec0bf | |||
| dfe8eb3d46 | |||
| 6e4a07076a | |||
| b740b48782 | |||
| d1acf204ce | |||
| 73714929f9 | |||
| 81177b18d5 | |||
| e2c84e0bf8 | |||
| dea4045dc6 | |||
| e37aac251a | |||
| 2f06076990 | |||
| 0c3cce909b | |||
| c11a777700 | |||
| 9e6467f6bb | |||
| 029918de44 | |||
| 7697f2f36e | |||
| 84a03a6eac | |||
| 209b21c83f | |||
| 2b032964a2 | |||
| 6e2597ffa7 | |||
| dc6f4a0555 | |||
| 7f2240ff6a | |||
| 3e33b17c2c | |||
| aed1806127 | |||
| 57db805f10 | |||
| 8dd4f30803 | |||
| fcf2450a8e | |||
| c2818b1c8c | |||
| eda474ec92 | |||
| f987f9f3ec | |||
| e3364afe28 | |||
| 5d8a4e1791 | |||
| b783db47b9 | |||
| 2a623cf21f | |||
| be7e80d716 | |||
| 8e42b7f782 | |||
| db25c37cde | |||
| f7144a7cdf | |||
| 687b6585e6 | |||
| 52dd463de1 | |||
| 1ac6d41e26 | |||
| e41c5e8ca7 | |||
| fc203268bc | |||
| a64f2b4bfe | |||
| 768f29680b | |||
| f57ea1afd9 | |||
| 43860d2464 | |||
| 85f6f81e23 | |||
| 55bbc6d2d4 | |||
| 3744e9fb82 | |||
| 94d313666a | |||
| 47d88bdf99 | |||
| ee6b0c8ab3 | |||
| 9034ec500b | |||
| 8fbe87890c | |||
| 837371313b | |||
| f854919802 | |||
| 3f58967ebd | |||
| 9d769840b7 | |||
| d75dd0fca4 | |||
| f55875bc8f | |||
| 3cd7a391a1 | |||
| 189a664a23 | |||
| c52bfb3045 | |||
| 50dc452b0d | |||
| ece670c77e | |||
| a40c495e26 | |||
| 0ccb40b8fc | |||
| 70019d0775 | |||
| 8553165048 | |||
| a8058e745d | |||
| c8501f505b | |||
| 4709f6ba84 | |||
| d61af7e58c | |||
| e93179896b | |||
| dbb6381898 | |||
| 1e4a007d72 | |||
| 5d62486f55 | |||
| 7558f369c5 | |||
| 38e230a9a5 | |||
| e20674bd1d | |||
| f98fad2e88 | |||
| 69f9ff7bfb | |||
| 2c849582c9 | |||
| 3ddf7b22b1 | |||
| 79349f9743 | |||
| 9a3f4bac60 | |||
| 556617ece5 | |||
| d35c568250 | |||
| 878eeb1c4b | |||
| 815255d4da | |||
| 96a4e310c8 | |||
| bb2dc111a0 | |||
| d63ee71bfa | |||
| ebc754b4af | |||
| 76fcc1da6c | |||
| 0f012ef2ad | |||
| ccf14644a7 | |||
| 2ae346e8ef | |||
| c2cfef7397 | |||
| c95f585e80 | |||
| aa6a51e4b5 | |||
| 71b6b05c41 | |||
| 17678c914a | |||
| d073c01735 | |||
| 571c3a4dbb | |||
| 31f6b361ac | |||
| 5127028f6d | |||
| 50847afaa0 | |||
| e788b770d9 | |||
| ed0878a9bc | |||
| ad4708fdff | |||
| cf707d1887 | |||
| f111b6337d | |||
| 7a33d8212b | |||
| e9a4de02cc | |||
| 027c9edb6d | |||
| 6ad3654593 | |||
| f0e10ed035 | |||
| 25946ad705 | |||
| c7a97af6a9 | |||
| ea82169286 | |||
| 38c43eee1d | |||
| ae368b504c | |||
| 15c3ca05f8 | |||
| 3ebbd128e1 | |||
| 3eec7c3d02 | |||
| 29bad892fc | |||
| 4d6a5c4984 | |||
| 9105673f57 | |||
| 727cc862bd | |||
| 42151c6eb6 | |||
| 03acc85016 | |||
| 779b2f8255 | |||
| 56ca246d9b | |||
| a8df93957f | |||
| 04b4424ceb | |||
| c1c8b93632 | |||
| d8c72b579b | |||
| d4e3f82013 | |||
| 0d96daac03 | |||
| 62f039141c | |||
| a7dca761f2 | |||
| 5663e93c2d | |||
| d86d651c4f | |||
| 088c534cde |
.gitignore (3 changed lines)

@@ -1 +1,2 @@
-**/secret.yaml
+**/.DS_Store
+.idea/
deploy/.DS_Store (binary file, not shown)
deploy/affine/deployment.yaml (new file, 93 lines)

# --------------------------------------------------------------------
# 5b) Deployment: affine-server (serves HTTP on port 3010)
# --------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: affine-server
  namespace: affine
  labels:
    app: affine-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: affine-server
  template:
    metadata:
      labels:
        app: affine-server
    spec:
      initContainers:
        - name: affine-migrate
          image: ghcr.io/toeverything/affine-graphql:stable-9e7280c
          command: ["sh", "-c", "node ./scripts/self-host-predeploy.js"]
          env:
            - name: REDIS_SERVER_HOST
              value: "redis-lb.redis.svc.cluster.local"
            - name: REDIS_SERVER_PORT
              value: "6379"
            - name: DATABASE_URL
              value: >
                postgresql://$(DB_USERNAME):$(DB_PASSWORD)@postgres-base-rw.postgres.svc.cluster.local:5432/$(DB_DATABASE)
            - name: AFFINE_SERVER_PORT
              value: "3010"
          envFrom:
            - secretRef:
                name: affine-db-secret
          volumeMounts:
            - name: affine-storage
              mountPath: /root/.affine/storage
            - name: affine-config
              mountPath: /root/.affine/config
      containers:
        - name: affine
          image: ghcr.io/toeverything/affine-graphql:stable-9e7280c
          ports:
            - containerPort: 3010
              name: http
          env:
            - name: NODE_TLS_REJECT_UNAUTHORIZED
              value: "0"
            - name: AFFINE_SERVER_HTTPS
              value: "true"
            - name: AFFINE_SERVER_HOST
              value: "affine.prod.panic.haus"
            - name: REDIS_SERVER_HOST
              value: "redis-lb.redis.svc.cluster.local"
            - name: REDIS_SERVER_PORT
              value: "6379"
            - name: DATABASE_URL
              value: >-
                postgresql://$(DB_USERNAME):$(DB_PASSWORD)@postgres-base-rw.postgres.svc.cluster.local:5432/$(DB_DATABASE)
            - name: AFFINE_SERVER_EXTERNAL_URL
              value: "https://affine.prod.panic.haus"
            - name: AFFINE_SERVER_PORT
              value: "3010"
          envFrom:
            - secretRef:
                name: affine-db-secret
          readinessProbe:
            httpGet:
              path: /health
              port: 3010
            initialDelaySeconds: 10
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /health
              port: 3010
            initialDelaySeconds: 30
            periodSeconds: 20
          volumeMounts:
            - name: affine-storage
              mountPath: /root/.affine/storage
            - name: affine-config
              mountPath: /root/.affine/config
      volumes:
        - name: affine-storage
          persistentVolumeClaim:
            claimName: affine-storage-pvc
        - name: affine-config
          persistentVolumeClaim:
            claimName: affine-config-pvc
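Both probes hit `/health` on port 3010. Once the pod is running, one quick way to check that endpoint by hand is a port-forward; a minimal sketch (the local port 3010 is an arbitrary choice):

```shell
# Forward the affine-server Deployment's HTTP port to localhost
kubectl -n affine port-forward deploy/affine-server 3010:3010 &
# Probe the same path the readiness/liveness checks use
curl -fsS http://localhost:3010/health
```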
deploy/affine/ingress.yaml (new file, 27 lines)

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: affine-ingress
  namespace: affine
  annotations:
    # (If you’re using cert-manager + Let’s Encrypt)
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - affine.prod.panic.haus   # ← replace with your desired Affine hostname
      secretName: affine-tls       # ← must match an existing TLS Secret for that host
  rules:
    - host: affine.prod.panic.haus # ← change to whatever subdomain you choose
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: affine-server
                port:
                  number: 3010
deploy/affine/kustomization.yaml (new file, 11 lines)

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: affine

resources:
  - secret.yaml
  - pvc.yaml
  - service.yaml
  - deployment.yaml
  - ingress.yaml
deploy/affine/pvc.yaml (new file, 28 lines)

# 3a) PVC for Affine’s upload storage (~/root/.affine/storage)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: affine-storage-pvc
  namespace: affine
spec:
  storageClassName: longhorn
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi

---
# 3b) PVC for Affine’s config (~/root/.affine/config)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: affine-config-pvc
  namespace: affine
spec:
  storageClassName: longhorn
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
deploy/affine/secret.yaml (new file, 10 lines)

apiVersion: v1
kind: Secret
metadata:
  name: affine-db-secret
  namespace: affine
stringData:
  # Database credentials for Affine
  DB_USERNAME: "affine"
  DB_PASSWORD: "tqMB9UjJ7GZrWnux4sJ9nDPR4xQLq6Vz"
  DB_DATABASE: "affine_db"
deploy/affine/service.yaml (new file, 15 lines)

# This Service exposes Affine on port 3010 within the cluster
apiVersion: v1
kind: Service
metadata:
  name: affine-server
  namespace: affine
spec:
  selector:
    app: affine-server
  ports:
    - name: http
      port: 3010
      targetPort: 3010
      protocol: TCP
  type: ClusterIP
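With the secret, PVCs, service, deployment, and ingress in place, the kustomization above ties them together. A minimal apply-and-verify sketch, assuming the `affine` namespace has not been created elsewhere:

```shell
kubectl create namespace affine   # skip if the namespace already exists
kubectl apply -k deploy/affine    # renders kustomization.yaml and applies all listed resources
kubectl -n affine rollout status deployment/affine-server
kubectl -n affine get ingress affine-ingress
```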
deploy/appflowy/deployment.yaml (new file, 350 lines)

apiVersion: apps/v1
kind: Deployment
metadata:
  name: gotrue
  namespace: appflowy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gotrue
  template:
    metadata:
      labels:
        app: gotrue
    spec:
      containers:
        - name: gotrue
          image: appflowyinc/gotrue:latest
          ports:
            - containerPort: 9999
          env:
            - name: GOTRUE_SAML_ENABLED
              value: "true"
            - name: GOTRUE_SAML_PRIVATE_KEY
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: GOTRUE_SAML_PRIVATE_KEY
            # ----- DB (Postgres HA) -----
            - name: GOTRUE_DB_DRIVER
              value: postgres
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: GOTRUE_DATABASE_URL
            - name: GOTRUE_ADMIN_EMAIL
              value: hello@beatrice.wtf
            - name: GOTRUE_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: GOTRUE_ADMIN_PASSWORD
            - name: GOTRUE_DISABLE_SIGNUP
              value: "true"
            - name: GOTRUE_SITE_URL
              value: "appflowy-flutter://"
            - name: GOTRUE_URI_ALLOW_LIST
              value: "**"
            - name: GOTRUE_JWT_SECRET
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: GOTRUE_JWT_SECRET
            - name: GOTRUE_JWT_EXP
              value: "7200"
            - name: GOTRUE_SMTP_HOST
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_HOST
            - name: GOTRUE_SMTP_PORT
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_PORT
            - name: GOTRUE_SMTP_USER
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_USER
            - name: GOTRUE_SMTP_PASS
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_PASS
            - name: GOTRUE_SMTP_ADMIN_EMAIL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_USER
            - name: PORT
              value: "9999"
            - name: GOTRUE_JWT_ADMIN_GROUP_NAME
              value: supabase_admin
            - name: API_EXTERNAL_URL
              value: https://orbit.panic.haus/gotrue
            - name: GOTRUE_MAILER_URLPATHS_CONFIRMATION
              value: /gotrue/verify
            - name: GOTRUE_MAILER_URLPATHS_INVITE
              value: /gotrue/verify
            - name: GOTRUE_MAILER_URLPATHS_RECOVERY
              value: /gotrue/verify
            - name: GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE
              value: /gotrue/verify

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: appflowy-cloud
  namespace: appflowy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: appflowy-cloud
  template:
    metadata:
      labels:
        app: appflowy-cloud
    spec:
      containers:
        - name: appflowy-cloud
          image: appflowyinc/appflowy_cloud:latest
          ports:
            - containerPort: 8000
          env:
            # ----- Database -----
            - name: APPFLOWY_DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_DATABASE_URL
            - name: APPFLOWY_REDIS_URI
              value: "redis://redis-lb.redis.svc.cluster.local:6379"

            # ----- GoTrue (Auth) -----
            - name: APPFLOWY_GOTRUE_BASE_URL
              value: "http://gotrue.appflowy.svc.cluster.local:9999"
            - name: APPFLOWY_GOTRUE_JWT_SECRET
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: GOTRUE_JWT_SECRET
            - name: APPFLOWY_GOTRUE_JWT_EXP
              value: "7200"

            # ----- S3 / Minio -----
            - name: APPFLOWY_S3_USE_MINIO
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_S3_USE_MINIO
            - name: APPFLOWY_S3_MINIO_URL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_S3_MINIO_URL
            - name: APPFLOWY_S3_BUCKET
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_S3_BUCKET
            - name: APPFLOWY_S3_REGION
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: AWS_REGION
            - name: APPFLOWY_S3_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: AWS_ACCESS_KEY
            - name: APPFLOWY_S3_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: AWS_SECRET_KEY
            #- name: APPFLOWY_S3_PRESIGNED_URL_ENDPOINT
            #  value: "https://minio.example.com"
            #  ← Replace with your actual public Minio endpoint if different

            # ----- Mailer (AppFlowy Cloud) -----
            - name: APPFLOWY_MAILER_SMTP_HOST
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_HOST
            - name: APPFLOWY_MAILER_SMTP_PORT
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_PORT
            - name: APPFLOWY_MAILER_SMTP_USERNAME
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_USER
            - name: APPFLOWY_MAILER_SMTP_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_PASS
            - name: APPFLOWY_MAILER_SMTP_EMAIL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_USER
            - name: APPFLOWY_MAILER_SMTP_TLS_KIND
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_TLS_KIND

            # ----- General -----
            - name: APPFLOWY_ACCESS_CONTROL
              value: "true"
            - name: RUST_LOG
              value: info
            - name: APPFLOWY_ENVIRONMENT
              value: production
            - name: APPFLOWY_WEB_URL
              value: "https://orbit.panic.haus" # ← your public AppFlowy URL
          readinessProbe:
            httpGet:
              path: /health
              port: 8000
            initialDelaySeconds: 10
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /health
              port: 8000
            initialDelaySeconds: 20
            periodSeconds: 20

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: admin-frontend
  namespace: appflowy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: admin-frontend
  template:
    metadata:
      labels:
        app: admin-frontend
    spec:
      containers:
        - name: admin-frontend
          image: appflowyinc/admin_frontend:latest
          ports:
            - containerPort: 80
          env:
            - name: ADMIN_FRONTEND_REDIS_URL
              value: "redis://redis-lb.redis.svc.cluster.local:6379"
            - name: ADMIN_FRONTEND_GOTRUE_URL
              value: "http://gotrue.appflowy.svc.cluster.local:9999"
            - name: ADMIN_FRONTEND_APPFLOWY_CLOUD_URL
              value: "http://appflowy-cloud.appflowy.svc.cluster.local:8000"
            - name: ADMIN_FRONTEND_PATH_PREFIX
              value: "/console"
            - name: ADMIN_FRONTEND_PORT
              value: "80"
          readinessProbe:
            httpGet:
              path: /console
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 10

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: appflowy-worker
  namespace: appflowy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: appflowy-worker
  template:
    metadata:
      labels:
        app: appflowy-worker
    spec:
      containers:
        - name: appflowy-worker
          image: appflowyinc/appflowy_worker:latest
          env:
            - name: RUST_LOG
              value: info
            - name: APPFLOWY_ENVIRONMENT
              value: production
            - name: APPFLOWY_WORKER_REDIS_URL
              value: "redis://redis-lb.redis.svc.cluster.local:6379"
            - name: APPFLOWY_WORKER_DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: GOTRUE_DATABASE_URL
            - name: APPFLOWY_WORKER_DATABASE_NAME
              value: appflowy_db
            - name: APPFLOWY_S3_USE_MINIO
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_S3_USE_MINIO
            - name: APPFLOWY_S3_MINIO_URL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_S3_MINIO_URL
            - name: APPFLOWY_S3_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: AWS_ACCESS_KEY
            - name: APPFLOWY_S3_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: AWS_SECRET_KEY
            - name: APPFLOWY_S3_BUCKET
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_S3_BUCKET

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: appflowy-web
  namespace: appflowy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: appflowy-web
  template:
    metadata:
      labels:
        app: appflowy-web
    spec:
      containers:
        - name: appflowy-web
          image: appflowyinc/appflowy_web:latest
          ports:
            - containerPort: 80
          env:
            - name: APPFLOWY_CLOUD_URL
              value: "http://appflowy-cloud.appflowy.svc.cluster.local:8000"
deploy/appflowy/gotrue-ingress.yaml (new file, 31 lines)

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: appflowy-gotrue-ingress
  namespace: appflowy
  annotations:
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$2
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
  ingressClassName: nginx

  tls:
    - hosts:
        - orbit.panic.haus
      secretName: appflowy-tls

  rules:
    - host: orbit.panic.haus
      http:
        paths:
          # GoTrue: rewrite /gotrue(/|$)(.*) → /$2
          - path: /gotrue(/|$)(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: gotrue
                port:
                  number: 9999
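The regex path plus `rewrite-target: /$2` strips the `/gotrue` prefix before the request reaches the GoTrue service, so for example `/gotrue/verify` arrives at the backend as `/verify`. A quick external check of the routing, assuming GoTrue exposes a `/health` endpoint:

```shell
# Should be rewritten to /health on the gotrue service (port 9999)
curl -i https://orbit.panic.haus/gotrue/health
```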
deploy/appflowy/ingress.yaml (new file, 56 lines)

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: appflowy-ingress
  namespace: appflowy
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
spec:
  ingressClassName: nginx

  tls:
    - hosts:
        - orbit.panic.haus   # ← replace with your public domain
      secretName: appflowy-tls

  rules:
    - host: orbit.panic.haus
      http:
        paths:
          # 1) Admin UI (served under /console)
          - path: /console
            pathType: Prefix
            backend:
              service:
                name: admin-frontend
                port:
                  number: 80

          # 3) AppFlowy-Cloud API & Web
          #    • If you want API served on /api, and the static Web on /
          #    • You could also send all traffic to appflowy-web and let it call
          #      the backend at /api internally.

          # a) Direct all `/api/*` calls to the backend service
          - path: /api
            pathType: Prefix
            backend:
              service:
                name: appflowy-cloud
                port:
                  number: 8000

          # b) Everything else (root path) → appflowy-web (static UI)
          - path: /
            pathType: Prefix
            backend:
              service:
                name: appflowy-web
                port:
                  number: 80
deploy/appflowy/kustomization.yaml (new file, 11 lines)

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: appflowy

resources:
  - secret.yaml
  - deployment.yaml
  - service.yaml
  - ingress.yaml
  - gotrue-ingress.yaml
deploy/appflowy/secret.yaml (new file, 46 lines)

apiVersion: v1
kind: Secret
metadata:
  name: appflowy-secrets
  namespace: appflowy
stringData:
  FQDN: "orbit.panic.haus"
  SCHEME: "https"
  APPFLOWY_BASE_URL: "https://orbit.panic.haus"
  APPFLOWY_WEB_URL: "https://orbit.panic.haus"

  # ==== PostgreSQL credentials ====
  GOTRUE_DATABASE_URL: "postgres://appflowy:AjUIkz5lcaEGpCrO9KHYAvaKbLsH2Q0e@postgres-base-rw.postgres.svc.cluster.local:5432/appflowy_db?search_path=auth"
  APPFLOWY_DATABASE_URL: "postgres://appflowy:AjUIkz5lcaEGpCrO9KHYAvaKbLsH2Q0e@postgres-base-rw.postgres.svc.cluster.local:5432/appflowy_db"

  # ==== GoTrue (Auth) keys ====
  GOTRUE_JWT_SECRET: "5IqQzMmpRPoeParMsgoWIphrCYdhFhxz9NSyEQYlwGyTrRSsjInyMSaM44ZCH"
  GOTRUE_ADMIN_PASSWORD: "KaTPKUXiDUVIcUYWjqSy5SFdqrIl5csS"
GOTRUE_SAML_PRIVATE_KEY: "MIIEpAIBAAKCAQEAz625FMeC/kzE3M1PcX9klYmq4cNJKCXFl3UAiu/VR+RsPoMskVloyNaeESx+C/XhjMySxOGyeAepO6haIybMFqbEiBPjkQNASYUcEdp+pfGliTkgkiffiq3qSIt+ZylVUniGEEnM3JznIoFlW9ikNDlCTRObasIQ0io3bwaRP5pnzMDAPc7i8xBlkybj8Mu3HZmGU+xqiv1zNP9kSWINsMm4wt6Lwqbqt+LNr0q+F3H9yORbErFEGRsAsPMTtPIwX8eUb241MU5WmQ8n7Ea/U1+E3scaPr44TSZg9Xl+KwEVhdX9yX6/QKBefv6d/IwgNVkHxyRmRh9dkONxhWZ/8wIDAQABAoIBAAQjEEhHLydUrSk+18HJiV3nN6W2p6rqkbSSKpgZ7fQ4KyXVpBojH1C84boy2jHvzHXrD1NnsY/tiyP6lw0TNUaQPOL/Dm3xlCLCyYvbf+FbXnJM1obCz5OqIjwetz5j1uTFLNp/NdsBLyODU1sQhjjaGSWC6fom8oHVQHRwO416Qz11ZrOzXB8WDUyPImFkT7hU5F2MJFLU94MY6dBC0NKQBWIvFZQMN8WHoTeTlDcdljN9qduqDFAdMZi6JW0YNr0Ycvgt5qn/Me5EFN3+s3RVRnTL/rSENKeKJFcDXes3XEKxbwtzMVqa6sHZrt6LJtN8jx3tpryD2priCjC0TU0CgYEA7RdDpqmgtkeWHeu5nfzJyE1TEvl2qIezhpNIBwYuzACWRWWbzK3XKaLMq91JJSacHLB9kYpp08Rzsk33m6OLk+Q7g1E8ltHyMvR8avX7kczbL4/FV50Ydb14MOrPPlL/xemL0/faIRmfhGaQ3XgOAIqCoYIPY3HHjCUAMRDpZI8CgYEA4D3xJ9+qCtqzwf6afBHfymkCkEn8mO+4dB6kdXIjppor0EW8Xvg4zDYMq9RmO/ROUQypljCLrwx9ZiElNPTwmIAFPWjuSpAEyzZdxEz0H01PhwERvdMtt6FFTSGRQsTUzWTa7oYAn8K/Fu4VkKBVdbmqQhfUdsk+/RqUHRw/iF0CgYEA400th6gSsw7YpeDr+MJ09brkTUmrcBGBlSC4qjtMPDrH1sp+XvG/WWSCErc5PAvTGVI/YHwxz1wFi8lh/O4DkAr8333Pt8yaBi4M5kLkJ7kd3nBYwxGSdLbsdwF3JQpPuv+YFeUGVDuLilUGx70kt3IToSHe/PkFVZ/XmjLbf5MCgYAHAQhKRYsrIZ+hvJEYtPo3eUYyOY1hPYOWZOqgHHuOlZwuui7jDH/BqSKGL3EuCDh2AZ4+aa/DPPGhwgFGgSwOp1kCjQd8Xrk3m7AcFIc/fwuv3NGwCyuPY8MlYJoH6tv2umK4NolIdC3Bypfz134z2iO+Qr5JI4oLH8xmiF5XpQKBgQDM+vmlxMcHfl0OcnAJuQ0SaqVk6ufrMKRg8dPSvn2G84LdF3Vbr0Qx0vCRrmz85Netj5RdqkQh1dhi/QWMGegMw+bPmrDM6/CCEhT+9e6v5r2iKt3BbskbWdyhTm/nX98Er139/0xllF5Cyx54Xq2cTnDEM/Zaq+UXREHTr/L61Q=="

  # ==== Minio (S3) ====
  APPFLOWY_S3_MINIO_URL: "https://s3.minio.panic.haus"
  MINIO_HOST: "s3.minio.panic.haus"
  MINIO_PORT: "443"
  AWS_ACCESS_KEY: "rjtPFRp52DgmWb4kdsyiFKjtBMxYSaow"   # must match your Minio secret
  AWS_SECRET_KEY: "kabSK8RXcONjO8I7GNfJ03WMueJ7fk6z"   # must match your Minio secret
  APPFLOWY_S3_BUCKET: "appflowy"                       # your bucket name
  APPFLOWY_S3_USE_MINIO: "true"
  AWS_REGION: "cluster-panic-haus"
  # If you use AWS S3 instead of Minio, set APPFLOWY_S3_CREATE_BUCKET / AWS_REGION here.

  # ==== GoTrue SMTP (optional) ====
  SMTP_HOST: "mail.mind-overflow.net"
  SMTP_PORT: "465"
  SMTP_USER: "cloud@mind-overflow.net"
  SMTP_PASS: "PcYchuLLUyfT2gvY4Tx7wQ575Tnqjx84zVNoP6Mb"
  SMTP_ADMIN_EMAIL: "hello@beatrice.wtf"

  # ==== AppFlowy Mailer (Cloud) ====
  SMTP_EMAIL: "cloud@mind-overflow.net"
  SMTP_TLS_KIND: "wrapper" # "none" "wrapper" "required" "opportunistic"

  # ==== Additional secrets for AppFlowy AI (if used) ====
  AI_OPENAI_API_KEY: ""

  # (Optional) any other secrets you need can go here.
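Since the `.gitignore` change above ignores `**/secret.yaml`, these values can also be kept out of the repo entirely and created straight from the CLI instead. A minimal sketch, assuming an untracked `appflowy.env` file that holds the same `KEY=value` pairs (the filename is hypothetical):

```shell
# Build the Secret from a local env file and apply it without committing plaintext
kubectl -n appflowy create secret generic appflowy-secrets \
  --from-env-file=appflowy.env \
  --dry-run=client -o yaml | kubectl apply -f -
```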
deploy/appflowy/service.yaml (new file, 95 lines)

apiVersion: v1
kind: Service
metadata:
  name: gotrue
  namespace: appflowy
spec:
  ports:
    - port: 9999
      targetPort: 9999
      protocol: TCP
      name: http
  selector:
    app: gotrue
  type: ClusterIP

---
apiVersion: v1
kind: Service
metadata:
  name: appflowy-cloud
  namespace: appflowy
spec:
  ports:
    - port: 8000
      targetPort: 8000
      protocol: TCP
      name: http
  selector:
    app: appflowy-cloud
  type: ClusterIP

---
apiVersion: v1
kind: Service
metadata:
  name: admin-frontend
  namespace: appflowy
spec:
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: http
  selector:
    app: admin-frontend
  type: ClusterIP

---
apiVersion: v1
kind: Service
metadata:
  name: appflowy-worker
  namespace: appflowy
spec:
  ports:
    - port: 8081
      targetPort: 8081
      protocol: TCP
      name: http
  selector:
    app: appflowy-worker
  type: ClusterIP

---
apiVersion: v1
kind: Service
metadata:
  name: appflowy-web
  namespace: appflowy
spec:
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: http
  selector:
    app: appflowy-web
  type: ClusterIP

# (If you added appflowy-ai)
---
apiVersion: v1
kind: Service
metadata:
  name: appflowy-ai
  namespace: appflowy
spec:
  ports:
    - port: 5001
      targetPort: 5001
      protocol: TCP
      name: http
  selector:
    app: appflowy-ai
  type: ClusterIP
Modified file (name not captured in this view)

@@ -6,6 +6,6 @@ metadata:
 spec:
   instances: 3
   storage:
-    size: 10Gi
+    size: 20Gi
     storageClass: longhorn
   enableSuperuserAccess: true
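Growing `storage.size` on an existing cluster only works if the underlying StorageClass allows volume expansion; Longhorn normally does, but it can be confirmed before applying the change (the `longhorn` class name comes from the manifests above):

```shell
# Prints "true" when the StorageClass permits resizing existing PVCs
kubectl get storageclass longhorn -o jsonpath='{.allowVolumeExpansion}{"\n"}'
```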
deploy/descheduler/Chart.yaml (new file, 19 lines)

apiVersion: v1
appVersion: 0.32.2
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
  pods that can potentially be scheduled on better nodes. In the current implementation,
  descheduler does not schedule replacement of evicted pods but relies on the default
  scheduler for that.
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
  - kubernetes
  - descheduler
  - kube-scheduler
maintainers:
  - email: kubernetes-sig-scheduling@googlegroups.com
    name: Kubernetes SIG Scheduling
name: descheduler
sources:
  - https://github.com/kubernetes-sigs/descheduler
version: 0.32.2
deploy/descheduler/README.md (new file, 91 lines)

# Descheduler for Kubernetes

[Descheduler](https://github.com/kubernetes-sigs/descheduler/) for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.

## TL;DR:

```shell
helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/
helm install my-release --namespace kube-system descheduler/descheduler
```

## Introduction

This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.

## Prerequisites

- Kubernetes 1.14+

## Installing the Chart

To install the chart with the release name `my-release`:

```shell
helm install --namespace kube-system my-release descheduler/descheduler
```

The command deploys _descheduler_ on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.

> **Tip**: List all releases using `helm list`

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

```shell
helm delete my-release
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the _descheduler_ chart and their default values.

| Parameter | Description | Default |
|---|---|---|
| `kind` | Use as CronJob or Deployment | `CronJob` |
| `image.repository` | Docker repository to use | `registry.k8s.io/descheduler/descheduler` |
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `namespaceOverride` | Override the deployment namespace; defaults to .Release.Namespace | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `timeZone` | configure `timeZone` for CronJob | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `3` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `1` |
| `ttlSecondsAfterFinished` | If set, configure `ttlSecondsAfterFinished` for the _descheduler_ job | `nil` |
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
| `replicas` | The replica count for Deployment | `1` |
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `service.enabled` | If `true`, create a service for deployment | `false` |
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
| `serviceMonitor.additionalLabels` | Add custom labels to the ServiceMonitor resource | `{}` |
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
| `serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `nil` |
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `topologySpreadConstraints` | Topology Spread Constraints to spread the descheduler cronjob/deployment across the cluster | `[]` |
| `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
| `commonLabels` | Labels to apply to all resources | `{}` |
| `livenessProbe` | Liveness probe configuration for the descheduler container | _see values.yaml_ |
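The README's install commands pull the chart from the upstream Helm repository; since this copy is vendored under `deploy/descheduler`, it can also be installed directly from the working tree. A minimal sketch:

```shell
# Install (or upgrade) the vendored chart from the local path
helm upgrade --install descheduler ./deploy/descheduler --namespace kube-system
```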
deploy/descheduler/templates/NOTES.txt (new file, 12 lines)

Descheduler installed as a {{ .Values.kind }}.

{{- if eq .Values.kind "Deployment" }}
{{- if eq (.Values.replicas | int) 1 }}
WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
{{- end}}
{{- if .Values.leaderElection }}
{{- if and (hasKey .Values.cmdOptions "dry-run") (eq (get .Values.cmdOptions "dry-run") true) }}
WARNING: You enabled DryRun mode, you can't use Leader Election.
{{- end}}
{{- end}}
{{- end}}
deploy/descheduler/templates/_helpers.tpl (new file, 104 lines)

{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "descheduler.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "descheduler.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Expand the namespace of the release.
Allows overriding it for multi-namespace deployments in combined charts.
*/}}
{{- define "descheduler.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "descheduler.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "descheduler.labels" -}}
app.kubernetes.io/name: {{ include "descheduler.name" . }}
helm.sh/chart: {{ include "descheduler.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- end -}}

{{/*
Selector labels
*/}}
{{- define "descheduler.selectorLabels" -}}
app.kubernetes.io/name: {{ include "descheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "descheduler.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "descheduler.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

{{/*
Leader Election
*/}}
{{- define "descheduler.leaderElection"}}
{{- if .Values.leaderElection -}}
- --leader-elect={{ .Values.leaderElection.enabled }}
{{- if .Values.leaderElection.leaseDuration }}
- --leader-elect-lease-duration={{ .Values.leaderElection.leaseDuration }}
{{- end }}
{{- if .Values.leaderElection.renewDeadline }}
- --leader-elect-renew-deadline={{ .Values.leaderElection.renewDeadline }}
{{- end }}
{{- if .Values.leaderElection.retryPeriod }}
- --leader-elect-retry-period={{ .Values.leaderElection.retryPeriod }}
{{- end }}
{{- if .Values.leaderElection.resourceLock }}
- --leader-elect-resource-lock={{ .Values.leaderElection.resourceLock }}
{{- end }}
{{- if .Values.leaderElection.resourceName }}
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
{{- end }}
{{/* resource namespace value starts with a typo so keeping resourceNamescape for backwards compatibility */}}
{{- $resourceNamespace := default .Values.leaderElection.resourceNamespace .Values.leaderElection.resourceNamescape -}}
{{- if $resourceNamespace -}}
- --leader-elect-resource-namespace={{ $resourceNamespace }}
{{- end -}}
{{- end }}
{{- end }}
deploy/descheduler/templates/clusterrole.yaml (new file, 44 lines)

{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ template "descheduler.fullname" . }}
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
rules:
  - apiGroups: ["events.k8s.io"]
    resources: ["events"]
    verbs: ["create", "update"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "watch", "list"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "watch", "list"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "watch", "list", "delete"]
  - apiGroups: [""]
    resources: ["pods/eviction"]
    verbs: ["create"]
  - apiGroups: ["scheduling.k8s.io"]
    resources: ["priorityclasses"]
    verbs: ["get", "watch", "list"]
  - apiGroups: ["policy"]
    resources: ["poddisruptionbudgets"]
    verbs: ["get", "watch", "list"]
{{- if .Values.leaderElection.enabled }}
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["create", "update"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
    verbs: ["get", "patch", "delete"]
{{- end }}
{{- if and .Values.deschedulerPolicy .Values.deschedulerPolicy.metricsCollector .Values.deschedulerPolicy.metricsCollector.enabled }}
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list"]
{{- end }}
{{- end -}}
deploy/descheduler/templates/clusterrolebinding.yaml (new file, 16 lines)

{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ template "descheduler.fullname" . }}
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "descheduler.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ template "descheduler.serviceAccountName" . }}
    namespace: {{ include "descheduler.namespace" . }}
{{- end -}}
deploy/descheduler/templates/configmap.yaml (new file, 14 lines)

{{- if .Values.deschedulerPolicy }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "descheduler.fullname" . }}
  namespace: {{ include "descheduler.namespace" . }}
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
data:
  policy.yaml: |
    apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
    kind: "DeschedulerPolicy"
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
{{- end }}
deploy/descheduler/templates/cronjob.yaml (new file, 111 lines)

{{- if eq .Values.kind "CronJob" }}
apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob
metadata:
  name: {{ template "descheduler.fullname" . }}
  namespace: {{ include "descheduler.namespace" . }}
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
spec:
  schedule: {{ .Values.schedule | quote }}
  {{- if .Values.suspend }}
  suspend: {{ .Values.suspend }}
  {{- end }}
  concurrencyPolicy: "Forbid"
  {{- if .Values.startingDeadlineSeconds }}
  startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
  {{- end }}
  {{- if ne .Values.successfulJobsHistoryLimit nil }}
  successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
  {{- end }}
  {{- if ne .Values.failedJobsHistoryLimit nil }}
  failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
  {{- end }}
  {{- if .Values.timeZone }}
  timeZone: {{ .Values.timeZone }}
  {{- end }}
  jobTemplate:
    spec:
      {{- if .Values.ttlSecondsAfterFinished }}
      ttlSecondsAfterFinished: {{ .Values.ttlSecondsAfterFinished }}
      {{- end }}
      template:
        metadata:
          name: {{ template "descheduler.fullname" . }}
          annotations:
            checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
            {{- if .Values.podAnnotations }}
            {{- .Values.podAnnotations | toYaml | nindent 12 }}
            {{- end }}
          labels:
            {{- include "descheduler.selectorLabels" . | nindent 12 }}
            {{- if .Values.podLabels }}
            {{- .Values.podLabels | toYaml | nindent 12 }}
            {{- end }}
        spec:
          {{- with .Values.nodeSelector }}
          nodeSelector:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- with .Values.affinity }}
          affinity:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- with .Values.topologySpreadConstraints }}
          topologySpreadConstraints:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- if .Values.dnsConfig }}
          dnsConfig:
            {{- .Values.dnsConfig | toYaml | nindent 12 }}
          {{- end }}
          {{- with .Values.tolerations }}
          tolerations:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- if .Values.priorityClassName }}
          priorityClassName: {{ .Values.priorityClassName }}
          {{- end }}
          serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
          restartPolicy: "Never"
          {{- with .Values.imagePullSecrets }}
          imagePullSecrets:
            {{- toYaml . | nindent 10 }}
          {{- end }}
          containers:
            - name: {{ .Chart.Name }}
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}
              command:
                {{- toYaml .Values.command | nindent 16 }}
              args:
                - --policy-config-file=/policy-dir/policy.yaml
                {{- range $key, $value := .Values.cmdOptions }}
                {{- if ne $value nil }}
                - {{ printf "--%s=%s" $key (toString $value) }}
                {{- else }}
                - {{ printf "--%s" $key }}
                {{- end }}
                {{- end }}
              livenessProbe:
                {{- toYaml .Values.livenessProbe | nindent 16 }}
              ports:
                {{- toYaml .Values.ports | nindent 16 }}
              resources:
                {{- toYaml .Values.resources | nindent 16 }}
              {{- if .Values.securityContext }}
              securityContext:
                {{- toYaml .Values.securityContext | nindent 16 }}
              {{- end }}
              volumeMounts:
                - mountPath: /policy-dir
                  name: policy-volume
          {{- if .Values.podSecurityContext }}
          securityContext:
            {{- toYaml .Values.podSecurityContext | nindent 12 }}
          {{- end }}
          volumes:
            - name: policy-volume
              configMap:
                name: {{ template "descheduler.fullname" . }}
{{- end }}
deploy/descheduler/templates/deployment.yaml (new file, 100 lines)

{{- if eq .Values.kind "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "descheduler.fullname" . }}
  namespace: {{ include "descheduler.namespace" . }}
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
spec:
  {{- if gt (.Values.replicas | int) 1 }}
  {{- if not .Values.leaderElection.enabled }}
  {{- fail "You must set leaderElection to use more than 1 replica"}}
  {{- end}}
  replicas: {{ required "leaderElection required for running more than one replica" .Values.replicas }}
  {{- else }}
  replicas: 1
  {{- end }}
  selector:
    matchLabels:
      {{- include "descheduler.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "descheduler.selectorLabels" . | nindent 8 }}
        {{- if .Values.podLabels }}
        {{- .Values.podLabels | toYaml | nindent 8 }}
        {{- end }}
      annotations:
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
        {{- if .Values.podAnnotations }}
        {{- .Values.podAnnotations | toYaml | nindent 8 }}
        {{- end }}
    spec:
      {{- if .Values.dnsConfig }}
      dnsConfig:
        {{- .Values.dnsConfig | toYaml | nindent 8 }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName }}
      {{- end }}
      serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 6 }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          command:
            {{- toYaml .Values.command | nindent 12 }}
          args:
            - --policy-config-file=/policy-dir/policy.yaml
            - --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
            {{- range $key, $value := .Values.cmdOptions }}
            {{- if ne $value nil }}
            - {{ printf "--%s=%s" $key (toString $value) }}
            {{- else }}
            - {{ printf "--%s" $key }}
            {{- end }}
            {{- end }}
            {{- include "descheduler.leaderElection" . | nindent 12 }}
          ports:
            {{- toYaml .Values.ports | nindent 12 }}
          livenessProbe:
            {{- toYaml .Values.livenessProbe | nindent 12 }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          {{- if .Values.securityContext }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          {{- end }}
          volumeMounts:
            - mountPath: /policy-dir
              name: policy-volume
      {{- if .Values.podSecurityContext }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      {{- end }}
      volumes:
        - name: policy-volume
          configMap:
            name: {{ template "descheduler.fullname" . }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.topologySpreadConstraints }}
      topologySpreadConstraints:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}
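The template refuses to render more than one replica unless leader election is enabled (see the `fail` call above). A hedged example of switching the vendored chart to Deployment mode with two replicas, using only values keys that appear in this chart:

```shell
helm upgrade --install descheduler ./deploy/descheduler --namespace kube-system \
  --set kind=Deployment \
  --set replicas=2 \
  --set leaderElection.enabled=true \
  --set deschedulingInterval=5m
```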
deploy/descheduler/templates/service.yaml (new file, 27 lines)

{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.service.enabled true }}
apiVersion: v1
kind: Service
metadata:
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
  name: {{ template "descheduler.fullname" . }}
  namespace: {{ include "descheduler.namespace" . }}
spec:
  clusterIP: None
  {{- if .Values.service.ipFamilyPolicy }}
  ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
  {{- end }}
  {{- if .Values.service.ipFamilies }}
  ipFamilies: {{ toYaml .Values.service.ipFamilies | nindent 4 }}
  {{- end }}
  ports:
    - name: http-metrics
      port: 10258
      protocol: TCP
      targetPort: 10258
  selector:
    {{- include "descheduler.selectorLabels" . | nindent 4 }}
  type: ClusterIP
{{- end }}
{{- end }}
deploy/descheduler/templates/serviceaccount.yaml (new file, 12 lines)

{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "descheduler.serviceAccountName" . }}
  namespace: {{ include "descheduler.namespace" . }}
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
  {{- if .Values.serviceAccount.annotations }}
  annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
  {{- end }}
{{- end -}}
deploy/descheduler/templates/servicemonitor.yaml (new file, 44 lines)

{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.serviceMonitor.enabled true }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "descheduler.fullname" . }}-servicemonitor
  namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
  labels:
    {{- include "descheduler.labels" . | nindent 4 }}
    {{- if .Values.serviceMonitor.additionalLabels }}
    {{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
    {{- end }}
spec:
  jobLabel: jobLabel
  namespaceSelector:
    matchNames:
      - {{ include "descheduler.namespace" . }}
  selector:
    matchLabels:
      {{- include "descheduler.selectorLabels" . | nindent 6 }}
  endpoints:
    - honorLabels: {{ .Values.serviceMonitor.honorLabels | default true }}
      port: http-metrics
      {{- if .Values.serviceMonitor.interval }}
      interval: {{ .Values.serviceMonitor.interval }}
      {{- end }}
      scheme: https
      tlsConfig:
        {{- if eq .Values.serviceMonitor.insecureSkipVerify true }}
        insecureSkipVerify: true
        {{- end }}
        {{- if .Values.serviceMonitor.serverName }}
        serverName: {{ .Values.serviceMonitor.serverName }}
        {{- end}}
      {{- if .Values.serviceMonitor.metricRelabelings }}
      metricRelabelings:
        {{ tpl (toYaml .Values.serviceMonitor.metricRelabelings | indent 4) . }}
      {{- end }}
      {{- if .Values.serviceMonitor.relabelings }}
      relabelings:
        {{ tpl (toYaml .Values.serviceMonitor.relabelings | indent 4) . }}
      {{- end }}
{{- end }}
{{- end }}
deploy/descheduler/tests/cronjob_test.yaml (new file, 17 lines)

suite: Test Descheduler CronJob

templates:
  - "*.yaml"

release:
  name: descheduler

set:
  kind: CronJob

tests:
  - it: creates CronJob when kind is set
    template: templates/cronjob.yaml
    asserts:
      - isKind:
          of: CronJob
deploy/descheduler/tests/deployment_test.yaml (new file, 49 lines)

suite: Test Descheduler Deployment

templates:
  - "*.yaml"

release:
  name: descheduler

set:
  kind: Deployment

tests:
  - it: creates Deployment when kind is set
    template: templates/deployment.yaml
    asserts:
      - isKind:
          of: Deployment

  - it: enables leader-election
    set:
      leaderElection:
        enabled: true
    template: templates/deployment.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: --leader-elect=true

  - it: support leader-election resourceNamespace
    set:
      leaderElection:
        enabled: true
        resourceNamespace: test
    template: templates/deployment.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: --leader-elect-resource-namespace=test

  - it: support legacy leader-election resourceNamescape
    set:
      leaderElection:
        enabled: true
        resourceNamescape: typo
    template: templates/deployment.yaml
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: --leader-elect-resource-namespace=typo
252
deploy/descheduler/values.yaml
Normal file
@@ -0,0 +1,252 @@
# Default values for descheduler.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# CronJob or Deployment
kind: CronJob

image:
  repository: registry.k8s.io/descheduler/descheduler
  # Overrides the image tag whose default is the chart version
  tag: ""
  pullPolicy: IfNotPresent

imagePullSecrets:
# - name: container-registry-secret

resources:
  requests:
    cpu: 500m
    memory: 256Mi
  limits:
    cpu: 500m
    memory: 256Mi

ports:
  - containerPort: 10258
    protocol: TCP

securityContext:
  allowPrivilegeEscalation: false
  capabilities:
    drop:
      - ALL
  privileged: false
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000

# podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
podSecurityContext: {}
# fsGroup: 1000

nameOverride: ""
fullnameOverride: ""

# -- Override the deployment namespace; defaults to .Release.Namespace
namespaceOverride: ""

# labels that'll be applied to all resources
commonLabels: {}

cronJobApiVersion: "batch/v1"
schedule: "*/2 * * * *"
suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 3
# failedJobsHistoryLimit: 1
# ttlSecondsAfterFinished 600
# timeZone: Etc/UTC

# Required when running as a Deployment
deschedulingInterval: 5m

# Specifies the replica count for Deployment
# Set leaderElection if you want to use more than 1 replica
# Set affinity.podAntiAffinity rule if you want to schedule onto a node
# only if that node is in the same zone as at least one already-running descheduler
replicas: 1

# Specifies whether Leader Election resources should be created
# Required when running as a Deployment
# NOTE: Leader election can't be activated if DryRun enabled
leaderElection: {}
# enabled: true
# leaseDuration: 15s
# renewDeadline: 10s
# retryPeriod: 2s
# resourceLock: "leases"
# resourceName: "descheduler"
# resourceNamespace: "kube-system"

command:
  - "/bin/descheduler"

cmdOptions:
  v: 3

# Recommended to use the latest Policy API version supported by the Descheduler app version
deschedulerPolicyAPIVersion: "descheduler/v1alpha2"

# deschedulerPolicy contains the policies the descheduler will execute.
# To use policies stored in an existing configMap use:
# NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
# deschedulerPolicy: {}
deschedulerPolicy:
  # nodeSelector: "key1=value1,key2=value2"
  # maxNoOfPodsToEvictPerNode: 10
  # maxNoOfPodsToEvictPerNamespace: 10
  # metricsCollector:
  #   enabled: true
  # ignorePvcPods: true
  # evictLocalStoragePods: true
  # evictDaemonSetPods: true
  # tracing:
  #   collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
  #   transportCert: ""
  #   serviceName: ""
  #   serviceNamespace: ""
  #   sampleRate: 1.0
  #   fallbackToNoOpProviderOnError: true
  profiles:
    - name: default
      pluginConfig:
        - name: DefaultEvictor
          args:
            ignorePvcPods: true
            evictLocalStoragePods: true
        - name: RemoveDuplicates
        - name: RemovePodsHavingTooManyRestarts
          args:
            podRestartThreshold: 100
            includingInitContainers: true
        - name: RemovePodsViolatingNodeAffinity
          args:
            nodeAffinityType:
              - requiredDuringSchedulingIgnoredDuringExecution
        - name: RemovePodsViolatingNodeTaints
        - name: RemovePodsViolatingInterPodAntiAffinity
        - name: RemovePodsViolatingTopologySpreadConstraint
        - name: LowNodeUtilization
          args:
            thresholds:
              cpu: 40
              memory: 30
              pods: 30
            targetThresholds:
              cpu: 50
              memory: 60
              pods: 50
      plugins:
        balance:
          enabled:
            - RemoveDuplicates
            - RemovePodsViolatingTopologySpreadConstraint
            - LowNodeUtilization
        deschedule:
          enabled:
            - RemovePodsHavingTooManyRestarts
            - RemovePodsViolatingNodeTaints
            - RemovePodsViolatingNodeAffinity
            - RemovePodsViolatingInterPodAntiAffinity

priorityClassName: system-cluster-critical

nodeSelector: {}
# foo: bar

affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#     - matchExpressions:
#       - key: kubernetes.io/e2e-az-name
#         operator: In
#         values:
#         - e2e-az1
#         - e2e-az2
#  podAntiAffinity:
#    requiredDuringSchedulingIgnoredDuringExecution:
#      - labelSelector:
#          matchExpressions:
#            - key: app.kubernetes.io/name
#              operator: In
#              values:
#                - descheduler
#        topologyKey: "kubernetes.io/hostname"
topologySpreadConstraints: []
# - maxSkew: 1
#   topologyKey: kubernetes.io/hostname
#   whenUnsatisfiable: DoNotSchedule
#   labelSelector:
#     matchLabels:
#       app.kubernetes.io/name: descheduler
tolerations: []
# - key: 'management'
#   operator: 'Equal'
#   value: 'tool'
#   effect: 'NoSchedule'

rbac:
  # Specifies whether RBAC resources should be created
  create: true

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:
  # Specifies custom annotations for the serviceAccount
  annotations: {}

podAnnotations: {}

podLabels: {}

dnsConfig: {}

livenessProbe:
  failureThreshold: 3
  httpGet:
    path: /healthz
    port: 10258
    scheme: HTTPS
  initialDelaySeconds: 3
  periodSeconds: 10

service:
  enabled: false
  # @param service.ipFamilyPolicy [string], support SingleStack, PreferDualStack and RequireDualStack
  #
  ipFamilyPolicy: ""
  # @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
  # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
  # E.g.
  # ipFamilies:
  #   - IPv6
  #   - IPv4
  ipFamilies: []

serviceMonitor:
  enabled: false
  # The namespace where Prometheus expects to find service monitors.
  # namespace: ""
  # Add custom labels to the ServiceMonitor resource
  additionalLabels: {}
  # prometheus: kube-prometheus-stack
  interval: ""
  # honorLabels: true
  insecureSkipVerify: true
  serverName: null
  metricRelabelings: []
  # - action: keep
  #   regex: 'descheduler_(build_info|pods_evicted)'
  #   sourceLabels: [__name__]
  relabelings: []
  # - sourceLabels: [__meta_kubernetes_pod_node_name]
  #   separator: ;
  #   regex: ^(.*)$
  #   targetLabel: nodename
  #   replacement: $1
  #   action: replace

10217
deploy/elk-stack/crds.yaml
Normal file
File diff suppressed because it is too large

21
deploy/elk-stack/elasticsearch.yaml
Normal file
@@ -0,0 +1,21 @@
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: elasticsearch-ha
spec:
  version: 8.17.4
  nodeSets:
    - name: default
      count: 3
      config:
        node.store.allow_mmap: false
      # volumeClaimTemplates:
      #   - metadata:
      #       name: elasticsearch-data
      #     spec:
      #       accessModes:
      #         - ReadWriteOnce
      #       storageClassName: longhorn
      #       resources:
      #         requests:
      #           storage: 5Gi

26
deploy/elk-stack/kibana-ingress.yaml
Normal file
@@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana-ingress
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - query.prod.panic.haus
      secretName: kibana-tls

  rules:
    - host: query.prod.panic.haus
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kibana-ha-kb-http
                port:
                  number: 5601

9
deploy/elk-stack/kibana.yaml
Normal file
@@ -0,0 +1,9 @@
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: kibana-ha
spec:
  version: 8.17.4
  count: 2
  elasticsearchRef:
    name: elasticsearch-ha

11
deploy/elk-stack/kustomization.yaml
Normal file
@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: elastic-system
resources:
  # - crds.yaml
  - operator.yaml
  - elasticsearch.yaml
  - kibana.yaml
  - kibana-ingress.yaml
  # - longstash.yaml

58
deploy/elk-stack/longstash.yaml
Normal file
@@ -0,0 +1,58 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash
  namespace: elasticsearch
spec:
  replicas: 2
  selector:
    matchLabels:
      app: logstash
  template:
    metadata:
      labels:
        app: logstash
    spec:
      containers:
        - name: logstash
          image: docker.io/bitnami/logstash:7.17.0
          # Customize environment variables and command-line args as needed
          env:
            - name: ELASTICSEARCH_HOST
              value: "elasticsearch-ha-es-http" # Adjust based on your ES service DNS name
          ports:
            - containerPort: 9600
          volumeMounts:
            - name: logstash-data
              mountPath: /bitnami
      volumes:
        - name: logstash-data
          persistentVolumeClaim:
            claimName: logstash-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: logstash-pvc
  namespace: elasticsearch
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
  name: logstash
  namespace: elasticsearch
spec:
  selector:
    app: logstash
  ports:
    - protocol: TCP
      port: 9600
      targetPort: 9600

797
deploy/elk-stack/operator.yaml
Normal file
@@ -0,0 +1,797 @@
|
||||
# Source: eck-operator/templates/operator-namespace.yaml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: elastic-system
|
||||
labels:
|
||||
name: elastic-system
|
||||
---
|
||||
# Source: eck-operator/templates/service-account.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
automountServiceAccountToken: true
|
||||
metadata:
|
||||
name: elastic-operator
|
||||
namespace: elastic-system
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "2.16.1"
|
||||
---
|
||||
# Source: eck-operator/templates/webhook.yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: elastic-webhook-server-cert
|
||||
namespace: elastic-system
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "2.16.1"
|
||||
---
|
||||
# Source: eck-operator/templates/configmap.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: elastic-operator
|
||||
namespace: elastic-system
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "2.16.1"
|
||||
data:
|
||||
eck.yaml: |-
|
||||
log-verbosity: 0
|
||||
metrics-port: 0
|
||||
metrics-secure: false
|
||||
container-registry: docker.elastic.co
|
||||
max-concurrent-reconciles: 3
|
||||
ca-cert-validity: 8760h
|
||||
ca-cert-rotate-before: 24h
|
||||
cert-validity: 8760h
|
||||
cert-rotate-before: 24h
|
||||
disable-config-watch: false
|
||||
exposed-node-labels: [topology.kubernetes.io/.*,failure-domain.beta.kubernetes.io/.*]
|
||||
set-default-security-context: auto-detect
|
||||
kube-client-timeout: 60s
|
||||
elasticsearch-client-timeout: 180s
|
||||
disable-telemetry: false
|
||||
distribution-channel: all-in-one
|
||||
validate-storage-class: true
|
||||
enable-webhook: true
|
||||
webhook-name: elastic-webhook.k8s.elastic.co
|
||||
webhook-port: 9443
|
||||
operator-namespace: elastic-system
|
||||
enable-leader-election: true
|
||||
elasticsearch-observation-interval: 10s
|
||||
ubi-only: false
|
||||
---
|
||||
# Source: eck-operator/templates/cluster-roles.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: elastic-operator
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "2.16.1"
|
||||
rules:
|
||||
- apiGroups:
|
||||
- "authorization.k8s.io"
|
||||
resources:
|
||||
- subjectaccessreviews
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- coordination.k8s.io
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- coordination.k8s.io
|
||||
resources:
|
||||
- leases
|
||||
resourceNames:
|
||||
- elastic-operator-leader
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- events
|
||||
- persistentvolumeclaims
|
||||
- secrets
|
||||
- services
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
- statefulsets
|
||||
- daemonsets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- poddisruptionbudgets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- elasticsearch.k8s.elastic.co
|
||||
resources:
|
||||
- elasticsearches
|
||||
- elasticsearches/status
|
||||
- elasticsearches/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- autoscaling.k8s.elastic.co
|
||||
resources:
|
||||
- elasticsearchautoscalers
|
||||
- elasticsearchautoscalers/status
|
||||
- elasticsearchautoscalers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- kibana.k8s.elastic.co
|
||||
resources:
|
||||
- kibanas
|
||||
- kibanas/status
|
||||
- kibanas/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- apm.k8s.elastic.co
|
||||
resources:
|
||||
- apmservers
|
||||
- apmservers/status
|
||||
- apmservers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- enterprisesearch.k8s.elastic.co
|
||||
resources:
|
||||
- enterprisesearches
|
||||
- enterprisesearches/status
|
||||
- enterprisesearches/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- beat.k8s.elastic.co
|
||||
resources:
|
||||
- beats
|
||||
- beats/status
|
||||
- beats/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- agent.k8s.elastic.co
|
||||
resources:
|
||||
- agents
|
||||
- agents/status
|
||||
- agents/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- maps.k8s.elastic.co
|
||||
resources:
|
||||
- elasticmapsservers
|
||||
- elasticmapsservers/status
|
||||
- elasticmapsservers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- stackconfigpolicy.k8s.elastic.co
|
||||
resources:
|
||||
- stackconfigpolicies
|
||||
- stackconfigpolicies/status
|
||||
- stackconfigpolicies/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- logstash.k8s.elastic.co
|
||||
resources:
|
||||
- logstashes
|
||||
- logstashes/status
|
||||
- logstashes/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- storage.k8s.io
|
||||
resources:
|
||||
- storageclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- admissionregistration.k8s.io
|
||||
resources:
|
||||
- validatingwebhookconfigurations
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
# Source: eck-operator/templates/cluster-roles.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "elastic-operator-view"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-view: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "2.16.1"
|
||||
rules:
|
||||
- apiGroups: ["elasticsearch.k8s.elastic.co"]
|
||||
resources: ["elasticsearches"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["autoscaling.k8s.elastic.co"]
|
||||
resources: ["elasticsearchautoscalers"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["apm.k8s.elastic.co"]
|
||||
resources: ["apmservers"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["kibana.k8s.elastic.co"]
|
||||
resources: ["kibanas"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["enterprisesearch.k8s.elastic.co"]
|
||||
resources: ["enterprisesearches"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["beat.k8s.elastic.co"]
|
||||
resources: ["beats"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["agent.k8s.elastic.co"]
|
||||
resources: ["agents"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["maps.k8s.elastic.co"]
|
||||
resources: ["elasticmapsservers"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["stackconfigpolicy.k8s.elastic.co"]
|
||||
resources: ["stackconfigpolicies"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["logstash.k8s.elastic.co"]
|
||||
resources: ["logstashes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
# Source: eck-operator/templates/cluster-roles.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: "elastic-operator-edit"
|
||||
labels:
|
||||
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "2.16.1"
|
||||
rules:
|
||||
- apiGroups: ["elasticsearch.k8s.elastic.co"]
|
||||
resources: ["elasticsearches"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["autoscaling.k8s.elastic.co"]
|
||||
resources: ["elasticsearchautoscalers"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["apm.k8s.elastic.co"]
|
||||
resources: ["apmservers"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["kibana.k8s.elastic.co"]
|
||||
resources: ["kibanas"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["enterprisesearch.k8s.elastic.co"]
|
||||
resources: ["enterprisesearches"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["beat.k8s.elastic.co"]
|
||||
resources: ["beats"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["agent.k8s.elastic.co"]
|
||||
resources: ["agents"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["maps.k8s.elastic.co"]
|
||||
resources: ["elasticmapsservers"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["stackconfigpolicy.k8s.elastic.co"]
|
||||
resources: ["stackconfigpolicies"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
- apiGroups: ["logstash.k8s.elastic.co"]
|
||||
resources: ["logstashes"]
|
||||
verbs: ["create", "delete", "deletecollection", "patch", "update"]
|
||||
---
|
||||
# Source: eck-operator/templates/role-bindings.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: elastic-operator
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "2.16.1"
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: elastic-operator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: elastic-operator
|
||||
namespace: elastic-system
|
||||
---
|
||||
# Source: eck-operator/templates/webhook.yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "2.16.1"
|
||||
spec:
|
||||
ports:
|
||||
- name: https
|
||||
port: 443
|
||||
targetPort: 9443
|
||||
selector:
|
||||
control-plane: elastic-operator
|
||||
---
|
||||
# Source: eck-operator/templates/statefulset.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: elastic-operator
|
||||
namespace: elastic-system
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "2.16.1"
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
control-plane: elastic-operator
|
||||
serviceName: elastic-operator
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
# Rename the fields "error" to "error.message" and "source" to "event.source"
|
||||
# This is to avoid a conflict with the ECS "error" and "source" documents.
|
||||
"co.elastic.logs/raw": "[{\"type\":\"container\",\"json.keys_under_root\":true,\"paths\":[\"/var/log/containers/*${data.kubernetes.container.id}.log\"],\"processors\":[{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"error\",\"to\":\"_error\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_error\",\"to\":\"error.message\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"source\",\"to\":\"_source\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_source\",\"to\":\"event.source\"}]}}]}]"
|
||||
"checksum/config": 7c44077bea6cc3ad577d0f45159ddc8c6096c69128668786fc104a4ce081d4d2
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
serviceAccountName: elastic-operator
|
||||
automountServiceAccountToken: true
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
containers:
|
||||
- image: "docker.elastic.co/eck/eck-operator:2.16.1"
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: manager
|
||||
args:
|
||||
- "manager"
|
||||
- "--config=/conf/eck.yaml"
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
readOnlyRootFilesystem: true
|
||||
runAsNonRoot: true
|
||||
env:
|
||||
- name: OPERATOR_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
- name: WEBHOOK_SECRET
|
||||
value: elastic-webhook-server-cert
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1
|
||||
memory: 1Gi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 150Mi
|
||||
ports:
|
||||
- containerPort: 9443
|
||||
name: https-webhook
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- mountPath: "/conf"
|
||||
name: conf
|
||||
readOnly: true
|
||||
- mountPath: /tmp/k8s-webhook-server/serving-certs
|
||||
name: cert
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: conf
|
||||
configMap:
|
||||
name: elastic-operator
|
||||
- name: cert
|
||||
secret:
|
||||
defaultMode: 420
|
||||
secretName: elastic-webhook-server-cert
|
||||
---
|
||||
# Source: eck-operator/templates/webhook.yaml
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
metadata:
|
||||
name: elastic-webhook.k8s.elastic.co
|
||||
labels:
|
||||
control-plane: elastic-operator
|
||||
app.kubernetes.io/version: "2.16.1"
|
||||
webhooks:
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-agent-k8s-elastic-co-v1alpha1-agent
|
||||
failurePolicy: Ignore
|
||||
name: elastic-agent-validation-v1alpha1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- agent.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- agents
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-apm-k8s-elastic-co-v1-apmserver
|
||||
failurePolicy: Ignore
|
||||
name: elastic-apm-validation-v1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apm.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- apmservers
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-apm-k8s-elastic-co-v1beta1-apmserver
|
||||
failurePolicy: Ignore
|
||||
name: elastic-apm-validation-v1beta1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apm.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1beta1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- apmservers
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-beat-k8s-elastic-co-v1beta1-beat
|
||||
failurePolicy: Ignore
|
||||
name: elastic-beat-validation-v1beta1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- beat.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1beta1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- beats
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-enterprisesearch-k8s-elastic-co-v1-enterprisesearch
|
||||
failurePolicy: Ignore
|
||||
name: elastic-ent-validation-v1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- enterprisesearch.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- enterprisesearches
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-enterprisesearch-k8s-elastic-co-v1beta1-enterprisesearch
|
||||
failurePolicy: Ignore
|
||||
name: elastic-ent-validation-v1beta1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- enterprisesearch.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1beta1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- enterprisesearches
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-elasticsearch-k8s-elastic-co-v1-elasticsearch
|
||||
failurePolicy: Ignore
|
||||
name: elastic-es-validation-v1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- elasticsearch.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- elasticsearches
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-elasticsearch-k8s-elastic-co-v1beta1-elasticsearch
|
||||
failurePolicy: Ignore
|
||||
name: elastic-es-validation-v1beta1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- elasticsearch.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1beta1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- elasticsearches
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-ems-k8s-elastic-co-v1alpha1-mapsservers
|
||||
failurePolicy: Ignore
|
||||
name: elastic-ems-validation-v1alpha1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- maps.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- mapsservers
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-kibana-k8s-elastic-co-v1-kibana
|
||||
failurePolicy: Ignore
|
||||
name: elastic-kb-validation-v1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kibana.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- kibanas
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-kibana-k8s-elastic-co-v1beta1-kibana
|
||||
failurePolicy: Ignore
|
||||
name: elastic-kb-validation-v1beta1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kibana.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1beta1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- kibanas
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-autoscaling-k8s-elastic-co-v1alpha1-elasticsearchautoscaler
|
||||
failurePolicy: Ignore
|
||||
name: elastic-esa-validation-v1alpha1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- autoscaling.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- elasticsearchautoscalers
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-scp-k8s-elastic-co-v1alpha1-stackconfigpolicies
|
||||
failurePolicy: Ignore
|
||||
name: elastic-scp-validation-v1alpha1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- stackconfigpolicy.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- stackconfigpolicies
|
||||
- clientConfig:
|
||||
service:
|
||||
name: elastic-webhook-server
|
||||
namespace: elastic-system
|
||||
path: /validate-logstash-k8s-elastic-co-v1alpha1-logstash
|
||||
failurePolicy: Ignore
|
||||
name: elastic-logstash-validation-v1alpha1.k8s.elastic.co
|
||||
matchPolicy: Exact
|
||||
admissionReviewVersions: [v1, v1beta1]
|
||||
sideEffects: None
|
||||
rules:
|
||||
- apiGroups:
|
||||
- logstash.k8s.elastic.co
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- logstashes
|
||||
|
||||
5
deploy/gitea/values.yaml
Normal file
@@ -0,0 +1,5 @@
persistence:
  enabled: true
  storageClass: longhorn
  accessMode: ReadWriteOnce
  size: 20Gi

@@ -13,3 +13,12 @@ data:
      url: http://prometheus-k8s.monitoring:9090
      isDefault: true
      editable: false
    - name: Elasticsearch
      type: elasticsearch
      access: proxy
      url: https://elasticsearch-ha-es-http.elastic-system.svc:9200
      jsonData:
        esVersion: 8.17.4
        timeField: "@timestamp"
        tlsSkipVerify: true
      editable: false

@@ -5,4 +5,4 @@ resources:
  - grafana-deploy.yaml
  - grafana-ingress.yaml
  - grafana-svc.yaml
  - prometheus-ds.yaml
  - data-sources.yaml

18
deploy/grafana/secret.yaml
Normal file
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Secret
metadata:
  name: grafana-db-secret
  namespace: grafana
type: Opaque
data:
  username: Z3JhZmFuYQ==
  password: dndyMGc5aWpoMGIzaXJka3ZqMG1ndXZoM3I=
---
apiVersion: v1
kind: Secret
metadata:
  name: grafana-oauth-secret
  namespace: grafana
type: Opaque
data:
  client-secret: VFVEYU5uY091b1Y1QzFmeUJaeXN3ZzNEU3VYWU9laEQ=

@@ -114,13 +114,13 @@ spec:
          httpGet:
            path: /health/live
            port: 9000 # Use management port for liveness
          initialDelaySeconds: 60
          initialDelaySeconds: 90
          periodSeconds: 30
        readinessProbe:
          httpGet:
            path: /health/ready
            port: 9000 # Use management port for readiness
          initialDelaySeconds: 30
          initialDelaySeconds: 60
          periodSeconds: 15
      affinity:
        # Spread pods across different nodes for higher availability

9
deploy/keycloak/secret.yaml
Normal file
@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
  name: keycloak-db-secret
  namespace: keycloak
type: Opaque
data:
  username: a2V5Y2xvYWs= # base64 encoded
  password: dTgyNXFDTnhmckJTY0tUb1RkM1c5ektWUHhwVnNpN0w= # base64 encoded

@@ -2,3 +2,6 @@ namespace: longhorn-system
resources:
  - longhorn-deploy.yaml
  - longhorn-ingress.yaml
  - oauth2-proxy-longhorn-ingress.yaml
  - oauth2-proxy-longhorn-service.yaml
  - oauth2-proxy-longhorn.yaml

@@ -103,7 +103,7 @@ data:
      reclaimPolicy: "Delete"
      volumeBindingMode: Immediate
      parameters:
        numberOfReplicas: "3"
        numberOfReplicas: "1"
        staleReplicaTimeout: "30"
        fromBackup: ""
        fsType: "ext4"

@@ -4,15 +4,15 @@ metadata:
  name: longhorn-ingress
  namespace: longhorn-system
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    kubernetes.io/ingress.class: nginx
    cert-manager.io/cluster-issuer: letsencrypt-prod
    nginx.ingress.kubernetes.io/auth-signin: https://$host/oauth2/start?rd=$escaped_request_uri
    nginx.ingress.kubernetes.io/auth-url: https://$host/oauth2/auth
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - longhorn.prod.panic.haus
      secretName: longhorn-tls

  rules:
    - host: longhorn.prod.panic.haus
      http:

19
deploy/longhorn/oauth2-proxy-longhorn-ingress.yaml
Normal file
@@ -0,0 +1,19 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: oauth2-proxy-longhorn-ingress
  namespace: longhorn-system
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  rules:
    - host: longhorn.prod.panic.haus
      http:
        paths:
          - path: /oauth2
            pathType: Prefix
            backend:
              service:
                name: oauth2-proxy-longhorn-service
                port:
                  number: 4180

13
deploy/longhorn/oauth2-proxy-longhorn-service.yaml
Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: oauth2-proxy-longhorn-service
  namespace: longhorn-system
spec:
  ports:
    - port: 4180
      targetPort: 4180
      protocol: TCP
      name: http
  selector:
    app: oauth2-proxy-longhorn

38
deploy/longhorn/oauth2-proxy-longhorn.yaml
Normal file
@@ -0,0 +1,38 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: oauth2-proxy-longhorn
  namespace: longhorn-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: oauth2-proxy-longhorn
  template:
    metadata:
      labels:
        app: oauth2-proxy-longhorn
    spec:
      containers:
        - name: oauth2-proxy-longhorn
          image: quay.io/oauth2-proxy/oauth2-proxy:v7.8.2
          args:
            - --provider=keycloak
            - --client-id=longhorn
            - --client-secret=gxyMUP89svnEXnz128ZqNBTLxjlLpBxM
            - --cookie-secret=1arlufhiIIvTT3iPexXVREeo8YDX-ZLk3k33-98FPRM=
            - --oidc-issuer-url=https://sso.panic.haus/realms/panic-haus
            - --cookie-domain=longhorn.prod.panic.haus
            - --email-domain=*
            # - --session-store-type=redis
            # - --redis-connection-url=redis://redis-lb.redis.svc.cluster.local:6379
            - --http-address=0.0.0.0:4180
            - --redirect-url=https://longhorn.prod.panic.haus/oauth2/callback
            - --upstream=http://longhorn-frontend.longhorn-system.svc.cluster.local:80
            - --scope=openid
            - --login-url=https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/auth
            - --validate-url=https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/userinfo
            - --redeem-url=https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/token
          ports:
            - containerPort: 4180
              name: http

12
deploy/minio-operator/base/cluster-role-binding.yaml
Normal file
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: minio-operator-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: minio-operator-role
subjects:
  - kind: ServiceAccount
    name: minio-operator
    namespace: default

178
deploy/minio-operator/base/cluster-role.yaml
Normal file
@@ -0,0 +1,178 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: minio-operator-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- "apiextensions.k8s.io"
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- namespaces
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- services
|
||||
- events
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- create
|
||||
- list
|
||||
- delete
|
||||
- deletecollection
|
||||
- update
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- create
|
||||
- update
|
||||
- list
|
||||
- delete
|
||||
- deletecollection
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
resources:
|
||||
- roles
|
||||
- rolebindings
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- statefulsets
|
||||
- deployments
|
||||
- deployments/finalizers
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
- list
|
||||
- patch
|
||||
- watch
|
||||
- update
|
||||
- delete
|
||||
- apiGroups:
|
||||
- batch
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
- list
|
||||
- patch
|
||||
- watch
|
||||
- update
|
||||
- delete
|
||||
- apiGroups:
|
||||
- "certificates.k8s.io"
|
||||
resources:
|
||||
- "certificatesigningrequests"
|
||||
- "certificatesigningrequests/approval"
|
||||
- "certificatesigningrequests/status"
|
||||
verbs:
|
||||
- update
|
||||
- create
|
||||
- get
|
||||
- delete
|
||||
- list
|
||||
- apiGroups:
|
||||
- certificates.k8s.io
|
||||
resourceNames:
|
||||
- kubernetes.io/legacy-unknown
|
||||
- kubernetes.io/kube-apiserver-client
|
||||
- kubernetes.io/kubelet-serving
|
||||
- beta.eks.amazonaws.com/app-serving
|
||||
resources:
|
||||
- signers
|
||||
verbs:
|
||||
- approve
|
||||
- sign
|
||||
- apiGroups:
|
||||
- authentication.k8s.io
|
||||
resources:
|
||||
- tokenreviews
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- minio.min.io
|
||||
- sts.min.io
|
||||
- job.min.io
|
||||
resources:
|
||||
- "*"
|
||||
verbs:
|
||||
- "*"
|
||||
- apiGroups:
|
||||
- min.io
|
||||
resources:
|
||||
- "*"
|
||||
verbs:
|
||||
- "*"
|
||||
- apiGroups:
|
||||
- monitoring.coreos.com
|
||||
resources:
|
||||
- prometheuses
|
||||
verbs:
|
||||
- '*'
|
||||
- apiGroups:
|
||||
- "coordination.k8s.io"
|
||||
resources:
|
||||
- leases
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- create
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- poddisruptionbudgets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- deletecollection
|
||||
5
deploy/minio-operator/base/crds/kustomization.yaml
Normal file
@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - minio.min.io_tenants.yaml
  - sts.min.io_policybindings.yaml

5749
deploy/minio-operator/base/crds/minio.min.io_tenants.yaml
Normal file
File diff suppressed because it is too large

133
deploy/minio-operator/base/crds/sts.min.io_policybindings.yaml
Normal file
@@ -0,0 +1,133 @@
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.17.2
|
||||
operator.min.io/version: v7.0.1
|
||||
name: policybindings.sts.min.io
|
||||
spec:
|
||||
group: sts.min.io
|
||||
names:
|
||||
kind: PolicyBinding
|
||||
listKind: PolicyBindingList
|
||||
plural: policybindings
|
||||
shortNames:
|
||||
- policybinding
|
||||
singular: policybinding
|
||||
scope: Namespaced
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .status.currentState
|
||||
name: State
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
properties:
|
||||
application:
|
||||
properties:
|
||||
namespace:
|
||||
type: string
|
||||
serviceaccount:
|
||||
type: string
|
||||
required:
|
||||
- namespace
|
||||
- serviceaccount
|
||||
type: object
|
||||
policies:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- application
|
||||
- policies
|
||||
type: object
|
||||
status:
|
||||
properties:
|
||||
currentState:
|
||||
type: string
|
||||
usage:
|
||||
nullable: true
|
||||
properties:
|
||||
authotizations:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
required:
|
||||
- currentState
|
||||
- usage
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: false
|
||||
subresources:
|
||||
status: {}
|
||||
- additionalPrinterColumns:
|
||||
- jsonPath: .status.currentState
|
||||
name: State
|
||||
type: string
|
||||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
name: v1beta1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
apiVersion:
|
||||
type: string
|
||||
kind:
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
properties:
|
||||
application:
|
||||
properties:
|
||||
namespace:
|
||||
type: string
|
||||
serviceaccount:
|
||||
type: string
|
||||
required:
|
||||
- namespace
|
||||
- serviceaccount
|
||||
type: object
|
||||
policies:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- application
|
||||
- policies
|
||||
type: object
|
||||
status:
|
||||
properties:
|
||||
currentState:
|
||||
type: string
|
||||
usage:
|
||||
nullable: true
|
||||
properties:
|
||||
authotizations:
|
||||
format: int64
|
||||
type: integer
|
||||
type: object
|
||||
required:
|
||||
- currentState
|
||||
- usage
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
59
deploy/minio-operator/base/deployment.yaml
Normal file
@@ -0,0 +1,59 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: minio-operator
|
||||
namespace: minio-operator
|
||||
labels:
|
||||
app.kubernetes.io/instance: minio-operator
|
||||
app.kubernetes.io/name: operator
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
name: minio-operator
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: minio-operator
|
||||
app.kubernetes.io/instance: minio-operator
|
||||
app.kubernetes.io/name: operator
|
||||
spec:
|
||||
serviceAccountName: minio-operator
|
||||
containers:
|
||||
- name: minio-operator
|
||||
image: minio/operator:v7.0.1
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- controller
|
||||
resources:
|
||||
requests:
|
||||
cpu: 200m
|
||||
memory: 256Mi
|
||||
ephemeral-storage: 500Mi
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
runAsNonRoot: true
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
env:
|
||||
- name: MINIO_CONSOLE_TLS_ENABLE
|
||||
value: "off"
|
||||
- name: OPERATOR_STS_ENABLED
|
||||
value: "on"
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: name
|
||||
operator: In
|
||||
values:
|
||||
- minio-operator
|
||||
topologyKey: kubernetes.io/hostname
|
||||
6
deploy/minio-operator/base/kustomization.yaml
Normal file
@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization


resources:
  - crds/

11
deploy/minio-operator/base/namespace.yaml
Normal file
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Namespace
metadata:
  name: minio-operator
  labels:
    pod-security.kubernetes.io/enforce: restricted
    pod-security.kubernetes.io/enforce-version: latest
    pod-security.kubernetes.io/audit: restricted
    pod-security.kubernetes.io/audit-version: latest
    pod-security.kubernetes.io/warn: restricted
    pod-security.kubernetes.io/warn-version: latest

5
deploy/minio-operator/base/service-account.yaml
Normal file
@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: minio-operator
  namespace: default

33
deploy/minio-operator/base/service.yaml
Normal file
@@ -0,0 +1,33 @@
apiVersion: v1
kind: Service
metadata:
  name: operator # Please do not change this value
  labels:
    name: minio-operator
    app.kubernetes.io/instance: minio-operator
    app.kubernetes.io/name: operator
  namespace: minio-operator
spec:
  type: ClusterIP
  ports:
    - port: 4221
      name: http
  selector:
    name: minio-operator
    operator: leader
---
apiVersion: v1
kind: Service
metadata:
  name: sts # Please do not change this value
  labels:
    name: minio-operator
  namespace: minio-operator
spec:
  type: ClusterIP
  ports:
    - port: 4223
      targetPort: 4223
      name: https
  selector:
    name: minio-operator

18
deploy/minio-operator/kustomization.yaml
Normal file
@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: minio-operator
commonAnnotations:
  operator.min.io/authors: "MinIO, Inc."
  operator.min.io/license: "AGPLv3"
  operator.min.io/support: "https://subnet.min.io"
  operator.min.io/version: v7.0.1
commonLabels:
  app.kubernetes.io/name: operator
resources:
  - base/namespace.yaml
  - base/service-account.yaml
  - base/cluster-role.yaml
  - base/cluster-role-binding.yaml
  - base/crds/
  - base/service.yaml
  - base/deployment.yaml

37
deploy/minio-tenant/ingress.yaml
Normal file
@@ -0,0 +1,37 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: minio-ingress
|
||||
namespace: minio-tenant
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: nginx
|
||||
cert-manager.io/cluster-issuer: letsencrypt-prod
|
||||
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
|
||||
nginx.ingress.kubernetes.io/proxy-body-size: "100m"
|
||||
spec:
|
||||
rules:
|
||||
- host: s3.minio.panic.haus
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: minio
|
||||
port:
|
||||
number: 9000
|
||||
- host: console.minio.panic.haus
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: minio-console
|
||||
port:
|
||||
number: 9090
|
||||
tls:
|
||||
- hosts:
|
||||
- s3.minio.panic.haus
|
||||
- console.minio.panic.haus
|
||||
secretName: minio-tls
|
||||
12
deploy/minio-tenant/kustomization.yaml
Normal file
@@ -0,0 +1,12 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: minio-tenant

resources:
  - namespace.yaml
  - secret.yaml
  - tenant.yaml
  - ingress.yaml
  - svc-minio.yaml
  - svc-minio-console.yaml

4
deploy/minio-tenant/namespace.yaml
Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: minio-tenant

23
deploy/minio-tenant/secret.yaml
Normal file
@@ -0,0 +1,23 @@
apiVersion: v1
kind: Secret
metadata:
  name: storage-configuration
  namespace: minio-tenant
stringData:
  config.env: |-
    export MINIO_ROOT_USER="rjtPFRp52DgmWb4kdsyiFKjtBMxYSaow"
    export MINIO_ROOT_PASSWORD="kabSK8RXcONjO8I7GNfJ03WMueJ7fk6z"
    export MINIO_DOMAIN="s3.minio.panic.haus"
    export MINIO_BROWSER_REDIRECT_URL="https://console.minio.panic.haus"
    MINIO_REGION_NAME="cluster-panic-haus"
type: Opaque
---
apiVersion: v1
data:
  CONSOLE_ACCESS_KEY: Y29uc29sZQ==
  CONSOLE_SECRET_KEY: ZGRhTDBZSHhlTnR2ZDM4SVI5TVdtS3VFU21ONE00NG4=
kind: Secret
metadata:
  name: storage-user
  namespace: minio-tenant
type: Opaque

16
deploy/minio-tenant/svc-minio-console.yaml
Normal file
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: minio-console
  namespace: minio-tenant
  labels:
    app: minio
spec:
  type: ClusterIP
  selector:
    v1.min.io/tenant: panic-minio
  ports:
    - name: http
      port: 9090
      targetPort: 9090
      protocol: TCP

16
deploy/minio-tenant/svc-minio.yaml
Normal file
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: minio
  namespace: minio-tenant
  labels:
    app: minio
spec:
  type: ClusterIP
  selector:
    v1.min.io/tenant: panic-minio
  ports:
    - name: http-minio
      port: 80
      targetPort: 9000
      protocol: TCP

79
deploy/minio-tenant/tenant.yaml
Normal file
@@ -0,0 +1,79 @@
|
||||
apiVersion: minio.min.io/v2
|
||||
kind: Tenant
|
||||
metadata:
|
||||
annotations:
|
||||
prometheus.io/path: /minio/v2/metrics/cluster
|
||||
prometheus.io/port: "9000"
|
||||
prometheus.io/scrape: "true"
|
||||
labels:
|
||||
app: minio
|
||||
name: panic-minio
|
||||
namespace: minio-tenant
|
||||
spec:
|
||||
exposeServices: {}
|
||||
imagePullPolicy: IfNotPresent
|
||||
certConfig: {}
|
||||
configuration:
|
||||
name: storage-configuration
|
||||
env: []
|
||||
requestAutoCert: false
|
||||
externalCertSecret: []
|
||||
externalCaCertSecret: []
|
||||
externalClientCertSecrets: []
|
||||
features:
|
||||
bucketDNS: false
|
||||
domains: {}
|
||||
image: quay.io/minio/minio:RELEASE.2025-04-03T14-56-28Z
|
||||
imagePullSecret: {}
|
||||
mountPath: /export
|
||||
podManagementPolicy: Parallel
|
||||
pools:
|
||||
- name: pool-0
|
||||
affinity:
|
||||
nodeAffinity: {}
|
||||
podAffinity: {}
|
||||
podAntiAffinity: {}
|
||||
containerSecurityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
runAsGroup: 1000
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
nodeSelector: {}
|
||||
resources: {}
|
||||
securityContext:
|
||||
fsGroup: 1000
|
||||
fsGroupChangePolicy: OnRootMismatch
|
||||
runAsGroup: 1000
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
servers: 4
|
||||
tolerations: []
|
||||
topologySpreadConstraints: []
|
||||
volumeClaimTemplate:
|
||||
apiVersion: v1
|
||||
kind: persistentvolumeclaims
|
||||
metadata: {}
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Gi
|
||||
storageClassName: longhorn
|
||||
status: {}
|
||||
volumesPerServer: 1
|
||||
priorityClassName: ""
|
||||
serviceAccountName: ""
|
||||
serviceMetadata:
|
||||
consoleServiceAnnotations: {}
|
||||
consoleServiceLabels: {}
|
||||
minioServiceAnnotations: {}
|
||||
minioServiceLabels: {}
|
||||
subPath: ""
|
||||
users:
|
||||
- name: storage-user
|
||||
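The Tenant takes its root credentials from the storage-configuration Secret and creates a MinIO user from the storage-user Secret referenced under users. No buckets are declared here, although the Outline secret later in this diff expects an outline bucket; if the operator version in use supports declarative buckets, a hypothetical addition to the Tenant spec could look like:

spec:
  buckets:
    - name: outline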
11
deploy/n8n/kustomization.yaml
Normal file
@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: n8n

resources:
  - n8n-claim0-persistentvolumeclaim.yaml
  - n8n-ingress.yaml
  - namespace.yaml
  - n8n-deployment.yaml
  - n8n-service.yaml
14
deploy/n8n/n8n-claim0-persistentvolumeclaim.yaml
Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  labels:
    service: n8n-claim0
  name: n8n-claim0
  namespace: n8n
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 2Gi
82
deploy/n8n/n8n-deployment.yaml
Normal file
@@ -0,0 +1,82 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    service: n8n
  name: n8n
  namespace: n8n
spec:
  replicas: 1
  selector:
    matchLabels:
      service: n8n
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        service: n8n
    spec:
      initContainers:
        - name: volume-permissions
          image: busybox:1.36
          command: ["sh", "-c", "chown 1000:1000 /data"]
          volumeMounts:
            - name: n8n-claim0
              mountPath: /data
      containers:
        - command:
            - /bin/sh
          args:
            - -c
            - sleep 5; n8n start
          env:
            - name: N8N_EDITOR_BASE_URL
              value: https://n8n.prod.panic.haus/
            - name: WEBHOOK_URL
              value: https://n8n.prod.panic.haus/
            - name: DB_TYPE
              value: postgresdb
            - name: DB_POSTGRESDB_HOST
              value: postgres-base-rw.postgres.svc.cluster.local
            - name: DB_POSTGRESDB_PORT
              value: "5432"
            - name: DB_POSTGRESDB_DATABASE
              value: n8ndb
            - name: DB_POSTGRESDB_USER
              valueFrom:
                secretKeyRef:
                  name: n8n-db-secret
                  key: username
            - name: DB_POSTGRESDB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: n8n-db-secret
                  key: password
            - name: N8N_PROTOCOL
              value: http
            - name: N8N_PORT
              value: "5678"
          image: n8nio/n8n
          name: n8n
          ports:
            - containerPort: 5678
          resources:
            requests:
              memory: "250Mi"
            limits:
              memory: "500Mi"
          volumeMounts:
            - mountPath: /home/node/.n8n
              name: n8n-claim0
      restartPolicy: Always
      volumes:
        - name: n8n-claim0
          persistentVolumeClaim:
            claimName: n8n-claim0
        - name: n8n-secret
          secret:
            secretName: n8n-secret
        - name: n8n-db-secret
          secret:
            secretName: n8n-db-secret
25
deploy/n8n/n8n-ingress.yaml
Normal file
@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: n8n
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    # nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - n8n.prod.panic.haus
      secretName: n8n-tls
  rules:
    - host: n8n.prod.panic.haus
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: n8n
                port:
                  number: 5678
16
deploy/n8n/n8n-service.yaml
Normal file
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    service: n8n
  name: n8n
  namespace: n8n
spec:
  type: ClusterIP
  ports:
    - name: "5678"
      port: 5678
      targetPort: 5678
      protocol: TCP
  selector:
    service: n8n
4
deploy/n8n/namespace.yaml
Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: n8n
9
deploy/n8n/secret.yaml
Normal file
@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
  name: n8n-db-secret
  namespace: n8n
type: Opaque
data:
  username: bjhu # base64 encoded
  password: SHFCTkdHcndzN1VFSk5tUDJRa3lIWGF6YkJaN3lTUkY= # base64 encoded
28
deploy/outline-wiki/deploy.yaml
Normal file
@@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: outline
  namespace: outline-wiki
  labels:
    app: outline
spec:
  replicas: 3
  selector:
    matchLabels:
      app: outline
  template:
    metadata:
      labels:
        app: outline
    spec:
      containers:
        - name: outline
          image: outlinewiki/outline:0.84.0
          ports:
            - containerPort: 8089
          envFrom:
            - secretRef:
                name: outline-secrets
          env:
            - name: PORT
              value: "8089"
27
deploy/outline-wiki/ingress.yaml
Normal file
@@ -0,0 +1,27 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: outline-ingress
  namespace: outline-wiki
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: "100m"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - outline.panic.haus
      secretName: outline-wiki-tls

  rules:
    - host: outline.panic.haus
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: outline
                port:
                  number: 8089
10
deploy/outline-wiki/kustomization.yaml
Normal file
@@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: outline-wiki

resources:
  - deploy.yaml
  - service.yaml
  - secret.yaml
  - ingress.yaml
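Unlike the minio-tenant and n8n kustomizations, this one lists no namespace.yaml, so the outline-wiki namespace is assumed to exist before the overlay is applied. If it should be managed here as well, a sketch following the pattern of the other apps (and added to the resources list above) would be:

apiVersion: v1
kind: Namespace
metadata:
  name: outline-wiki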
34
deploy/outline-wiki/secret.yaml
Normal file
@@ -0,0 +1,34 @@
apiVersion: v1
kind: Secret
metadata:
  name: outline-secrets
  namespace: outline-wiki
type: Opaque
stringData:
  SECRET_KEY: eae7766055bb20e0b6fb6838cc889121697e59a2b82fd1590dc47a91489acd95
  UTILS_SECRET: f9e0e1158b7ec2239b465c602172493ee2d1b0765ca6b659b35f64959408492d
  DATABASE_URL: postgres://outline:ULYpprqxQeS2rSBXF8NxEr4FhJkUAwhWJtkwZij6XwBDSvUUKeAifBBG885fPSmd@postgres-base-rw.postgres/outlinedb
  REDIS_URL: redis://redis-lb.redis.svc.cluster.local:6379
  URL: https://outline.panic.haus
  PGSSLMODE: disable
  AWS_ACCESS_KEY_ID: rjtPFRp52DgmWb4kdsyiFKjtBMxYSaow
  AWS_SECRET_ACCESS_KEY: kabSK8RXcONjO8I7GNfJ03WMueJ7fk6z
  AWS_S3_UPLOAD_BUCKET_URL: https://s3.minio.panic.haus/
  AWS_REGION: cluster-panic-haus
  AWS_S3_UPLOAD_BUCKET_NAME: outline
  FILE_STORAGE_UPLOAD_MAX_SIZE: "26214400"
  AWS_S3_FORCE_PATH_STYLE: "true"
  AWS_S3_ACL: private
  OIDC_DISPLAY_NAME: panicSSO
  OIDC_CLIENT_ID: outline
  OIDC_CLIENT_SECRET: W4KxpMkWiRL5EU8yknamRkkZpFFQ1rKN
  OIDC_AUTH_URI: https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/auth?scope=openid
  OIDC_TOKEN_URI: https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/token?scope=openid
  OIDC_USERINFO_URI: https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/userinfo?scope=openid
  SMTP_HOST: mail.mind-overflow.net
  SMTP_PORT: "465"
  SMTP_USERNAME: cloud@mind-overflow.net
  SMTP_PASSWORD: PcYchuLLUyfT2gvY4Tx7wQ575Tnqjx84zVNoP6Mb
  SMTP_FROM_EMAIL: cloud@mind-overflow.net
  SMTP_REPLY_EMAIL: cloud@mind-overflow.net
  SMTP_SECURE: "true"
13
deploy/outline-wiki/service.yaml
Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: outline
  namespace: outline-wiki
spec:
  selector:
    app: outline
  ports:
    - name: http
      port: 80
      targetPort: 8089
  type: ClusterIP
22
deploy/plausible/clickhouse-config.yaml
Normal file
@@ -0,0 +1,22 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: clickhouse-config
data:
  clickhouse-config.xml: |
    <clickhouse>
      <logger>
        <level>warning</level>
        <console>true</console>
      </logger>
      <query_thread_log remove="remove"/>
      <query_log remove="remove"/>
      <text_log remove="remove"/>
      <trace_log remove="remove"/>
      <metric_log remove="remove"/>
      <asynchronous_metric_log remove="remove"/>

      <!-- Update: Required for newer versions of Clickhouse -->
      <session_log remove="remove"/>
      <part_log remove="remove"/>
    </clickhouse>
41
deploy/plausible/clickhouse-deploy.yaml
Normal file
@@ -0,0 +1,41 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: clickhouse
spec:
  replicas: 1
  selector:
    matchLabels:
      app: clickhouse
  template:
    metadata:
      labels:
        app: clickhouse
    spec:
      containers:
        - name: clickhouse
          image: clickhouse/clickhouse-server:22.6-alpine
          # You may expose ports if needed (for example, HTTP on 8123)
          ports:
            - containerPort: 8123
          volumeMounts:
            - name: event-data
              mountPath: /var/lib/clickhouse
            - name: clickhouse-config
              mountPath: /etc/clickhouse-server/config.d/logging.xml
              subPath: clickhouse-config.xml
              readOnly: true
            - name: clickhouse-user-config
              mountPath: /etc/clickhouse-server/users.d/logging.xml
              subPath: clickhouse-user-config.xml
              readOnly: true
      volumes:
        - name: event-data
          persistentVolumeClaim:
            claimName: event-data-pvc
        - name: clickhouse-config
          configMap:
            name: clickhouse-config
        - name: clickhouse-user-config
          configMap:
            name: clickhouse-user-config
11
deploy/plausible/clickhouse-pvc.yaml
Normal file
@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: event-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: longhorn
14
deploy/plausible/clickhouse-svc.yaml
Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
  name: clickhouse
  labels:
    app: clickhouse
spec:
  ports:
    - name: http
      protocol: TCP
      port: 8123
      targetPort: 8123
  selector:
    app: clickhouse
14
deploy/plausible/clickhouse-user-config.yaml
Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: clickhouse-user-config
data:
  clickhouse-user-config.xml: |
    <clickhouse>
      <profiles>
        <default>
          <log_queries>0</log_queries>
          <log_query_threads>0</log_query_threads>
        </default>
      </profiles>
    </clickhouse>
17
deploy/plausible/kustomization.yaml
Normal file
@@ -0,0 +1,17 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

namespace: plausible

resources:
  - clickhouse-config.yaml
  - clickhouse-pvc.yaml
  - clickhouse-svc.yaml
  - mail-svc.yaml
  - plausible-secret.yaml
  - clickhouse-deploy.yaml
  - clickhouse-user-config.yaml
  - mail-deploy.yaml
  - plausible-deploy.yaml
  - plausible-ingress.yaml
  - plausible-svc.yaml
21
deploy/plausible/mail-deploy.yaml
Normal file
@@ -0,0 +1,21 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mail
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mail
  template:
    metadata:
      labels:
        app: mail
    spec:
      nodeSelector:
        kubernetes.io/arch: "amd64"
      containers:
        - name: mail
          image: bytemark/smtp
          ports:
            - containerPort: 25
11
deploy/plausible/mail-svc.yaml
Normal file
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
  name: mail
spec:
  selector:
    app: mail
  ports:
    - protocol: TCP
      port: 25
      targetPort: 25
26
deploy/plausible/plausible-deploy.yaml
Normal file
@@ -0,0 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: plausible
spec:
  replicas: 1
  selector:
    matchLabels:
      app: plausible
  template:
    metadata:
      labels:
        app: plausible
    spec:
      containers:
        - name: plausible
          image: plausible/analytics:latest
          command:
            - sh
            - -c
            - "sleep 10 && /entrypoint.sh db createdb && /entrypoint.sh db migrate && /entrypoint.sh run"
          ports:
            - containerPort: 8000
          envFrom:
            - secretRef:
                name: plausible-env
25
deploy/plausible/plausible-ingress.yaml
Normal file
@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: plausible-ingress
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - webstats.beatrice.wtf
      secretName: plausible-tls
  rules:
    - host: webstats.beatrice.wtf
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: plausible
                port:
                  number: 8000
22
deploy/plausible/plausible-secret.yaml
Normal file
@@ -0,0 +1,22 @@
apiVersion: v1
kind: Secret
metadata:
  name: plausible-env
data:
  ADMIN_USER_EMAIL: aGVsbG9AYmVhdHJpY2Uud3Rm
  ADMIN_USER_NAME: YmVhdHJpY2U=
  ADMIN_USER_PWD: Xl55Z1d4UGtEMiRQSlF1JXZAQ1Q1ZF5lNnRDbmhBXk5qZnpTVlYyISNTN2U3N25wU25wZkpUYWF6RGVWRFVSTA==
  BASE_URL: aHR0cHM6Ly93ZWJzdGF0cy5iZWF0cmljZS53dGY=
  DATABASE_URL: cG9zdGdyZXM6Ly9wbGF1c2libGU6cnY5Mzhnd2d3ZzQzNGYyZjRoZzNnN2gzMDg5N2czaDVnMDk4akBwb3N0Z3Jlcy1iYXNlLXJ3LnBvc3RncmVzOjU0MzIvcGxhdXNpYmxlX2Ri
  CLICKHOUSE_DATABASE_URL: aHR0cDovL2NsaWNraG91c2U6ODEyMy9wbGF1c2libGVfZXZlbnRzX2Ri
  DISABLE_REGISTRATION: dHJ1ZQ==
  MAILER_EMAIL: Y2xvdWRAbWluZC1vdmVyZmxvdy5uZXQ=
  PORT: ODAwMA==
  SECRET_KEY_BASE: M1FRQS9EdEdmR3c3cytjMzF2dnlmZ3lVc2F4RStNOWsxSWIvNVBjTUJIQjVHNWdpek00a2tSQ2lvbUFkU0lKR3FybGJ5R2h6VEFOcUJLWWZyeFZ0eHc9PQ==
  SMTP_HOST_ADDR: bWFpbC5taW5kLW92ZXJmbG93Lm5ldA==
  SMTP_HOST_PORT: NTg3
  SMTP_HOST_SSL_ENABLED: ZmFsc2U=
  SMTP_USER_NAME: Y2xvdWRAbWluZC1vdmVyZmxvdy5uZXQ=
  SMTP_USER_PWD: UGNZY2h1TExVeWZUMmd2WTRUeDd3UTU3NVRucWp4ODR6Vk5vUDZNYg==
11
deploy/plausible/plausible-svc.yaml
Normal file
@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
  name: plausible
spec:
  selector:
    app: plausible
  ports:
    - protocol: TCP
      port: 8000
      targetPort: 8000
@@ -9,12 +9,18 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {
  // Disable the built-in Grafana component
  grafana+:: {},

  // Prometheus customizations: external URL, persistent storage, and self-scrape enabled
  // Prometheus customizations: external URL and persistent storage
  prometheus+:: {
    prometheus+: {
      spec+: {
        serviceMonitorSelector: {},
        externalUrl: 'https://metrics.prod.panic.haus',
        retention: '30d',
        retentionSize: '16GB',
        additionalScrapeConfigs: {
          name: 'prometheus-additional-scrape-configs',
          key: 'additional-scrape-configs.yaml',
        },
        storage: {
          volumeClaimTemplate: {
            spec: {
@@ -24,20 +30,6 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {
            },
          },
        },
        // Set a ServiceMonitor selector that matches the Prometheus service's labels
        serviceMonitorSelector: {
          matchLabels: {
            "k8s-app": "prometheus",
          },
        },
      },
    },
    // Ensure the Prometheus service gets the label so that the selector above matches it
    service+: {
      metadata+: {
        labels: {
          "k8s-app": "prometheus",
        },
      },
    },
  },

@@ -2,7 +2,11 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  labels:
    k8s-app: prometheus
    app.kubernetes.io/component: prometheus
    app.kubernetes.io/instance: k8s
    app.kubernetes.io/name: prometheus
    app.kubernetes.io/part-of: kube-prometheus
    app.kubernetes.io/version: 3.2.1
  name: prometheus-k8s
  namespace: monitoring
spec:

@@ -10,6 +10,9 @@ metadata:
  name: k8s
  namespace: monitoring
spec:
  additionalScrapeConfigs:
    key: additional-scrape-configs.yaml
    name: prometheus-additional-scrape-configs
  alerting:
    alertmanagers:
    - apiVersion: v2
@@ -38,6 +41,7 @@ spec:
    requests:
      memory: 400Mi
  retention: 30d
  retentionSize: 16GB
  ruleNamespaceSelector: {}
  ruleSelector: {}
  scrapeConfigNamespaceSelector: {}
@@ -48,9 +52,7 @@
    runAsUser: 1000
  serviceAccountName: prometheus-k8s
  serviceMonitorNamespaceSelector: {}
  serviceMonitorSelector:
    matchLabels:
      k8s-app: prometheus
  serviceMonitorSelector: {}
  storage:
    volumeClaimTemplate:
      spec:

@@ -2,7 +2,11 @@ apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: prometheus
    app.kubernetes.io/component: prometheus
    app.kubernetes.io/instance: k8s
    app.kubernetes.io/name: prometheus
    app.kubernetes.io/part-of: kube-prometheus
    app.kubernetes.io/version: 3.2.1
  name: prometheus-k8s
  namespace: monitoring
spec:

12
deploy/prometheus/prometheus-additional-scrape-configs.yaml
Normal file
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Secret
metadata:
  name: prometheus-additional-scrape-configs
  namespace: monitoring
stringData:
  additional-scrape-configs.yaml: |
    - job_name: 'proxmox-holly-node-exporter'
      scheme: https
      metrics_path: /metrics
      static_configs:
        - targets: ['node-exporter.holly.panic.haus']
@@ -16,6 +16,8 @@ spec:
      labels:
        app: redis
    spec:
      nodeSelector:
        kubernetes.io/arch: amd64
      containers:
        - name: redis
          image: redis:7.4-alpine

@@ -4,7 +4,7 @@ metadata:
  name: renovate
  namespace: renovate
spec:
  schedule: "*/15 * * * *"
  schedule: '@hourly'
  concurrencyPolicy: Forbid
  jobTemplate:
    spec:

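The schedule moves from an explicit every-15-minutes expression to the '@hourly' macro, which the CronJob controller runs at the top of every hour; the equivalent five-field form would be:

schedule: "0 * * * *"  # same effect as '@hourly'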
1
deploy/rocket-chat/.helmignore
Normal file
@@ -0,0 +1 @@
.git
Some files were not shown because too many files have changed in this diff.