Compare commits

..

169 Commits

Author SHA1 Message Date
1c60146f33 use amd64 for redis 2025-06-30 22:34:55 +02:00
23adf5a6be remove pve exporter 2025-06-07 22:03:08 +02:00
790d2152d3 update prometheus jobs 2025-06-07 20:58:49 +02:00
3f156984d9 fix prometheus 2025-06-07 20:37:44 +02:00
d834e58879 fix secret 2025-06-07 20:34:56 +02:00
3a4a28598a fix file name 2025-06-07 20:33:14 +02:00
cc00e8de8b update prometheus 2025-06-07 20:26:32 +02:00
6404f7772b update prometheus for proxmox 2025-06-07 20:24:46 +02:00
aee2d151a3 switch to stable 2025-06-02 02:49:47 +02:00
15dd965c7c revert to beta 2025-06-02 02:38:56 +02:00
04be76502d fix pvc 2025-06-02 02:36:06 +02:00
620b5ee9b1 move migration and server into same thing 2025-06-02 02:32:28 +02:00
b3ce3d5067 update tags 2025-06-02 02:28:38 +02:00
cefc5e5565 fix tls 2025-06-02 02:11:13 +02:00
e25b5947fc fix fqdn 2025-06-02 02:04:10 +02:00
cde2250d27 fix redis 2025-06-02 01:45:38 +02:00
4946ee57c1 fix port 2025-06-02 01:43:41 +02:00
a4ebfa259c fix port 2025-06-02 01:42:00 +02:00
c447b1be48 fix port 2025-06-02 01:40:48 +02:00
35f6eaf618 fix image 2025-06-02 01:37:07 +02:00
f6603d55a7 use rwm 2025-06-02 01:35:30 +02:00
742396fd8e fix image 2025-06-02 01:32:49 +02:00
fd004a7479 add affine 2025-06-02 01:31:34 +02:00
08331f6ae3 update outline 2025-06-02 01:14:53 +02:00
89173418cd update outline 2025-06-02 01:14:13 +02:00
69356bb160 disable signup 2025-06-02 01:05:58 +02:00
cf6f3546d4 update key 2025-06-02 01:03:32 +02:00
89a5d04c42 update saml 2025-06-02 01:02:23 +02:00
d0e9acf392 update fqdn 2025-06-02 00:50:42 +02:00
7f22d664bb fix ingress 2025-06-02 00:24:26 +02:00
624b2bb1b8 fix ingress 2025-06-02 00:23:05 +02:00
1105260935 fix ingress 2025-06-02 00:20:10 +02:00
ef850cd4f1 fix ingress 2025-06-02 00:19:33 +02:00
333490d4c2 fix ingress 2025-06-02 00:19:05 +02:00
083891c884 fix ingress 2025-06-02 00:06:05 +02:00
655e0691c2 fix ingress 2025-06-02 00:05:40 +02:00
9d236e1f97 fix readiness probe 2025-06-01 23:51:00 +02:00
c4d4098b99 fix ingress 2025-06-01 23:43:56 +02:00
ff96741d23 fix api 2025-06-01 23:39:35 +02:00
9cfab300d0 fix deploy 2025-06-01 23:37:04 +02:00
c655dec0bf fix ingress 2025-06-01 23:35:48 +02:00
dfe8eb3d46 revert psql edit 2025-06-01 23:27:03 +02:00
6e4a07076a fix cluster type 2025-06-01 23:23:05 +02:00
b740b48782 add vector psql ext 2025-06-01 23:22:07 +02:00
d1acf204ce fix port 2025-06-01 23:16:19 +02:00
73714929f9 fix env 2025-06-01 23:13:29 +02:00
81177b18d5 add appflowy 2025-06-01 23:11:51 +02:00
e2c84e0bf8 update prometheus 2025-05-31 15:31:36 +02:00
dea4045dc6 delete bitpoke mysql because it's bad 2025-04-14 19:01:26 +02:00
e37aac251a fix s3 2025-04-14 18:59:22 +02:00
2f06076990 fix s3 2025-04-14 18:58:04 +02:00
0c3cce909b add minio for cloning 2025-04-14 18:43:17 +02:00
c11a777700 fix nodeSelector 2025-04-14 18:18:09 +02:00
9e6467f6bb fix nodeselector 2025-04-14 18:14:53 +02:00
029918de44 update mysql cluster 2025-04-14 18:11:05 +02:00
7697f2f36e fix arch 2025-04-14 14:41:32 +02:00
84a03a6eac fix chart.yaml 2025-04-14 14:38:42 +02:00
209b21c83f fix yaml 2025-04-14 14:32:26 +02:00
2b032964a2 move mysql operator and cluster 2025-04-14 14:30:26 +02:00
6e2597ffa7 fix yaml 2025-04-14 14:28:00 +02:00
dc6f4a0555 update chart 2025-04-14 14:15:43 +02:00
7f2240ff6a add mysql ha 2025-04-14 14:12:07 +02:00
3e33b17c2c fix ingress 2025-04-14 12:41:27 +02:00
aed1806127 fix missing clickhouse svc 2025-04-14 12:37:45 +02:00
57db805f10 fix clickhouse db url 2025-04-14 12:35:28 +02:00
8dd4f30803 fix mailer exec format error 2025-04-14 12:31:10 +02:00
fcf2450a8e fix mailer exec format error 2025-04-14 12:29:13 +02:00
c2818b1c8c fix yaml 2025-04-14 12:04:12 +02:00
eda474ec92 add plausible 2025-04-14 11:59:56 +02:00
f987f9f3ec update longhorn default replicas 2025-04-13 16:00:08 +02:00
e3364afe28 update values 2025-04-13 11:55:23 +02:00
5d8a4e1791 update values 2025-04-13 11:54:01 +02:00
b783db47b9 update descheduler values 2025-04-13 11:49:46 +02:00
2a623cf21f update descheduler values 2025-04-13 11:47:54 +02:00
be7e80d716 fix descheduler values 2025-04-13 11:44:57 +02:00
8e42b7f782 Merge remote-tracking branch 'origin/main' 2025-04-13 11:31:40 +02:00
db25c37cde add descheduler helm 2025-04-13 11:27:16 +02:00
f7144a7cdf update jsonnet for prometheus volume 2025-04-13 00:37:51 +02:00
687b6585e6 fix minio svc 2025-04-13 00:15:05 +02:00
52dd463de1 fix minio svc 2025-04-13 00:12:30 +02:00
1ac6d41e26 remove buggy probes 2025-04-12 23:59:44 +02:00
e41c5e8ca7 fix host 2025-04-12 23:54:49 +02:00
fc203268bc fix host 2025-04-12 23:51:48 +02:00
a64f2b4bfe update probes 2025-04-12 23:48:15 +02:00
768f29680b update probes 2025-04-12 23:43:12 +02:00
f57ea1afd9 update bind addr 2025-04-12 23:39:41 +02:00
43860d2464 fix probes 2025-04-12 23:36:27 +02:00
85f6f81e23 try to make outline HA 2025-04-12 23:30:06 +02:00
55bbc6d2d4 increase body size 2025-04-12 23:04:59 +02:00
3744e9fb82 increase body size 2025-04-12 23:04:17 +02:00
94d313666a update url 2025-04-12 22:58:06 +02:00
47d88bdf99 update keycloak pointers 2025-04-12 22:37:59 +02:00
ee6b0c8ab3 fix ingress 2025-04-12 22:34:04 +02:00
9034ec500b remove volumes 2025-04-12 22:32:15 +02:00
8fbe87890c add outline wiki 2025-04-12 22:28:21 +02:00
837371313b add region to minio config 2025-04-12 21:20:35 +02:00
f854919802 add minio secrets 2025-04-12 21:04:55 +02:00
3f58967ebd fix redirect url 2025-04-06 21:40:59 +02:00
9d769840b7 fix ingress 2025-04-06 21:38:55 +02:00
d75dd0fca4 fix ingress 2025-04-06 21:36:01 +02:00
f55875bc8f fix console port 2025-04-06 21:32:55 +02:00
3cd7a391a1 fix svc 2025-04-06 21:30:27 +02:00
189a664a23 fix secret 2025-04-06 21:27:49 +02:00
c52bfb3045 fix ingress 2025-04-06 21:24:35 +02:00
50dc452b0d fix tenant 2025-04-06 21:20:36 +02:00
ece670c77e fix minio 2025-04-06 21:16:23 +02:00
a40c495e26 fix rbac 2025-04-06 21:14:40 +02:00
0ccb40b8fc fix tls for minio 2025-04-06 21:13:03 +02:00
70019d0775 fix tls 2025-04-06 21:09:30 +02:00
8553165048 fix tls name 2025-04-06 21:06:48 +02:00
a8058e745d fix tls 2025-04-06 21:03:32 +02:00
c8501f505b fix tls termination 2025-04-06 20:58:12 +02:00
4709f6ba84 fix typo 2025-04-06 20:50:20 +02:00
d61af7e58c fix s3 endpoint 2025-04-06 20:46:57 +02:00
e93179896b use clusterip 2025-04-06 20:28:59 +02:00
dbb6381898 fix minio svc 2025-04-06 20:24:56 +02:00
1e4a007d72 update tenant name 2025-04-06 20:16:59 +02:00
5d62486f55 fix minio tenant 2025-04-06 20:12:46 +02:00
7558f369c5 add minio tenant 2025-04-06 20:08:40 +02:00
38e230a9a5 add minio operator 2025-04-06 19:56:09 +02:00
e20674bd1d fix elk for grafana 2025-04-06 12:32:17 +02:00
f98fad2e88 make nexus run on arm 2025-04-06 00:44:54 +02:00
69f9ff7bfb fix ingress 2025-04-05 23:58:37 +02:00
2c849582c9 fix secret name 2025-04-05 23:56:04 +02:00
3ddf7b22b1 update nexus deploy 2025-04-05 23:51:35 +02:00
79349f9743 fix yaml 2025-04-05 23:48:44 +02:00
9a3f4bac60 add nexus oss 2025-04-05 23:45:57 +02:00
556617ece5 update ingress 2025-04-05 22:59:25 +02:00
d35c568250 update values.yaml 2025-04-05 22:58:23 +02:00
878eeb1c4b add templates 2025-04-05 22:55:08 +02:00
815255d4da add ingress 2025-04-05 22:53:20 +02:00
96a4e310c8 update values.yaml 2025-04-05 22:47:14 +02:00
bb2dc111a0 fix yaml 2025-04-05 22:37:43 +02:00
d63ee71bfa add uptime kuma 2025-04-05 22:35:49 +02:00
ebc754b4af fix n8n domain 2025-04-04 21:23:44 +02:00
76fcc1da6c fix n8n svc 2025-04-04 21:06:14 +02:00
0f012ef2ad fix yaml 2025-04-04 21:02:23 +02:00
ccf14644a7 fix yaml 2025-04-04 21:01:35 +02:00
2ae346e8ef add n8n 2025-04-04 20:59:32 +02:00
c2cfef7397 update values 2025-04-03 15:28:53 +02:00
c95f585e80 fix secret 2025-04-03 15:26:54 +02:00
aa6a51e4b5 update values.yaml 2025-04-03 15:25:50 +02:00
71b6b05c41 fix values.yaml 2025-04-03 15:24:09 +02:00
17678c914a update values.yaml 2025-04-03 15:22:03 +02:00
d073c01735 update values 2025-04-03 15:19:07 +02:00
571c3a4dbb update rocket-chat 2025-04-03 15:16:10 +02:00
31f6b361ac add deps 2025-04-03 15:11:37 +02:00
5127028f6d add rocketchat helm 2025-04-03 15:02:31 +02:00
50847afaa0 add rocketchat 2025-04-03 14:54:17 +02:00
e788b770d9 update oauth2 proxy 2025-04-03 11:32:28 +02:00
ed0878a9bc temp fix kibana 2025-03-31 10:47:57 +02:00
ad4708fdff update kibana 2025-03-31 10:41:56 +02:00
cf707d1887 fix kibana 2025-03-31 10:39:33 +02:00
f111b6337d fix kibana 2025-03-31 10:38:15 +02:00
7a33d8212b make kibana use oauth2 2025-03-31 10:30:57 +02:00
e9a4de02cc Revert "try oauth2 proxy for prom"
This reverts commit 027c9edb6d.
2025-03-31 10:23:12 +02:00
027c9edb6d try oauth2 proxy for prom 2025-03-31 10:21:56 +02:00
6ad3654593 fix elastic 2025-03-31 10:06:38 +02:00
f0e10ed035 add elastic data source 2025-03-31 10:04:29 +02:00
25946ad705 fix nginx kibana 2025-03-31 04:24:36 +02:00
c7a97af6a9 add kibana ingress 2025-03-31 04:15:19 +02:00
ea82169286 add kibana 2025-03-31 04:10:52 +02:00
38c43eee1d temp disable longhorn 2025-03-31 04:05:34 +02:00
ae368b504c reduce volume 2025-03-31 03:59:57 +02:00
15c3ca05f8 fix typo 2025-03-31 03:56:59 +02:00
3ebbd128e1 add elasticsearch 2025-03-31 03:56:39 +02:00
3eec7c3d02 add elasticsearch 2025-03-31 03:56:03 +02:00
29bad892fc fix elk 2025-03-31 03:53:40 +02:00
4d6a5c4984 fix typo 2025-03-31 03:50:40 +02:00
273 changed files with 32709 additions and 25 deletions

BIN
.DS_Store vendored

Binary file not shown.

3
.gitignore vendored
View File

@@ -1 +1,2 @@
-**/secret.yaml
+**/.DS_Store
+.idea/

BIN
deploy/.DS_Store vendored

Binary file not shown.

View File

@@ -0,0 +1,93 @@
# --------------------------------------------------------------------
# 5b) Deployment: affine-server (serves HTTP on port 3010)
# --------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: affine-server
  namespace: affine
  labels:
    app: affine-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: affine-server
  template:
    metadata:
      labels:
        app: affine-server
    spec:
      initContainers:
        - name: affine-migrate
          image: ghcr.io/toeverything/affine-graphql:stable-9e7280c
          command: ["sh", "-c", "node ./scripts/self-host-predeploy.js"]
          env:
            - name: REDIS_SERVER_HOST
              value: "redis-lb.redis.svc.cluster.local"
            - name: REDIS_SERVER_PORT
              value: "6379"
            - name: DATABASE_URL
              value: >
                postgresql://$(DB_USERNAME):$(DB_PASSWORD)@postgres-base-rw.postgres.svc.cluster.local:5432/$(DB_DATABASE)
            - name: AFFINE_SERVER_PORT
              value: "3010"
          envFrom:
            - secretRef:
                name: affine-db-secret
          volumeMounts:
            - name: affine-storage
              mountPath: /root/.affine/storage
            - name: affine-config
              mountPath: /root/.affine/config
      containers:
        - name: affine
          image: ghcr.io/toeverything/affine-graphql:stable-9e7280c
          ports:
            - containerPort: 3010
              name: http
          env:
            - name: NODE_TLS_REJECT_UNAUTHORIZED
              value: "0"
            - name: AFFINE_SERVER_HTTPS
              value: "true"
            - name: AFFINE_SERVER_HOST
              value: "affine.prod.panic.haus"
            - name: REDIS_SERVER_HOST
              value: "redis-lb.redis.svc.cluster.local"
            - name: REDIS_SERVER_PORT
              value: "6379"
            - name: DATABASE_URL
              value: >-
                postgresql://$(DB_USERNAME):$(DB_PASSWORD)@postgres-base-rw.postgres.svc.cluster.local:5432/$(DB_DATABASE)
            - name: AFFINE_SERVER_EXTERNAL_URL
              value: "https://affine.prod.panic.haus"
            - name: AFFINE_SERVER_PORT
              value: "3010"
          envFrom:
            - secretRef:
                name: affine-db-secret
          readinessProbe:
            httpGet:
              path: /health
              port: 3010
            initialDelaySeconds: 10
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /health
              port: 3010
            initialDelaySeconds: 30
            periodSeconds: 20
          volumeMounts:
            - name: affine-storage
              mountPath: /root/.affine/storage
            - name: affine-config
              mountPath: /root/.affine/config
      volumes:
        - name: affine-storage
          persistentVolumeClaim:
            claimName: affine-storage-pvc
        - name: affine-config
          persistentVolumeClaim:
            claimName: affine-config-pvc

View File

@@ -0,0 +1,27 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: affine-ingress
  namespace: affine
  annotations:
    # (If you're using cert-manager + Let's Encrypt)
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - affine.prod.panic.haus   # ← replace with your desired Affine hostname
      secretName: affine-tls       # ← must match an existing TLS Secret for that host
  rules:
    - host: affine.prod.panic.haus # ← change to whatever subdomain you choose
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: affine-server
                port:
                  number: 3010

View File

@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: affine
resources:
  - secret.yaml
  - pvc.yaml
  - service.yaml
  - deployment.yaml
  - ingress.yaml

28
deploy/affine/pvc.yaml Normal file
View File

@@ -0,0 +1,28 @@
# 3a) PVC for Affine's upload storage (/root/.affine/storage)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: affine-storage-pvc
  namespace: affine
spec:
  storageClassName: longhorn
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
# 3b) PVC for Affine's config (/root/.affine/config)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: affine-config-pvc
  namespace: affine
spec:
  storageClassName: longhorn
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

10
deploy/affine/secret.yaml Normal file
View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
  name: affine-db-secret
  namespace: affine
stringData:
  # Database credentials for Affine
  DB_USERNAME: "affine"
  DB_PASSWORD: "tqMB9UjJ7GZrWnux4sJ9nDPR4xQLq6Vz"
  DB_DATABASE: "affine_db"

View File

@@ -0,0 +1,15 @@
# This Service exposes Affine on port 3010 within the cluster
apiVersion: v1
kind: Service
metadata:
  name: affine-server
  namespace: affine
spec:
  selector:
    app: affine-server
  ports:
    - name: http
      port: 3010
      targetPort: 3010
      protocol: TCP
  type: ClusterIP

View File

@@ -0,0 +1,350 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gotrue
  namespace: appflowy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gotrue
  template:
    metadata:
      labels:
        app: gotrue
    spec:
      containers:
        - name: gotrue
          image: appflowyinc/gotrue:latest
          ports:
            - containerPort: 9999
          env:
            - name: GOTRUE_SAML_ENABLED
              value: "true"
            - name: GOTRUE_SAML_PRIVATE_KEY
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: GOTRUE_SAML_PRIVATE_KEY
            # ----- DB (Postgres HA) -----
            - name: GOTRUE_DB_DRIVER
              value: postgres
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: GOTRUE_DATABASE_URL
            - name: GOTRUE_ADMIN_EMAIL
              value: hello@beatrice.wtf
            - name: GOTRUE_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: GOTRUE_ADMIN_PASSWORD
            - name: GOTRUE_DISABLE_SIGNUP
              value: "true"
            - name: GOTRUE_SITE_URL
              value: "appflowy-flutter://"
            - name: GOTRUE_URI_ALLOW_LIST
              value: "**"
            - name: GOTRUE_JWT_SECRET
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: GOTRUE_JWT_SECRET
            - name: GOTRUE_JWT_EXP
              value: "7200"
            - name: GOTRUE_SMTP_HOST
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_HOST
            - name: GOTRUE_SMTP_PORT
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_PORT
            - name: GOTRUE_SMTP_USER
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_USER
            - name: GOTRUE_SMTP_PASS
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_PASS
            - name: GOTRUE_SMTP_ADMIN_EMAIL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_USER
            - name: PORT
              value: "9999"
            - name: GOTRUE_JWT_ADMIN_GROUP_NAME
              value: supabase_admin
            - name: API_EXTERNAL_URL
              value: https://orbit.panic.haus/gotrue
            - name: GOTRUE_MAILER_URLPATHS_CONFIRMATION
              value: /gotrue/verify
            - name: GOTRUE_MAILER_URLPATHS_INVITE
              value: /gotrue/verify
            - name: GOTRUE_MAILER_URLPATHS_RECOVERY
              value: /gotrue/verify
            - name: GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE
              value: /gotrue/verify
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: appflowy-cloud
  namespace: appflowy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: appflowy-cloud
  template:
    metadata:
      labels:
        app: appflowy-cloud
    spec:
      containers:
        - name: appflowy-cloud
          image: appflowyinc/appflowy_cloud:latest
          ports:
            - containerPort: 8000
          env:
            # ----- Database -----
            - name: APPFLOWY_DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_DATABASE_URL
            - name: APPFLOWY_REDIS_URI
              value: "redis://redis-lb.redis.svc.cluster.local:6379"
            # ----- GoTrue (Auth) -----
            - name: APPFLOWY_GOTRUE_BASE_URL
              value: "http://gotrue.appflowy.svc.cluster.local:9999"
            - name: APPFLOWY_GOTRUE_JWT_SECRET
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: GOTRUE_JWT_SECRET
            - name: APPFLOWY_GOTRUE_JWT_EXP
              value: "7200"
            # ----- S3 / Minio -----
            - name: APPFLOWY_S3_USE_MINIO
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_S3_USE_MINIO
            - name: APPFLOWY_S3_MINIO_URL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_S3_MINIO_URL
            - name: APPFLOWY_S3_BUCKET
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_S3_BUCKET
            - name: APPFLOWY_S3_REGION
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: AWS_REGION
            - name: APPFLOWY_S3_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: AWS_ACCESS_KEY
            - name: APPFLOWY_S3_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: AWS_SECRET_KEY
            #- name: APPFLOWY_S3_PRESIGNED_URL_ENDPOINT
            #  value: "https://minio.example.com"
            #  # ← Replace with your actual public Minio endpoint if different
            # ----- Mailer (AppFlowy Cloud) -----
            - name: APPFLOWY_MAILER_SMTP_HOST
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_HOST
            - name: APPFLOWY_MAILER_SMTP_PORT
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_PORT
            - name: APPFLOWY_MAILER_SMTP_USERNAME
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_USER
            - name: APPFLOWY_MAILER_SMTP_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_PASS
            - name: APPFLOWY_MAILER_SMTP_EMAIL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_USER
            - name: APPFLOWY_MAILER_SMTP_TLS_KIND
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: SMTP_TLS_KIND
            # ----- General -----
            - name: APPFLOWY_ACCESS_CONTROL
              value: "true"
            - name: RUST_LOG
              value: info
            - name: APPFLOWY_ENVIRONMENT
              value: production
            - name: APPFLOWY_WEB_URL
              value: "https://orbit.panic.haus" # ← your public AppFlowy URL
          readinessProbe:
            httpGet:
              path: /health
              port: 8000
            initialDelaySeconds: 10
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /health
              port: 8000
            initialDelaySeconds: 20
            periodSeconds: 20
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: admin-frontend
  namespace: appflowy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: admin-frontend
  template:
    metadata:
      labels:
        app: admin-frontend
    spec:
      containers:
        - name: admin-frontend
          image: appflowyinc/admin_frontend:latest
          ports:
            - containerPort: 80
          env:
            - name: ADMIN_FRONTEND_REDIS_URL
              value: "redis://redis-lb.redis.svc.cluster.local:6379"
            - name: ADMIN_FRONTEND_GOTRUE_URL
              value: "http://gotrue.appflowy.svc.cluster.local:9999"
            - name: ADMIN_FRONTEND_APPFLOWY_CLOUD_URL
              value: "http://appflowy-cloud.appflowy.svc.cluster.local:8000"
            - name: ADMIN_FRONTEND_PATH_PREFIX
              value: "/console"
            - name: ADMIN_FRONTEND_PORT
              value: "80"
          readinessProbe:
            httpGet:
              path: /console
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 10
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: appflowy-worker
  namespace: appflowy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: appflowy-worker
  template:
    metadata:
      labels:
        app: appflowy-worker
    spec:
      containers:
        - name: appflowy-worker
          image: appflowyinc/appflowy_worker:latest
          env:
            - name: RUST_LOG
              value: info
            - name: APPFLOWY_ENVIRONMENT
              value: production
            - name: APPFLOWY_WORKER_REDIS_URL
              value: "redis://redis-lb.redis.svc.cluster.local:6379"
            - name: APPFLOWY_WORKER_DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: GOTRUE_DATABASE_URL
            - name: APPFLOWY_WORKER_DATABASE_NAME
              value: appflowy_db
            - name: APPFLOWY_S3_USE_MINIO
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_S3_USE_MINIO
            - name: APPFLOWY_S3_MINIO_URL
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_S3_MINIO_URL
            - name: APPFLOWY_S3_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: AWS_ACCESS_KEY
            - name: APPFLOWY_S3_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: AWS_SECRET_KEY
            - name: APPFLOWY_S3_BUCKET
              valueFrom:
                secretKeyRef:
                  name: appflowy-secrets
                  key: APPFLOWY_S3_BUCKET
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: appflowy-web
  namespace: appflowy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: appflowy-web
  template:
    metadata:
      labels:
        app: appflowy-web
    spec:
      containers:
        - name: appflowy-web
          image: appflowyinc/appflowy_web:latest
          ports:
            - containerPort: 80
          env:
            - name: APPFLOWY_CLOUD_URL
              value: "http://appflowy-cloud.appflowy.svc.cluster.local:8000"

View File

@@ -0,0 +1,31 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: appflowy-gotrue-ingress
  namespace: appflowy
  annotations:
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/rewrite-target: /$2
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - orbit.panic.haus
      secretName: appflowy-tls
  rules:
    - host: orbit.panic.haus
      http:
        paths:
          # GoTrue: rewrite /gotrue(/|$)(.*) → /$2
          - path: /gotrue(/|$)(.*)
            pathType: ImplementationSpecific
            backend:
              service:
                name: gotrue
                port:
                  number: 9999
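As a quick illustration of the rewrite rule above (not part of the manifest): with `use-regex` enabled, the second capture group of `/gotrue(/|$)(.*)` becomes `$2`, so the `/gotrue` prefix is stripped before the request reaches the gotrue service. Assuming the ingress is applied and DNS for orbit.panic.haus points at the nginx ingress controller, this can be sanity-checked from any machine:

```shell
# /gotrue/verify is forwarded to the gotrue backend as /verify
curl -i https://orbit.panic.haus/gotrue/verify

# /gotrue/health should hit GoTrue's health endpoint (returns JSON if the pod is up)
curl -i https://orbit.panic.haus/gotrue/health
```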

View File

@@ -0,0 +1,56 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: appflowy-ingress
  namespace: appflowy
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - orbit.panic.haus # ← replace with your public domain
      secretName: appflowy-tls
  rules:
    - host: orbit.panic.haus
      http:
        paths:
          # ┌──────────────────────────────────────────────────────────────
          # │ 1) Admin UI (served under /console)
          # └──────────────────────────────────────────────────────────────
          - path: /console
            pathType: Prefix
            backend:
              service:
                name: admin-frontend
                port:
                  number: 80
          # ┌──────────────────────────────────────────────────────────────
          # │ 3) AppFlowy-Cloud API & Web
          # │ • If you want the API served on /api and the static Web on /.
          # │ • You could also send all traffic to appflowy-web and let it
          # │   call the backend at /api internally.
          # └──────────────────────────────────────────────────────────────
          # a) Direct all `/api/*` calls to the backend service
          - path: /api
            pathType: Prefix
            backend:
              service:
                name: appflowy-cloud
                port:
                  number: 8000
          # b) Everything else (root path) → appflowy-web (static UI)
          - path: /
            pathType: Prefix
            backend:
              service:
                name: appflowy-web
                port:
                  number: 80

View File

@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: appflowy
resources:
  - secret.yaml
  - deployment.yaml
  - service.yaml
  - ingress.yaml
  - gotrue-ingress.yaml

View File

@@ -0,0 +1,46 @@
apiVersion: v1
kind: Secret
metadata:
  name: appflowy-secrets
  namespace: appflowy
stringData:
  FQDN: "orbit.panic.haus"
  SCHEME: "https"
  APPFLOWY_BASE_URL: "https://orbit.panic.haus"
  APPFLOWY_WEB_URL: "https://orbit.panic.haus"
  # ==== PostgreSQL credentials ====
  GOTRUE_DATABASE_URL: "postgres://appflowy:AjUIkz5lcaEGpCrO9KHYAvaKbLsH2Q0e@postgres-base-rw.postgres.svc.cluster.local:5432/appflowy_db?search_path=auth"
  APPFLOWY_DATABASE_URL: "postgres://appflowy:AjUIkz5lcaEGpCrO9KHYAvaKbLsH2Q0e@postgres-base-rw.postgres.svc.cluster.local:5432/appflowy_db"
  # ==== GoTrue (Auth) keys ====
  GOTRUE_JWT_SECRET: "5IqQzMmpRPoeParMsgoWIphrCYdhFhxz9NSyEQYlwGyTrRSsjInyMSaM44ZCH"
  GOTRUE_ADMIN_PASSWORD: "KaTPKUXiDUVIcUYWjqSy5SFdqrIl5csS"
  GOTRUE_SAML_PRIVATE_KEY: "MIIEpAIBAAKCAQEAz625FMeC/kzE3M1PcX9klYmq4cNJKCXFl3UAiu/VR+RsPoMskVloyNaeESx+C/XhjMySxOGyeAepO6haIybMFqbEiBPjkQNASYUcEdp+pfGliTkgkiffiq3qSIt+ZylVUniGEEnM3JznIoFlW9ikNDlCTRObasIQ0io3bwaRP5pnzMDAPc7i8xBlkybj8Mu3HZmGU+xqiv1zNP9kSWINsMm4wt6Lwqbqt+LNr0q+F3H9yORbErFEGRsAsPMTtPIwX8eUb241MU5WmQ8n7Ea/U1+E3scaPr44TSZg9Xl+KwEVhdX9yX6/QKBefv6d/IwgNVkHxyRmRh9dkONxhWZ/8wIDAQABAoIBAAQjEEhHLydUrSk+18HJiV3nN6W2p6rqkbSSKpgZ7fQ4KyXVpBojH1C84boy2jHvzHXrD1NnsY/tiyP6lw0TNUaQPOL/Dm3xlCLCyYvbf+FbXnJM1obCz5OqIjwetz5j1uTFLNp/NdsBLyODU1sQhjjaGSWC6fom8oHVQHRwO416Qz11ZrOzXB8WDUyPImFkT7hU5F2MJFLU94MY6dBC0NKQBWIvFZQMN8WHoTeTlDcdljN9qduqDFAdMZi6JW0YNr0Ycvgt5qn/Me5EFN3+s3RVRnTL/rSENKeKJFcDXes3XEKxbwtzMVqa6sHZrt6LJtN8jx3tpryD2priCjC0TU0CgYEA7RdDpqmgtkeWHeu5nfzJyE1TEvl2qIezhpNIBwYuzACWRWWbzK3XKaLMq91JJSacHLB9kYpp08Rzsk33m6OLk+Q7g1E8ltHyMvR8avX7kczbL4/FV50Ydb14MOrPPlL/xemL0/faIRmfhGaQ3XgOAIqCoYIPY3HHjCUAMRDpZI8CgYEA4D3xJ9+qCtqzwf6afBHfymkCkEn8mO+4dB6kdXIjppor0EW8Xvg4zDYMq9RmO/ROUQypljCLrwx9ZiElNPTwmIAFPWjuSpAEyzZdxEz0H01PhwERvdMtt6FFTSGRQsTUzWTa7oYAn8K/Fu4VkKBVdbmqQhfUdsk+/RqUHRw/iF0CgYEA400th6gSsw7YpeDr+MJ09brkTUmrcBGBlSC4qjtMPDrH1sp+XvG/WWSCErc5PAvTGVI/YHwxz1wFi8lh/O4DkAr8333Pt8yaBi4M5kLkJ7kd3nBYwxGSdLbsdwF3JQpPuv+YFeUGVDuLilUGx70kt3IToSHe/PkFVZ/XmjLbf5MCgYAHAQhKRYsrIZ+hvJEYtPo3eUYyOY1hPYOWZOqgHHuOlZwuui7jDH/BqSKGL3EuCDh2AZ4+aa/DPPGhwgFGgSwOp1kCjQd8Xrk3m7AcFIc/fwuv3NGwCyuPY8MlYJoH6tv2umK4NolIdC3Bypfz134z2iO+Qr5JI4oLH8xmiF5XpQKBgQDM+vmlxMcHfl0OcnAJuQ0SaqVk6ufrMKRg8dPSvn2G84LdF3Vbr0Qx0vCRrmz85Netj5RdqkQh1dhi/QWMGegMw+bPmrDM6/CCEhT+9e6v5r2iKt3BbskbWdyhTm/nX98Er139/0xllF5Cyx54Xq2cTnDEM/Zaq+UXREHTr/L61Q=="
  # ==== Minio (S3) ====
  APPFLOWY_S3_MINIO_URL: "https://s3.minio.panic.haus"
  MINIO_HOST: "s3.minio.panic.haus"
  MINIO_PORT: "443"
  AWS_ACCESS_KEY: "rjtPFRp52DgmWb4kdsyiFKjtBMxYSaow" # must match your Minio secret
  AWS_SECRET_KEY: "kabSK8RXcONjO8I7GNfJ03WMueJ7fk6z" # must match your Minio secret
  APPFLOWY_S3_BUCKET: "appflowy" # your bucket name
  APPFLOWY_S3_USE_MINIO: "true"
  AWS_REGION: "cluster-panic-haus"
  # If you use AWS S3 instead of Minio, set APPFLOWY_S3_CREATE_BUCKET / AWS_REGION here.
  # ==== GoTrue SMTP (optional) ====
  SMTP_HOST: "mail.mind-overflow.net"
  SMTP_PORT: "465"
  SMTP_USER: "cloud@mind-overflow.net"
  SMTP_PASS: "PcYchuLLUyfT2gvY4Tx7wQ575Tnqjx84zVNoP6Mb"
  SMTP_ADMIN_EMAIL: "hello@beatrice.wtf"
  # ==== AppFlowy Mailer (Cloud) ====
  SMTP_EMAIL: "cloud@mind-overflow.net"
  SMTP_TLS_KIND: "wrapper" # "none" "wrapper" "required" "opportunistic"
  # ==== Additional secrets for AppFlowy AI (if used) ====
  AI_OPENAI_API_KEY: ""
  # (Optional) any other secrets you need can go here.

View File

@@ -0,0 +1,95 @@
apiVersion: v1
kind: Service
metadata:
  name: gotrue
  namespace: appflowy
spec:
  ports:
    - port: 9999
      targetPort: 9999
      protocol: TCP
      name: http
  selector:
    app: gotrue
  type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
  name: appflowy-cloud
  namespace: appflowy
spec:
  ports:
    - port: 8000
      targetPort: 8000
      protocol: TCP
      name: http
  selector:
    app: appflowy-cloud
  type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
  name: admin-frontend
  namespace: appflowy
spec:
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: http
  selector:
    app: admin-frontend
  type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
  name: appflowy-worker
  namespace: appflowy
spec:
  ports:
    - port: 8081
      targetPort: 8081
      protocol: TCP
      name: http
  selector:
    app: appflowy-worker
  type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
  name: appflowy-web
  namespace: appflowy
spec:
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: http
  selector:
    app: appflowy-web
  type: ClusterIP
# (If you added appflowy-ai)
---
apiVersion: v1
kind: Service
metadata:
  name: appflowy-ai
  namespace: appflowy
spec:
  ports:
    - port: 5001
      targetPort: 5001
      protocol: TCP
      name: http
  selector:
    app: appflowy-ai
  type: ClusterIP

View File

@@ -8,4 +8,4 @@ spec:
   storage:
     size: 20Gi
     storageClass: longhorn
   enableSuperuserAccess: true

View File

@@ -0,0 +1,19 @@
apiVersion: v1
appVersion: 0.32.2
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
  pods that can potentially be scheduled on better nodes. In the current implementation,
  descheduler does not schedule replacement of evicted pods but relies on the default
  scheduler for that.
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
  - kubernetes
  - descheduler
  - kube-scheduler
maintainers:
  - email: kubernetes-sig-scheduling@googlegroups.com
    name: Kubernetes SIG Scheduling
name: descheduler
sources:
  - https://github.com/kubernetes-sigs/descheduler
version: 0.32.2

View File

@@ -0,0 +1,91 @@
# Descheduler for Kubernetes
[Descheduler](https://github.com/kubernetes-sigs/descheduler/) for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
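A quick way to see it working after installing the chart (per the TL;DR below) is to follow the logs of the descheduler pods and watch for eviction decisions. This is an illustrative check that assumes the chart's default labels and the `kube-system` namespace used in the TL;DR:

```shell
# Pods created by the CronJob/Deployment carry the chart's selector label
kubectl -n kube-system logs -l "app.kubernetes.io/name=descheduler" --tail=100
```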
## TL;DR:
```shell
helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/
helm install my-release --namespace kube-system descheduler/descheduler
```
## Introduction
This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.14+
## Installing the Chart
To install the chart with the release name `my-release`:
```shell
helm install --namespace kube-system my-release descheduler/descheduler
```
The command deploys _descheduler_ on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```shell
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the _descheduler_ chart and their default values.
| Parameter | Description | Default |
| ----------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ----------------------------------------- |
| `kind` | Use as CronJob or Deployment | `CronJob` |
| `image.repository` | Docker repository to use | `registry.k8s.io/descheduler/descheduler` |
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `namespaceOverride` | Override the deployment namespace; defaults to .Release.Namespace | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `timeZone` | configure `timeZone` for CronJob | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `3` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `1` |
| `ttlSecondsAfterFinished` | If set, configure `ttlSecondsAfterFinished` for the _descheduler_ job | `nil` |
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
| `replicas` | The replica count for Deployment | `1` |
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `service.enabled` | If `true`, create a service for deployment | `false` |
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
| `serviceMonitor.additionalLabels` | Add custom labels to the ServiceMonitor resource | `{}` |
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
| `serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `nil` |
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `topologySpreadConstraints` | Topology Spread Constraints to spread the descheduler cronjob/deployment across the cluster | `[]` |
| `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
| `commonLabels` | Labels to apply to all resources | `{}` |
| `livenessProbe` | Liveness probe configuration for the descheduler container | _see values.yaml_ |
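For example, to run the descheduler as a long-lived Deployment with leader election instead of the default CronJob, the parameters from the table above can be overridden at install time. The values here are illustrative, not a recommended configuration:

```shell
# Run as a Deployment with 2 replicas; leaderElection is required for >1 replica
helm install my-release --namespace kube-system descheduler/descheduler \
  --set kind=Deployment \
  --set replicas=2 \
  --set leaderElection.enabled=true \
  --set deschedulingInterval=10m
```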

View File

@@ -0,0 +1,12 @@
Descheduler installed as a {{ .Values.kind }}.
{{- if eq .Values.kind "Deployment" }}
{{- if eq (.Values.replicas | int) 1 }}
WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
{{- end}}
{{- if .Values.leaderElection }}
{{- if and (hasKey .Values.cmdOptions "dry-run") (eq (get .Values.cmdOptions "dry-run") true) }}
WARNING: You enabled DryRun mode, you can't use Leader Election.
{{- end}}
{{- end}}
{{- end}}

View File

@@ -0,0 +1,104 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "descheduler.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "descheduler.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Expand the namespace of the release.
Allows overriding it for multi-namespace deployments in combined charts.
*/}}
{{- define "descheduler.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "descheduler.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "descheduler.labels" -}}
app.kubernetes.io/name: {{ include "descheduler.name" . }}
helm.sh/chart: {{ include "descheduler.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "descheduler.selectorLabels" -}}
app.kubernetes.io/name: {{ include "descheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "descheduler.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "descheduler.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Leader Election
*/}}
{{- define "descheduler.leaderElection"}}
{{- if .Values.leaderElection -}}
- --leader-elect={{ .Values.leaderElection.enabled }}
{{- if .Values.leaderElection.leaseDuration }}
- --leader-elect-lease-duration={{ .Values.leaderElection.leaseDuration }}
{{- end }}
{{- if .Values.leaderElection.renewDeadline }}
- --leader-elect-renew-deadline={{ .Values.leaderElection.renewDeadline }}
{{- end }}
{{- if .Values.leaderElection.retryPeriod }}
- --leader-elect-retry-period={{ .Values.leaderElection.retryPeriod }}
{{- end }}
{{- if .Values.leaderElection.resourceLock }}
- --leader-elect-resource-lock={{ .Values.leaderElection.resourceLock }}
{{- end }}
{{- if .Values.leaderElection.resourceName }}
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
{{- end }}
{{/* resource namespace value starts with a typo so keeping resourceNamescape for backwards compatibility */}}
{{- $resourceNamespace := default .Values.leaderElection.resourceNamespace .Values.leaderElection.resourceNamescape -}}
{{- if $resourceNamespace -}}
- --leader-elect-resource-namespace={{ $resourceNamespace }}
{{- end -}}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,44 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "descheduler.fullname" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["create", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
resources: ["pods/eviction"]
verbs: ["create"]
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "watch", "list"]
{{- if .Values.leaderElection.enabled }}
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
verbs: ["get", "patch", "delete"]
{{- end }}
{{- if and .Values.deschedulerPolicy .Values.deschedulerPolicy.metricsCollector .Values.deschedulerPolicy.metricsCollector.enabled }}
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list"]
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,16 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "descheduler.fullname" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "descheduler.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ include "descheduler.namespace" . }}
{{- end -}}

View File

@@ -0,0 +1,14 @@
{{- if .Values.deschedulerPolicy }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
data:
policy.yaml: |
apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
kind: "DeschedulerPolicy"
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
{{- end }}

View File

@@ -0,0 +1,111 @@
{{- if eq .Values.kind "CronJob" }}
apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
schedule: {{ .Values.schedule | quote }}
{{- if .Values.suspend }}
suspend: {{ .Values.suspend }}
{{- end }}
concurrencyPolicy: "Forbid"
{{- if .Values.startingDeadlineSeconds }}
startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
{{- end }}
{{- if ne .Values.successfulJobsHistoryLimit nil }}
successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
{{- end }}
{{- if ne .Values.failedJobsHistoryLimit nil }}
failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
{{- end }}
{{- if .Values.timeZone }}
timeZone: {{ .Values.timeZone }}
{{- end }}
jobTemplate:
spec:
{{- if .Values.ttlSecondsAfterFinished }}
ttlSecondsAfterFinished: {{ .Values.ttlSecondsAfterFinished }}
{{- end }}
template:
metadata:
name: {{ template "descheduler.fullname" . }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{- .Values.podAnnotations | toYaml | nindent 12 }}
{{- end }}
labels:
{{- include "descheduler.selectorLabels" . | nindent 12 }}
{{- if .Values.podLabels }}
{{- .Values.podLabels | toYaml | nindent 12 }}
{{- end }}
spec:
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
restartPolicy: "Never"
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 10 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
{{- toYaml .Values.command | nindent 16 }}
args:
- --policy-config-file=/policy-dir/policy.yaml
{{- range $key, $value := .Values.cmdOptions }}
{{- if ne $value nil }}
- {{ printf "--%s=%s" $key (toString $value) }}
{{- else }}
- {{ printf "--%s" $key }}
{{- end }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }}
ports:
{{- toYaml .Values.ports | nindent 16 }}
resources:
{{- toYaml .Values.resources | nindent 16 }}
{{- if .Values.securityContext }}
securityContext:
{{- toYaml .Values.securityContext | nindent 16 }}
{{- end }}
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 12 }}
{{- end }}
volumes:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- end }}

View File

@@ -0,0 +1,100 @@
{{- if eq .Values.kind "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
{{- if gt (.Values.replicas | int) 1 }}
{{- if not .Values.leaderElection.enabled }}
{{- fail "You must set leaderElection to use more than 1 replica"}}
{{- end}}
replicas: {{ required "leaderElection required for running more than one replica" .Values.replicas }}
{{- else }}
replicas: 1
{{- end }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "descheduler.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- .Values.podLabels | toYaml | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{- .Values.podAnnotations | toYaml | nindent 8 }}
{{- end }}
spec:
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 6 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
{{- toYaml .Values.command | nindent 12 }}
args:
- --policy-config-file=/policy-dir/policy.yaml
- --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
{{- range $key, $value := .Values.cmdOptions }}
{{- if ne $value nil }}
- {{ printf "--%s=%s" $key (toString $value) }}
{{- else }}
- {{ printf "--%s" $key }}
{{- end }}
{{- end }}
{{- include "descheduler.leaderElection" . | nindent 12 }}
ports:
{{- toYaml .Values.ports | nindent 12 }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if .Values.securityContext }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
{{- end }}
volumes:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,27 @@
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.service.enabled true }}
apiVersion: v1
kind: Service
metadata:
labels:
{{- include "descheduler.labels" . | nindent 4 }}
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
spec:
clusterIP: None
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies: {{ toYaml .Values.service.ipFamilies | nindent 4 }}
{{- end }}
ports:
- name: http-metrics
port: 10258
protocol: TCP
targetPort: 10258
selector:
{{- include "descheduler.selectorLabels" . | nindent 4 }}
type: ClusterIP
{{- end }}
{{- end }}

View File

@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceAccount.annotations }}
annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,44 @@
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.serviceMonitor.enabled true }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "descheduler.fullname" . }}-servicemonitor
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- {{ include "descheduler.namespace" . }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
endpoints:
- honorLabels: {{ .Values.serviceMonitor.honorLabels | default true }}
port: http-metrics
{{- if .Values.serviceMonitor.interval }}
interval: {{ .Values.serviceMonitor.interval }}
{{- end }}
scheme: https
tlsConfig:
{{- if eq .Values.serviceMonitor.insecureSkipVerify true }}
insecureSkipVerify: true
{{- end }}
{{- if .Values.serviceMonitor.serverName }}
serverName: {{ .Values.serviceMonitor.serverName }}
{{- end}}
{{- if .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{ tpl (toYaml .Values.serviceMonitor.metricRelabelings | indent 4) . }}
{{- end }}
{{- if .Values.serviceMonitor.relabelings }}
relabelings:
{{ tpl (toYaml .Values.serviceMonitor.relabelings | indent 4) . }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,17 @@
suite: Test Descheduler CronJob
templates:
- "*.yaml"
release:
name: descheduler
set:
kind: CronJob
tests:
- it: creates CronJob when kind is set
template: templates/cronjob.yaml
asserts:
- isKind:
of: CronJob

View File

@@ -0,0 +1,49 @@
suite: Test Descheduler Deployment
templates:
- "*.yaml"
release:
name: descheduler
set:
kind: Deployment
tests:
- it: creates Deployment when kind is set
template: templates/deployment.yaml
asserts:
- isKind:
of: Deployment
- it: enables leader-election
set:
leaderElection:
enabled: true
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect=true
- it: support leader-election resourceNamespace
set:
leaderElection:
enabled: true
resourceNamespace: test
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect-resource-namespace=test
- it: support legacy leader-election resourceNamescape
set:
leaderElection:
enabled: true
resourceNamescape: typo
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect-resource-namespace=typo

View File

@@ -0,0 +1,252 @@
# Default values for descheduler.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# CronJob or Deployment
kind: CronJob
image:
repository: registry.k8s.io/descheduler/descheduler
# Overrides the image tag whose default is the chart version
tag: ""
pullPolicy: IfNotPresent
imagePullSecrets:
# - name: container-registry-secret
resources:
requests:
cpu: 500m
memory: 256Mi
limits:
cpu: 500m
memory: 256Mi
ports:
- containerPort: 10258
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
# podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
podSecurityContext: {}
# fsGroup: 1000
nameOverride: ""
fullnameOverride: ""
# -- Override the deployment namespace; defaults to .Release.Namespace
namespaceOverride: ""
# labels that'll be applied to all resources
commonLabels: {}
cronJobApiVersion: "batch/v1"
schedule: "*/2 * * * *"
suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 3
# failedJobsHistoryLimit: 1
# ttlSecondsAfterFinished 600
# timeZone: Etc/UTC
# Required when running as a Deployment
deschedulingInterval: 5m
# Specifies the replica count for Deployment
# Set leaderElection if you want to use more than 1 replica
# Set affinity.podAntiAffinity rule if you want to schedule onto a node
# only if that node is in the same zone as at least one already-running descheduler
replicas: 1
# Specifies whether Leader Election resources should be created
# Required when running as a Deployment
# NOTE: Leader election can't be activated if DryRun enabled
leaderElection: {}
# enabled: true
# leaseDuration: 15s
# renewDeadline: 10s
# retryPeriod: 2s
# resourceLock: "leases"
# resourceName: "descheduler"
# resourceNamespace: "kube-system"
command:
- "/bin/descheduler"
cmdOptions:
v: 3
# Recommended to use the latest Policy API version supported by the Descheduler app version
deschedulerPolicyAPIVersion: "descheduler/v1alpha2"
# deschedulerPolicy contains the policies the descheduler will execute.
# To use policies stored in an existing configMap use:
# NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
# deschedulerPolicy: {}
deschedulerPolicy:
# nodeSelector: "key1=value1,key2=value2"
# maxNoOfPodsToEvictPerNode: 10
# maxNoOfPodsToEvictPerNamespace: 10
# metricsCollector:
# enabled: true
# ignorePvcPods: true
# evictLocalStoragePods: true
# evictDaemonSetPods: true
# tracing:
# collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
# transportCert: ""
# serviceName: ""
# serviceNamespace: ""
# sampleRate: 1.0
# fallbackToNoOpProviderOnError: true
profiles:
- name: default
pluginConfig:
- name: DefaultEvictor
args:
ignorePvcPods: true
evictLocalStoragePods: true
- name: RemoveDuplicates
- name: RemovePodsHavingTooManyRestarts
args:
podRestartThreshold: 100
includingInitContainers: true
- name: RemovePodsViolatingNodeAffinity
args:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
- name: RemovePodsViolatingNodeTaints
- name: RemovePodsViolatingInterPodAntiAffinity
- name: RemovePodsViolatingTopologySpreadConstraint
- name: LowNodeUtilization
args:
thresholds:
cpu: 40
memory: 30
pods: 30
targetThresholds:
cpu: 50
memory: 60
pods: 50
plugins:
balance:
enabled:
- RemoveDuplicates
- RemovePodsViolatingTopologySpreadConstraint
- LowNodeUtilization
deschedule:
enabled:
- RemovePodsHavingTooManyRestarts
- RemovePodsViolatingNodeTaints
- RemovePodsViolatingNodeAffinity
- RemovePodsViolatingInterPodAntiAffinity
priorityClassName: system-cluster-critical
nodeSelector: {}
# foo: bar
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - descheduler
# topologyKey: "kubernetes.io/hostname"
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: kubernetes.io/hostname
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app.kubernetes.io/name: descheduler
tolerations: []
# - key: 'management'
# operator: 'Equal'
# value: 'tool'
# effect: 'NoSchedule'
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Specifies custom annotations for the serviceAccount
annotations: {}
podAnnotations: {}
podLabels: {}
dnsConfig: {}
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
service:
enabled: false
# @param service.ipFamilyPolicy [string], support SingleStack, PreferDualStack and RequireDualStack
#
ipFamilyPolicy: ""
# @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
# Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
# E.g.
# ipFamilies:
# - IPv6
# - IPv4
ipFamilies: []
serviceMonitor:
enabled: false
# The namespace where Prometheus expects to find service monitors.
# namespace: ""
# Add custom labels to the ServiceMonitor resource
additionalLabels: {}
# prometheus: kube-prometheus-stack
interval: ""
# honorLabels: true
insecureSkipVerify: true
serverName: null
metricRelabelings: []
# - action: keep
# regex: 'descheduler_(build_info|pods_evicted)'
# sourceLabels: [__name__]
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace

View File

@@ -2,21 +2,20 @@ apiVersion: elasticsearch.k8s.elastic.co/v1
 kind: Elasticsearch
 metadata:
   name: elasticsearch-ha
-  namespace: elasticsearch
 spec:
-  version: 7.17.0
+  version: 8.17.4
   nodeSets:
   - name: default
     count: 3
     config:
-      node.store.allow_mmap: false # Recommended for containerized environments
-    volumeClaimTemplates:
-    - metadata:
-        name: elasticsearch-data
-      spec:
-        accessModes:
-        - ReadWriteOnce
-        storageClassName: longhorn
-        resources:
-          requests:
-            storage: 20Gi
+      node.store.allow_mmap: false
+    # volumeClaimTemplates:
+    # - metadata:
+    #     name: elasticsearch-data
+    #   spec:
+    #     accessModes:
+    #     - ReadWriteOnce
+    #     storageClassName: longhorn
+    #     resources:
+    #       requests:
+    #         storage: 5Gi

View File

@@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: kibana-ingress
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
ingressClassName: nginx
tls:
- hosts:
- query.prod.panic.haus
secretName: kibana-tls
rules:
- host: query.prod.panic.haus
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kibana-ha-kb-http
port:
number: 5601

View File

@@ -2,9 +2,8 @@ apiVersion: kibana.k8s.elastic.co/v1
 kind: Kibana
 metadata:
   name: kibana-ha
-  namespace: elasticsearch
 spec:
-  version: 7.17.0
+  version: 8.17.4
   count: 2
   elasticsearchRef:
     name: elasticsearch-ha

View File

@@ -1,9 +1,11 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
+namespace: elastic-system
 resources:
-- crds.yaml
-- opereator.yaml
-# - elasticsearch.yaml
-# - kibana.yaml
+# - crds.yaml
+- operator.yaml
+- elasticsearch.yaml
+- kibana.yaml
+- kibana-ingress.yaml
 # - longstash.yaml

View File

@@ -13,3 +13,12 @@ data:
       url: http://prometheus-k8s.monitoring:9090
       isDefault: true
       editable: false
+    - name: Elasticsearch
+      type: elasticsearch
+      access: proxy
+      url: https://elasticsearch-ha-es-http.elastic-system.svc:9200
+      jsonData:
+        esVersion: 8.17.4
+        timeField: "@timestamp"
+        tlsSkipVerify: true
+      editable: false

View File

@@ -5,4 +5,4 @@ resources:
 - grafana-deploy.yaml
 - grafana-ingress.yaml
 - grafana-svc.yaml
-- prometheus-ds.yaml
+- data-sources.yaml

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Secret
metadata:
name: grafana-db-secret
namespace: grafana
type: Opaque
data:
username: Z3JhZmFuYQ==
password: dndyMGc5aWpoMGIzaXJka3ZqMG1ndXZoM3I=
---
apiVersion: v1
kind: Secret
metadata:
name: grafana-oauth-secret
namespace: grafana
type: Opaque
data:
client-secret: VFVEYU5uY091b1Y1QzFmeUJaeXN3ZzNEU3VYWU9laEQ=

View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: keycloak-db-secret
namespace: keycloak
type: Opaque
data:
username: a2V5Y2xvYWs= # base64 encoded
password: dTgyNXFDTnhmckJTY0tUb1RkM1c5ektWUHhwVnNpN0w= # base64 encoded

View File

@@ -103,7 +103,7 @@ data:
     reclaimPolicy: "Delete"
     volumeBindingMode: Immediate
     parameters:
-      numberOfReplicas: "3"
+      numberOfReplicas: "1"
       staleReplicaTimeout: "30"
       fromBackup: ""
       fsType: "ext4"

View File

@@ -15,7 +15,7 @@ spec:
     spec:
       containers:
       - name: oauth2-proxy-longhorn
-        image: quay.io/oauth2-proxy/oauth2-proxy:v7.8.1-arm64
+        image: quay.io/oauth2-proxy/oauth2-proxy:v7.8.2
         args:
         - --provider=keycloak
         - --client-id=longhorn

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: minio-operator-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: minio-operator-role
subjects:
- kind: ServiceAccount
name: minio-operator
namespace: default

View File

@@ -0,0 +1,178 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: minio-operator-role
rules:
- apiGroups:
- "apiextensions.k8s.io"
resources:
- customresourcedefinitions
verbs:
- get
- update
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- update
- list
- apiGroups:
- ""
resources:
- namespaces
- nodes
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- pods
- services
- events
- configmaps
verbs:
- get
- watch
- create
- list
- delete
- deletecollection
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- watch
- create
- update
- list
- delete
- deletecollection
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
- rolebindings
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets
- deployments
- deployments/finalizers
verbs:
- get
- create
- list
- patch
- watch
- update
- delete
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- create
- list
- patch
- watch
- update
- delete
- apiGroups:
- "certificates.k8s.io"
resources:
- "certificatesigningrequests"
- "certificatesigningrequests/approval"
- "certificatesigningrequests/status"
verbs:
- update
- create
- get
- delete
- list
- apiGroups:
- certificates.k8s.io
resourceNames:
- kubernetes.io/legacy-unknown
- kubernetes.io/kube-apiserver-client
- kubernetes.io/kubelet-serving
- beta.eks.amazonaws.com/app-serving
resources:
- signers
verbs:
- approve
- sign
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- minio.min.io
- sts.min.io
- job.min.io
resources:
- "*"
verbs:
- "*"
- apiGroups:
- min.io
resources:
- "*"
verbs:
- "*"
- apiGroups:
- monitoring.coreos.com
resources:
- prometheuses
verbs:
- '*'
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
- get
- update
- create
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- delete
- get
- list
- patch
- update
- deletecollection

View File

@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- minio.min.io_tenants.yaml
- sts.min.io_policybindings.yaml

File diff suppressed because it is too large

View File

@@ -0,0 +1,133 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
operator.min.io/version: v7.0.1
name: policybindings.sts.min.io
spec:
group: sts.min.io
names:
kind: PolicyBinding
listKind: PolicyBindingList
plural: policybindings
shortNames:
- policybinding
singular: policybinding
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .status.currentState
name: State
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
type: string
kind:
type: string
metadata:
type: object
spec:
properties:
application:
properties:
namespace:
type: string
serviceaccount:
type: string
required:
- namespace
- serviceaccount
type: object
policies:
items:
type: string
type: array
required:
- application
- policies
type: object
status:
properties:
currentState:
type: string
usage:
nullable: true
properties:
authotizations:
format: int64
type: integer
type: object
required:
- currentState
- usage
type: object
type: object
served: true
storage: false
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .status.currentState
name: State
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta1
schema:
openAPIV3Schema:
properties:
apiVersion:
type: string
kind:
type: string
metadata:
type: object
spec:
properties:
application:
properties:
namespace:
type: string
serviceaccount:
type: string
required:
- namespace
- serviceaccount
type: object
policies:
items:
type: string
type: array
required:
- application
- policies
type: object
status:
properties:
currentState:
type: string
usage:
nullable: true
properties:
authotizations:
format: int64
type: integer
type: object
required:
- currentState
- usage
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@@ -0,0 +1,59 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: minio-operator
namespace: minio-operator
labels:
app.kubernetes.io/instance: minio-operator
app.kubernetes.io/name: operator
spec:
replicas: 2
selector:
matchLabels:
name: minio-operator
strategy:
type: Recreate
template:
metadata:
labels:
name: minio-operator
app.kubernetes.io/instance: minio-operator
app.kubernetes.io/name: operator
spec:
serviceAccountName: minio-operator
containers:
- name: minio-operator
image: minio/operator:v7.0.1
imagePullPolicy: IfNotPresent
args:
- controller
resources:
requests:
cpu: 200m
memory: 256Mi
ephemeral-storage: 500Mi
securityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
env:
- name: MINIO_CONSOLE_TLS_ENABLE
value: "off"
- name: OPERATOR_STS_ENABLED
value: "on"
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: name
operator: In
values:
- minio-operator
topologyKey: kubernetes.io/hostname

View File

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- crds/

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Namespace
metadata:
name: minio-operator
labels:
pod-security.kubernetes.io/enforce: restricted
pod-security.kubernetes.io/enforce-version: latest
pod-security.kubernetes.io/audit: restricted
pod-security.kubernetes.io/audit-version: latest
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest

View File

@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: minio-operator
namespace: default

View File

@@ -0,0 +1,33 @@
apiVersion: v1
kind: Service
metadata:
name: operator # Please do not change this value
labels:
name: minio-operator
app.kubernetes.io/instance: minio-operator
app.kubernetes.io/name: operator
namespace: minio-operator
spec:
type: ClusterIP
ports:
- port: 4221
name: http
selector:
name: minio-operator
operator: leader
---
apiVersion: v1
kind: Service
metadata:
name: sts # Please do not change this value
labels:
name: minio-operator
namespace: minio-operator
spec:
type: ClusterIP
ports:
- port: 4223
targetPort: 4223
name: https
selector:
name: minio-operator

View File

@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: minio-operator
commonAnnotations:
operator.min.io/authors: "MinIO, Inc."
operator.min.io/license: "AGPLv3"
operator.min.io/support: "https://subnet.min.io"
operator.min.io/version: v7.0.1
commonLabels:
app.kubernetes.io/name: operator
resources:
- base/namespace.yaml
- base/service-account.yaml
- base/cluster-role.yaml
- base/cluster-role-binding.yaml
- base/crds/
- base/service.yaml
- base/deployment.yaml

View File

@@ -0,0 +1,37 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: minio-ingress
namespace: minio-tenant
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
nginx.ingress.kubernetes.io/proxy-body-size: "100m"
spec:
rules:
- host: s3.minio.panic.haus
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: minio
port:
number: 9000
- host: console.minio.panic.haus
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: minio-console
port:
number: 9090
tls:
- hosts:
- s3.minio.panic.haus
- console.minio.panic.haus
secretName: minio-tls

View File

@@ -0,0 +1,12 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: minio-tenant
resources:
- namespace.yaml
- secret.yaml
- tenant.yaml
- ingress.yaml
- svc-minio.yaml
- svc-minio-console.yaml

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: minio-tenant

View File

@@ -0,0 +1,23 @@
apiVersion: v1
kind: Secret
metadata:
name: storage-configuration
namespace: minio-tenant
stringData:
config.env: |-
export MINIO_ROOT_USER="rjtPFRp52DgmWb4kdsyiFKjtBMxYSaow"
export MINIO_ROOT_PASSWORD="kabSK8RXcONjO8I7GNfJ03WMueJ7fk6z"
export MINIO_DOMAIN="s3.minio.panic.haus"
export MINIO_BROWSER_REDIRECT_URL="https://console.minio.panic.haus"
MINIO_REGION_NAME="cluster-panic-haus"
type: Opaque
---
apiVersion: v1
data:
CONSOLE_ACCESS_KEY: Y29uc29sZQ==
CONSOLE_SECRET_KEY: ZGRhTDBZSHhlTnR2ZDM4SVI5TVdtS3VFU21ONE00NG4=
kind: Secret
metadata:
name: storage-user
namespace: minio-tenant
type: Opaque

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: minio-console
namespace: minio-tenant
labels:
app: minio
spec:
type: ClusterIP
selector:
v1.min.io/tenant: panic-minio
ports:
- name: http
port: 9090
targetPort: 9090
protocol: TCP

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: minio
namespace: minio-tenant
labels:
app: minio
spec:
type: ClusterIP
selector:
v1.min.io/tenant: panic-minio
ports:
- name: http-minio
port: 80
targetPort: 9000
protocol: TCP

View File

@@ -0,0 +1,79 @@
apiVersion: minio.min.io/v2
kind: Tenant
metadata:
annotations:
prometheus.io/path: /minio/v2/metrics/cluster
prometheus.io/port: "9000"
prometheus.io/scrape: "true"
labels:
app: minio
name: panic-minio
namespace: minio-tenant
spec:
exposeServices: {}
imagePullPolicy: IfNotPresent
certConfig: {}
configuration:
name: storage-configuration
env: []
requestAutoCert: false
externalCertSecret: []
externalCaCertSecret: []
externalClientCertSecrets: []
features:
bucketDNS: false
domains: {}
image: quay.io/minio/minio:RELEASE.2025-04-03T14-56-28Z
imagePullSecret: {}
mountPath: /export
podManagementPolicy: Parallel
pools:
- name: pool-0
affinity:
nodeAffinity: {}
podAffinity: {}
podAntiAffinity: {}
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
nodeSelector: {}
resources: {}
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
servers: 4
tolerations: []
topologySpreadConstraints: []
volumeClaimTemplate:
apiVersion: v1
kind: persistentvolumeclaims
metadata: {}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: longhorn
status: {}
volumesPerServer: 1
priorityClassName: ""
serviceAccountName: ""
serviceMetadata:
consoleServiceAnnotations: {}
consoleServiceLabels: {}
minioServiceAnnotations: {}
minioServiceLabels: {}
subPath: ""
users:
- name: storage-user

View File

@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: n8n
resources:
- n8n-claim0-persistentvolumeclaim.yaml
- n8n-ingress.yaml
- namespace.yaml
- n8n-deployment.yaml
- n8n-service.yaml

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
service: n8n-claim0
name: n8n-claim0
namespace: n8n
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 2Gi

View File

@@ -0,0 +1,82 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
service: n8n
name: n8n
namespace: n8n
spec:
replicas: 1
selector:
matchLabels:
service: n8n
strategy:
type: Recreate
template:
metadata:
labels:
service: n8n
spec:
initContainers:
- name: volume-permissions
image: busybox:1.36
command: ["sh", "-c", "chown 1000:1000 /data"]
volumeMounts:
- name: n8n-claim0
mountPath: /data
containers:
- command:
- /bin/sh
args:
- -c
- sleep 5; n8n start
env:
- name: N8N_EDITOR_BASE_URL
value: https://n8n.prod.panic.haus/
- name: WEBHOOK_URL
value: https://n8n.prod.panic.haus/
- name: DB_TYPE
value: postgresdb
- name: DB_POSTGRESDB_HOST
value: postgres-base-rw.postgres.svc.cluster.local
- name: DB_POSTGRESDB_PORT
value: "5432"
- name: DB_POSTGRESDB_DATABASE
value: n8ndb
- name: DB_POSTGRESDB_USER
valueFrom:
secretKeyRef:
name: n8n-db-secret
key: username
- name: DB_POSTGRESDB_PASSWORD
valueFrom:
secretKeyRef:
name: n8n-db-secret
key: password
- name: N8N_PROTOCOL
value: http
- name: N8N_PORT
value: "5678"
image: n8nio/n8n
name: n8n
ports:
- containerPort: 5678
resources:
requests:
memory: "250Mi"
limits:
memory: "500Mi"
volumeMounts:
- mountPath: /home/node/.n8n
name: n8n-claim0
restartPolicy: Always
volumes:
- name: n8n-claim0
persistentVolumeClaim:
claimName: n8n-claim0
- name: n8n-secret
secret:
secretName: n8n-secret
- name: n8n-db-secret
secret:
secretName: n8n-db-secret

View File

@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: n8n
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
# nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- n8n.prod.panic.haus
secretName: n8n-tls
rules:
- host: n8n.prod.panic.haus
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: n8n
port:
number: 5678

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
labels:
service: n8n
name: n8n
namespace: n8n
spec:
type: ClusterIP
ports:
- name: "5678"
port: 5678
targetPort: 5678
protocol: TCP
selector:
service: n8n

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: n8n

deploy/n8n/secret.yaml Normal file
View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: n8n-db-secret
namespace: n8n
type: Opaque
data:
username: bjhu # base64 encoded
password: SHFCTkdHcndzN1VFSk5tUDJRa3lIWGF6YkJaN3lTUkY= # base64 encoded

View File

@@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: outline
namespace: outline-wiki
labels:
app: outline
spec:
replicas: 3
selector:
matchLabels:
app: outline
template:
metadata:
labels:
app: outline
spec:
containers:
- name: outline
image: outlinewiki/outline:0.84.0
ports:
- containerPort: 8089
envFrom:
- secretRef:
name: outline-secrets
env:
- name: PORT
value: "8089"

View File

@@ -0,0 +1,27 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: outline-ingress
namespace: outline-wiki
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "100m"
spec:
ingressClassName: nginx
tls:
- hosts:
- outline.panic.haus
secretName: outline-wiki-tls
rules:
- host: outline.panic.haus
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: outline
port:
number: 8089

View File

@@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: outline-wiki
resources:
- deploy.yaml
- service.yaml
- secret.yaml
- ingress.yaml

View File

@@ -0,0 +1,34 @@
apiVersion: v1
kind: Secret
metadata:
name: outline-secrets
namespace: outline-wiki
type: Opaque
stringData:
SECRET_KEY: eae7766055bb20e0b6fb6838cc889121697e59a2b82fd1590dc47a91489acd95
UTILS_SECRET: f9e0e1158b7ec2239b465c602172493ee2d1b0765ca6b659b35f64959408492d
DATABASE_URL: postgres://outline:ULYpprqxQeS2rSBXF8NxEr4FhJkUAwhWJtkwZij6XwBDSvUUKeAifBBG885fPSmd@postgres-base-rw.postgres/outlinedb
REDIS_URL: redis://redis-lb.redis.svc.cluster.local:6379
URL: https://outline.panic.haus
PGSSLMODE: disable
AWS_ACCESS_KEY_ID: rjtPFRp52DgmWb4kdsyiFKjtBMxYSaow
AWS_SECRET_ACCESS_KEY: kabSK8RXcONjO8I7GNfJ03WMueJ7fk6z
AWS_S3_UPLOAD_BUCKET_URL: https://s3.minio.panic.haus/
AWS_REGION: cluster-panic-haus
AWS_S3_UPLOAD_BUCKET_NAME: outline
FILE_STORAGE_UPLOAD_MAX_SIZE: "26214400"
AWS_S3_FORCE_PATH_STYLE: "true"
AWS_S3_ACL: private
OIDC_DISPLAY_NAME: panicSSO
OIDC_CLIENT_ID: outline
OIDC_CLIENT_SECRET: W4KxpMkWiRL5EU8yknamRkkZpFFQ1rKN
OIDC_AUTH_URI: https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/auth?scope=openid
OIDC_TOKEN_URI: https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/token?scope=openid
OIDC_USERINFO_URI: https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/userinfo?scope=openid
SMTP_HOST: mail.mind-overflow.net
SMTP_PORT: "465"
SMTP_USERNAME: cloud@mind-overflow.net
SMTP_PASSWORD: PcYchuLLUyfT2gvY4Tx7wQ575Tnqjx84zVNoP6Mb
SMTP_FROM_EMAIL: cloud@mind-overflow.net
SMTP_REPLY_EMAIL: cloud@mind-overflow.net
SMTP_SECURE: "true"

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: outline
namespace: outline-wiki
spec:
selector:
app: outline
ports:
- name: http
port: 80
targetPort: 8089
type: ClusterIP

View File

@@ -0,0 +1,22 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: clickhouse-config
data:
clickhouse-config.xml: |
<clickhouse>
<logger>
<level>warning</level>
<console>true</console>
</logger>
<query_thread_log remove="remove"/>
<query_log remove="remove"/>
<text_log remove="remove"/>
<trace_log remove="remove"/>
<metric_log remove="remove"/>
<asynchronous_metric_log remove="remove"/>
<!-- Update: Required for newer versions of Clickhouse -->
<session_log remove="remove"/>
<part_log remove="remove"/>
</clickhouse>

View File

@@ -0,0 +1,41 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: clickhouse
spec:
replicas: 1
selector:
matchLabels:
app: clickhouse
template:
metadata:
labels:
app: clickhouse
spec:
containers:
- name: clickhouse
image: clickhouse/clickhouse-server:22.6-alpine
# You may expose ports if needed (for example, HTTP on 8123)
ports:
- containerPort: 8123
volumeMounts:
- name: event-data
mountPath: /var/lib/clickhouse
- name: clickhouse-config
mountPath: /etc/clickhouse-server/config.d/logging.xml
subPath: clickhouse-config.xml
readOnly: true
- name: clickhouse-user-config
mountPath: /etc/clickhouse-server/users.d/logging.xml
subPath: clickhouse-user-config.xml
readOnly: true
volumes:
- name: event-data
persistentVolumeClaim:
claimName: event-data-pvc
- name: clickhouse-config
configMap:
name: clickhouse-config
- name: clickhouse-user-config
configMap:
name: clickhouse-user-config

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: event-data-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: longhorn

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: clickhouse
labels:
app: clickhouse
spec:
ports:
- name: http
protocol: TCP
port: 8123
targetPort: 8123
selector:
app: clickhouse

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: clickhouse-user-config
data:
clickhouse-user-config.xml: |
<clickhouse>
<profiles>
<default>
<log_queries>0</log_queries>
<log_query_threads>0</log_query_threads>
</default>
</profiles>
</clickhouse>

View File

@@ -0,0 +1,17 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: plausible
resources:
- clickhouse-config.yaml
- clickhouse-pvc.yaml
- clickhouse-svc.yaml
- mail-svc.yaml
- plausible-secret.yaml
- clickhouse-deploy.yaml
- clickhouse-user-config.yaml
- mail-deploy.yaml
- plausible-deploy.yaml
- plausible-ingress.yaml
- plausible-svc.yaml

View File

@@ -0,0 +1,21 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mail
spec:
replicas: 1
selector:
matchLabels:
app: mail
template:
metadata:
labels:
app: mail
spec:
nodeSelector:
kubernetes.io/arch: "amd64"
containers:
- name: mail
image: bytemark/smtp
ports:
- containerPort: 25

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
name: mail
spec:
selector:
app: mail
ports:
- protocol: TCP
port: 25
targetPort: 25

View File

@@ -0,0 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: plausible
spec:
replicas: 1
selector:
matchLabels:
app: plausible
template:
metadata:
labels:
app: plausible
spec:
containers:
- name: plausible
image: plausible/analytics:latest
command:
- sh
- -c
- "sleep 10 && /entrypoint.sh db createdb && /entrypoint.sh db migrate && /entrypoint.sh run"
ports:
- containerPort: 8000
envFrom:
- secretRef:
name: plausible-env

View File

@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: plausible-ingress
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
spec:
ingressClassName: nginx
tls:
- hosts:
- webstats.beatrice.wtf
secretName: plausible-tls
rules:
- host: webstats.beatrice.wtf
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: plausible
port:
number: 8000

View File

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Secret
metadata:
name: plausible-env
data:
ADMIN_USER_EMAIL: aGVsbG9AYmVhdHJpY2Uud3Rm
ADMIN_USER_NAME: YmVhdHJpY2U=
ADMIN_USER_PWD: Xl55Z1d4UGtEMiRQSlF1JXZAQ1Q1ZF5lNnRDbmhBXk5qZnpTVlYyISNTN2U3N25wU25wZkpUYWF6RGVWRFVSTA==
BASE_URL: aHR0cHM6Ly93ZWJzdGF0cy5iZWF0cmljZS53dGY=
DATABASE_URL: cG9zdGdyZXM6Ly9wbGF1c2libGU6cnY5Mzhnd2d3ZzQzNGYyZjRoZzNnN2gzMDg5N2czaDVnMDk4akBwb3N0Z3Jlcy1iYXNlLXJ3LnBvc3RncmVzOjU0MzIvcGxhdXNpYmxlX2Ri
CLICKHOUSE_DATABASE_URL: aHR0cDovL2NsaWNraG91c2U6ODEyMy9wbGF1c2libGVfZXZlbnRzX2Ri
DISABLE_REGISTRATION: dHJ1ZQ==
MAILER_EMAIL: Y2xvdWRAbWluZC1vdmVyZmxvdy5uZXQ=
PORT: ODAwMA==
SECRET_KEY_BASE: M1FRQS9EdEdmR3c3cytjMzF2dnlmZ3lVc2F4RStNOWsxSWIvNVBjTUJIQjVHNWdpek00a2tSQ2lvbUFkU0lKR3FybGJ5R2h6VEFOcUJLWWZyeFZ0eHc9PQ==
SMTP_HOST_ADDR: bWFpbC5taW5kLW92ZXJmbG93Lm5ldA==
SMTP_HOST_PORT: NTg3
SMTP_HOST_SSL_ENABLED: ZmFsc2U=
SMTP_USER_NAME: Y2xvdWRAbWluZC1vdmVyZmxvdy5uZXQ=
SMTP_USER_PWD: UGNZY2h1TExVeWZUMmd2WTRUeDd3UTU3NVRucWp4ODR6Vk5vUDZNYg==

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
name: plausible
spec:
selector:
app: plausible
ports:
- protocol: TCP
port: 8000
targetPort: 8000

View File

@@ -13,8 +13,14 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {
   prometheus+:: {
     prometheus+: {
       spec+: {
+        serviceMonitorSelector: {},
         externalUrl: 'https://metrics.prod.panic.haus',
         retention: '30d',
+        retentionSize: '16GB',
+        additionalScrapeConfigs: {
+          name: 'prometheus-additional-scrape-configs',
+          key: 'additional-scrape-configs.yaml',
+        },
         storage: {
           volumeClaimTemplate: {
             spec: {

View File

@@ -10,6 +10,9 @@ metadata:
   name: k8s
   namespace: monitoring
 spec:
+  additionalScrapeConfigs:
+    key: additional-scrape-configs.yaml
+    name: prometheus-additional-scrape-configs
   alerting:
     alertmanagers:
     - apiVersion: v2
@@ -38,6 +41,7 @@ spec:
     requests:
       memory: 400Mi
   retention: 30d
+  retentionSize: 16GB
   ruleNamespaceSelector: {}
   ruleSelector: {}
   scrapeConfigNamespaceSelector: {}

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Secret
metadata:
name: prometheus-additional-scrape-configs
namespace: monitoring
stringData:
additional-scrape-configs.yaml: |
- job_name: 'proxmox-holly-node-exporter'
scheme: https
metrics_path: /metrics
static_configs:
- targets: ['node-exporter.holly.panic.haus']

View File

@@ -16,6 +16,8 @@ spec:
       labels:
         app: redis
     spec:
+      nodeSelector:
+        kubernetes.io/arch: amd64
       containers:
       - name: redis
         image: redis:7.4-alpine

View File

@@ -0,0 +1 @@
.git

View File

@@ -0,0 +1,12 @@
dependencies:
- name: mongodb
repository: https://charts.bitnami.com/bitnami
version: 13.18.5
- name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.5.38
- name: nats
repository: https://nats-io.github.io/k8s/helm/charts
version: 0.15.1
digest: sha256:9ba75908eaa8689c2f5c483a30adaf9028130fd71a18f7ea15487d5a43cdc9f5
generated: "2025-03-28T13:55:39.794015571Z"

View File

@@ -0,0 +1,35 @@
apiVersion: v2
appVersion: 7.4.1
dependencies:
- condition: mongodb.enabled
name: mongodb
repository: https://charts.bitnami.com/bitnami
version: 13.x.x
- condition: postgresql.enabled
name: postgresql
repository: https://charts.bitnami.com/bitnami
version: 15.x.x
- condition: microservices.enabled
name: nats
repository: https://nats-io.github.io/k8s/helm/charts
version: 0.15.x
description: Prepare to take off with the ultimate chat platform, experience the next
level of team communications
home: https://rocket.chat/
icon: https://raw.githubusercontent.com/RocketChat/Rocket.Chat.Artwork/master/Logos/icon.svg
keywords:
- chat
- communication
- http
- web
- application
- nodejs
- javascript
- meteor
maintainers:
- email: cloud@rocket.chat
name: RocketChat
name: rocketchat
sources:
- https://github.com/RocketChat/Docker.Official.Image/
version: 6.24.0

View File

@@ -0,0 +1,429 @@
# Rocket.Chat
[Rocket.Chat](https://rocket.chat/) is free, unlimited and open source. Replace email, HipChat & Slack with the ultimate team chat software solution.
> **WARNING**: Upgrading to chart version 5.4.3 or higher might require extra steps to successfully update MongoDB and Rocket.Chat. See [Upgrading to 5.4.3](#to-543) for more details.
## Introduction
This chart bootstraps a [Rocket.Chat](https://rocket.chat/) Deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. It provisions a fully featured Rocket.Chat installation.
In addition, this chart supports scaling of Rocket.Chat for increased server capacity and high availability (requires enterprise license). For more information on Rocket.Chat and its capabilities, see its [documentation](https://rocket.chat/docs/).
## Prerequisites Details
The chart has an optional dependency on the [MongoDB](https://github.com/bitnami/charts/tree/master/bitnami/mongodb) chart.
By default, the MongoDB chart requires PV support on underlying infrastructure (may be disabled).
## Installing the Chart
To install the chart with the release name `rocketchat`:
```console
$ helm install rocketchat rocketchat/rocketchat --set mongodb.auth.passwords={rocketchatPassword},mongodb.auth.rootPassword=rocketchatRootPassword
```
If you got a registration token for [Rocket.Chat Cloud](https://cloud.rocket.chat), you can also include it:
```console
$ helm install rocketchat rocketchat/rocketchat --set mongodb.auth.passwords={rocketchatPassword},mongodb.auth.rootPassword=rocketchatRootPassword,registrationToken=<paste the token here>
```
Using a `Values.yaml` file is recommended over passing command line arguments with `--set`. You must set at least the database password and root password in the values file.
```yaml
mongodb:
auth:
passwords:
- rocketchat
rootPassword: rocketchatroot
```
Now use the following command to deploy
```shell
helm install rocketchat -f Values.yaml rocketchat/rocketchat
```
> Starting chart version 5.4.3, due to mongodb dependency, username, password and database entries must be arrays of the same length. Rocket.Chat will use the first entries of those arrays for its own use. `mongodb.auth.usernames` array defaults to `{rocketchat}` and `mongodb.auth.databases` array defaults to `{rocketchat}`
## Uninstalling the Chart
To uninstall/delete the `rocketchat` deployment:
```console
$ helm delete rocketchat
```
## Configuration
The following table lists the configurable parameters of the Rocket.Chat chart and their default values.
| Parameter | Description | Default |
|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------|
| `image.repository` | Image repository | `registry.rocket.chat/rocketchat/rocket.chat` |
| `image.tag` | Image tag | `3.18.3` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `host` | Hostname for Rocket.Chat. Also used for ingress (if enabled) | `""` |
| `replicaCount` | Number of replicas to run | `1` |
| `smtp.enabled` | Enable SMTP for sending mails | `false` |
| `smtp.existingSecret` | Use existing secret for SMTP account | `""` |
| `smtp.username` | Username of the SMTP account | `""` |
| `smtp.password` | Password of the SMTP account | `""` |
| `smtp.host` | Hostname of the SMTP server | `""` |
| `smtp.port` | Port of the SMTP server | `587` |
| `extraEnv` | Extra environment variables for Rocket.Chat. Used with `tpl` function, so this needs to be a string | `""` |
| `extraSecret` | An already existing secret to be used by chat deployment. It needs to be a string | `""` |
| `extraVolumes` | Extra volumes allowing inclusion of certificates or any sort of file that might be required (see below) | `[]` |
| `extraVolumeMounts` | Where the aforementioned extra volumes should be mounted inside the container | `[]` |
| `podAntiAffinity` | Pod anti-affinity can prevent the scheduler from placing RocketChat replicas on the same node. The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. | `""` |
| `podAntiAffinityTopologyKey` | If anti-affinity is enabled sets the topologyKey to use for anti-affinity. This can be changed to, for example `failure-domain.beta.kubernetes.io/zone` | `kubernetes.io/hostname` |
| `affinity` | Assign custom affinity rules to the RocketChat instance https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | `{}` |
| `minAvailable` | Minimum number / percentage of pods that should remain scheduled | `1` |
| `existingMongodbSecret` | An already existing secret containing MongoDB Connection URL | `""` |
| `externalMongodbUrl` | MongoDB URL if using an externally provisioned MongoDB | `""` |
| `externalMongodbOplogUrl` | MongoDB OpLog URL if using an externally provisioned MongoDB. Required if `externalMongodbUrl` is set | `""` |
| `mongodb.enabled` | Enable or disable MongoDB dependency. Refer to the [stable/mongodb docs](https://github.com/bitnami/charts/tree/master/bitnami/mongodb#configuration) for more information | `true` |
| `persistence.enabled` | Enable persistence using a PVC. This is not necessary if you're using the default [GridFS](https://rocket.chat/docs/administrator-guides/file-upload/) file storage | `false` |
| `persistence.storageClass` | Storage class of the PVC to use | `""` |
| `persistence.accessMode` | Access mode of the PVC | `ReadWriteOnce` |
| `persistence.size` | Size of the PVC | `8Gi` |
| `persistence.existingClaim` | An Existing PVC name for rocketchat volume | `""` |
| `resources` | Pod resource requests and limits | `{}` |
| `securityContext.enabled` | Enable security context for the pod | `true` |
| `securityContext.runAsUser` | User to run the pod as | `999` |
| `securityContext.fsGroup` | fs group to use for the pod | `999` |
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
| `serviceAccount.name` | Name of the ServiceAccount to use. If not set and create is true, a name is generated using the fullname template | `""` |
| `ingress.enabled` | If `true`, an ingress is created | `false` |
| `ingress.pathType` | Sets the value for pathType for the created Ingress resource | `Prefix` |
| `ingress.annotations` | Annotations for the ingress | `{}` |
| `ingress.path` | Path of the ingress | `/` |
| `ingress.tls` | A list of [IngressTLS](https://kubernetes.io/docs/reference/kubernetes-api/service-resources/ingress-v1/#IngressSpec) items | `[]` |
| `license` | Contents of the Enterprise License file, if applicable | `""` |
| `prometheusScraping.enabled` | Turn on and off /metrics endpoint for Prometheus scraping | `false` |
| `prometheusScraping.port` | Port to use for the metrics for Prometheus to scrap on | `9458` |
| `serviceMonitor.enabled` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator (prometheusScraping should be enabled) | `false` |
| `serviceMonitor.intervals` | The intervals at which metrics should be scraped | `[30s]` |
| `serviceMonitor.ports` | The port names at which container exposes Prometheus metrics | `[metrics]` |
| `serviceMonitor.interval` | deprecated, use `serviceMonitor.intervals` instead | `30s` |
| `serviceMonitor.port` | deprecated, use `serviceMonitor.ports` instead | `metrics` |
| `livenessProbe.enabled` | Turn on and off liveness probe | `true` |
| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `60` |
| `livenessProbe.periodSeconds` | How often to perform the probe | `15` |
| `livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `global.tolerations` | common tolerations for all pods (rocket.chat and all microservices) | [] |
| `global.annotations` | common annotations for all pods (rocket.chat and all microservices) | {} |
| `global.nodeSelector` | common nodeSelector for all pods (rocket.chat and all microservices) | {} |
| `global.affinity` | common affinity for all pods (rocket.chat and all microservices) | {} |
| `tolerations` | tolerations for main rocket.chat pods (the `meteor` service) | [] |
| `microservices.enabled` | Use [microservices](https://docs.rocket.chat/quick-start/installing-and-updating/micro-services-setup-beta) architecture | `false` |
| `microservices.presence.replicas` | Number of replicas to run for the given service | `1` |
| `microservices.ddpStreamer.replicas` | Idem | `1` |
| `microservices.streamHub.replicas` | Idem | `1` |
| `microservices.accounts.replicas` | Idem | `1` |
| `microservices.authorization.replicas` | Idem | `1` |
| `microservices.nats.replicas` | Idem | `1` |
| `microservices.presence.tolerations` | Pod tolerations | [] |
| `microservices.ddpStreamer.tolerations` | Pod tolerations | [] |
| `microservices.streamHub.tolerations` | Pod tolerations | [] |
| `microservices.accounts.tolerations` | Pod tolerations | [] |
| `microservices.authorization.tolerations` | Pod tolerations | [] |
| `microservices.presence.annotations` | Pod annotations | {} |
| `microservices.ddpStreamer.annotations` | Pod annotations | {} |
| `microservices.streamHub.annotations` | Pod annotations | {} |
| `microservices.accounts.annotations` | Pod annotations | {} |
| `microservices.authorization.annotations` | Pod annotations | {} |
| `microservices.presence.nodeSelector` | nodeSelector for the Pod | {} |
| `microservices.ddpStreamer.nodeSelector` | nodeSelector for the Pod | {} |
| `microservices.streamHub.nodeSelector` | nodeSelector for the Pod | {} |
| `microservices.accounts.nodeSelector` | nodeSelector for the Pod | {} |
| `microservices.authorization.nodeSelector`| nodeSelector for the Pod | {} |
| `microservices.presence.affinity` | Pod affinity | {} |
| `microservices.ddpStreamer.affinity` | Pod affinity | {} |
| `microservices.streamHub.affinity` | Pod affinity | {} |
| `microservices.accounts.affinity` | Pod affinity | {} |
| `microservices.authorization.affinity` | Pod affinity | {} |
| `readinessProbe.enabled` | Turn on and off readiness probe | `true` |
| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `10` |
| `readinessProbe.periodSeconds` | How often to perform the probe | `15` |
| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
| `registrationToken` | Registration Token for [Rocket.Chat Cloud ](https://cloud.rocket.chat) | "" |
| `service.annotations` | Annotations for the Rocket.Chat service | `{}` |
| `service.labels` | Additional labels for the Rocket.Chat service | `{}` |
| `service.type` | The service type to use | `ClusterIP` |
| `service.port` | The service port | `80` |
| `service.nodePort` | The node port used if the service is of type `NodePort` | `""` |
| `podDisruptionBudget.enabled` | Enable or disable PDB for RC deployment | `true` |
| `podLabels` | Additional pod labels for the Rocket.Chat pods | `{}` |
| `podAnnotations` | Additional pod annotations for the Rocket.Chat pods | `{}` |
| `federation.enabled` | Enable Rocket.Chat federation (through matrix) | |
| `federation.host` | FQDN for your matrix instance | |
| `federation.image.repository` | Image repository to use for federation image | `matrixdotorg` |
| `federation.image.registry` | Image registry to use for federation image | `docker.io` |
| `federation.image.tag` | Image tag to use for federation image | `latest` |
| `federation.persistence.enabled` | Enabling persistence for matrix pod | |
| `postgresql.enabled` | Enabling postgresql for matrix (synapse); if false, uses sqlite | `false` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```bash
$ helm install rocketchat -f values.yaml rocketchat/rocketchat
```
### Database Setup
Rocket.Chat uses a MongoDB instance to persist its data.
By default, the [MongoDB](https://github.com/bitnami/charts/tree/master/bitnami/mongodb) chart is deployed and a single MongoDB instance is created as the primary in a replicaset.
Please refer to this (MongoDB) chart for additional MongoDB configuration options.
If you are using chart defaults, make sure to set at least the `mongodb.auth.rootPassword` and `mongodb.auth.passwords` values.
> **WARNING**: The root credentials are used to connect to the MongoDB OpLog
#### Using an External Database
This chart supports using an existing MongoDB instance. Use the `` configuration options and disable MongoDB with `--set mongodb.enabled=false`
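A minimal sketch of what that could look like in a values file, assuming placeholder hostnames and credentials and using the `externalMongodbUrl` / `externalMongodbOplogUrl` options from the parameter table above:
```yaml
mongodb:
  enabled: false
# Placeholders for illustration only; point these at your own MongoDB deployment.
externalMongodbUrl: mongodb://rocketchat:password@mongodb.example.svc:27017/rocketchat
externalMongodbOplogUrl: mongodb://root:rootpassword@mongodb.example.svc:27017/local?replicaSet=rs0&authSource=admin
```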
### Configuring Additional Environment Variables
```yaml
extraEnv: |
- name: MONGO_OPTIONS
value: '{"ssl": "true"}'
```
### Specifying additional volumes
Sometimes, it's needed to include extra sets of files by means of exposing
them to the container as a mountpoint. The most common use case is the
inclusion of SSL CA certificates.
```yaml
extraVolumes:
- name: etc-certs
hostPath:
- path: /etc/ssl/certs
type: Directory
extraVolumeMounts:
- mountPath: /etc/ssl/certs
name: etc-certs
readOnly: true
```
### Increasing Server Capacity and HA Setup
To increase the capacity of the server, you can scale up the number of Rocket.Chat server instances across available computing resources in your cluster, for example,
```bash
$ kubectl scale --replicas=3 deployment/rocketchat
```
By default, this chart creates one MongoDB instance as a Primary in a replicaset. This is the minimum requirement to run Rocket.Chat 1.x+. You can also scale up the capacity and availability of the MongoDB cluster independently. Please see the [MongoDB chart](https://github.com/bitnami/charts/tree/master/bitnami/mongodb) for configuration information.
For information on running Rocket.Chat in scaled configurations, see the [documentation](https://rocket.chat/docs/installation/docker-containers/high-availability-install/#guide-to-install-rocketchat-as-ha-with-mongodb-replicaset-as-backend) for more details.
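As a rough sketch (not a verified configuration), the bundled MongoDB dependency can be scaled through the Bitnami chart's own values; `architecture` is documented in the bundled MongoDB chart README below, and `replicaCount` is assumed here to be the Bitnami option controlling the number of members:
```yaml
mongodb:
  enabled: true
  architecture: replicaset  # Bitnami MongoDB chart option: standalone or replicaset
  replicaCount: 3           # assumed Bitnami option for the number of replica set members
  auth:
    rootPassword: rocketchatRootPassword  # placeholder
    passwords:
      - rocketchatPassword                # placeholder
```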
### Adding tolerations, annotations, nodeSelector and affinity
To add common tolerations, annotations, nodeSelector and affinity to all deployments
```yaml
global:
tolerations:
- # here
annotations:
# here
nodeSelector:
# here
# kubernetes.io/arch: amd64
affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/arch
# operator: In
# values:
# - amd64
```
Override tolerations or annotations for each microservice by adding to respective block's configuration. For example to override the global tolerations and annotations for ddp-streamer pods,
```yaml
microservices:
ddpStreamer:
tolerations:
- # add here
annotations:
# add here
nodeSelector:
# here
# kubernetes.io/arch: amd64
affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/arch
# operator: In
# values:
# - amd64
```
To override tolerations for `meteor` service, or the main rocket.chat deployment, add to the root tolerations key.
```yaml
tolerations:
- # ...
```
To override annotations for `meteor` service, or the main rocket.chat deployment, add to the root podAnnotations key.
```yaml
podAnnotations:
# add here
```
To override the nodeSelector for `meteor` service, or the main rocket.chat deployment, add to the root nodeSelector key.
```yaml
nodeSelector:
# add here
```
To override the affinity for `meteor` service, or the main rocket.chat deployment, add to the root affinity key.
```yaml
affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/arch
# operator: In
# values:
# - amd64
```
### Manage MongoDB and NATS nodeSelector and Affinity
If MongoDB and NATS own charts are used in the deployment, add the nodeSelector and Affinity to each service. Example:
```yaml
mongodb:
enabled: true
nodeSelector:
kubernetes.io/arch: amd64
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: In
values:
- amd64
nats:
nodeSelector:
kubernetes.io/arch: amd64
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/arch
operator: In
values:
- amd64
```
### Manage MongoDB secrets
This chart provides several ways to manage the Connection for MongoDB
* Values passed to the chart (externalMongodbUrl, externalMongodbOplogUrl)
* An ExistingMongodbSecret containing the MongoURL and MongoOplogURL
```yaml
apiVersion: v1
kind: Secret
metadata:
name: my-secret
type: Opaque
stringData:
mongo-uri: mongodb://user:password@localhost:27017/rocketchat
mongo-oplog-uri: mongodb://user:password@localhost:27017/local?replicaSet=rs0&authSource=admin
```
## Federation
You can enable federation by setting `federation.enabled` to true.
You need to make sure you have two domains, one for rocket.chat another for matrix.
```yaml
host: <rocket.chat domain>
federation:
host: <matrix domain>
```
Add the domains to ingress tls config
```yaml
ingress:
tls:
- secretName: <some secret>
hosts:
- <rocket.chat domain>
- <matrix domain>
```
For production, postgres is recommended. Enable postgres:
```yaml
postgresql:
enabled: true
```
For more details on configs, check [postgresql chart](https://artifacthub.io/packages/helm/bitnami/postgresql).
Since TLS is required, also make sure something like cert-manager is running on your cluster, and add the annotations to the ingress with `ingress.annotations` (or whatever is the recommended way for your certificate issuer).
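For example, with cert-manager the annotation can be set through `ingress.annotations`; the `letsencrypt-prod` issuer name below is only an assumption mirroring the issuer used elsewhere in this repository:
```yaml
ingress:
  enabled: true
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod  # assumed ClusterIssuer name
  tls:
    - secretName: <some secret>
      hosts:
        - <rocket.chat domain>
        - <matrix domain>
```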
## hooks
To add custom annotations and labels to pods spawned by pre-upgrade hook
```yaml
hooks:
preUpgrade:
podAnnotations: {} # here
podLabels: {} # here
```
## Upgrading
### To 5.4.3
Due to changes on upstream MongoDB chart, some variables have been renamed (previously deprecated), which, in turn changed how this chart generates its manifests. Values that need changing -
- `mongodb.auth.username` is no longer supported, and has been changed to `mongodb.auth.usernames` array. If you set it to something custom (defaults to `rocketchat`), make sure you update it to an array and the entry is the **first** entry in that array as that's what Rocket.Chat will use to connect to the database.
- `mongodb.auth.password` is no longer supported either and has been changed to `mongodb.auth.passwords` array. Update your values file to make it an array and make sure it's the first entry of that array.
- `mongodb.auth.database` is no longer supported either and has been changed to its plural version, `mongodb.auth.databases`. Update your values file, convert it to an array and make sure it's the first entry of that list.
- `mongodb.auth.rootUsername` and `mongodb.auth.rootPassword` are staying the same.
*`usernames`, `passwords` and `databases` arrays must be of the same length. Rocket.Chat chart will use the first entry for its mongodb connection string in `MONGO_URL` and `MONGO_OPLOG_URL`.*
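Put together, a hypothetical values snippet using the renamed keys could look like this (the first array entries are what Rocket.Chat uses; the passwords are placeholders):
```yaml
mongodb:
  auth:
    rootPassword: rocketchatRootPassword  # placeholder
    usernames:
      - rocketchat
    passwords:
      - rocketchatPassword                # placeholder
    databases:
      - rocketchat
```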
On each chart update, the used image tag gets updated, **in most cases**. Same is true for the MongoDB chart we use as our dependency. Pre-5.4.3, we had been using the chart version 10.x.x, but starting 5.4.3, the dependency chart version has been bumped to the latest available version, 13.x.x. This chart defaults to mongodb 6.0.x as of the time of writing this.
As a warning, this chart will not handle MongoDB upgrades and will depend on the user to make sure it's running on the supported version. The upgrade will fail if any of the following requirements are not met -
- must not skip a MongoDB release. E.g. 4.2.x to 5.0.x will fail
- current `featureCompatibilityVersion` must be compatible with the version user is trying to upgrade to. E.g. if current database version and feature compatibility is 4.4 and 4.2 respectively, but user is trying to upgrade to 5.0, it'll fail
The chart will not check whether the MongoDB version is supported by the Rocket.Chat version, since deployments might occur in an airgapped environment. It is up to the user to make sure of that; Rocket.Chat's release notes can be checked to confirm compatibility.
To get the currently deployed MongoDB version, the easiest method is to get into the mongo shell and run `db.version()`.
It is advised to pin your MongoDB dependency in the values file.
```yaml
mongodb:
image:
tag: # find from https://hub.docker.com/r/bitnami/mongodb/tags
```
References:
- [Run a shell inside a container (to check mongodb version)](https://kubernetes.io/docs/tasks/debug/debug-application/get-shell-running-container/)
- [MongoDB upgrade official documentation](https://www.mongodb.com/docs/manual/tutorial/upgrade-revision/)
- [MongoDB helm chart options](https://artifacthub.io/packages/helm/bitnami/mongodb)
### To 6.13.0
**This is only applicable if you both, enabled federation in chart version >=6.8, and want to keep using lighttpd.**
IFF you manually enabled ingress.federation.serveWellKnown (which was a hidden setting) before, during upgrade, disable it once before enabling it again.
Chart contained a bug that would cause `wellknown` deployment to fail to update (illegal live modification of `matchLabels`).
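In values terms, the toggle described above would be a sketch along these lines (key path taken from the setting name mentioned in this note):
```yaml
# First upgrade: disable the previously enabled hidden setting once
ingress:
  federation:
    serveWellKnown: false
```
Re-enable it (`serveWellKnown: true`) in a follow-up upgrade once the old `wellknown` deployment has been removed.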

View File

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View File

@@ -0,0 +1,6 @@
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
version: 2.11.1
digest: sha256:ead8f26c76a9ec082f23629a358e8efd8f88d87aaed734bf41febcb8a7bc5d4c
generated: "2023-09-21T21:30:46.044974371Z"

View File

@@ -0,0 +1,41 @@
annotations:
category: Database
images: |
- name: kubectl
image: docker.io/bitnami/kubectl:1.25.14-debian-11-r5
- name: mongodb-exporter
image: docker.io/bitnami/mongodb-exporter:0.39.0-debian-11-r106
- name: mongodb
image: docker.io/bitnami/mongodb:6.0.10-debian-11-r8
- name: nginx
image: docker.io/bitnami/nginx:1.25.2-debian-11-r32
- name: os-shell
image: docker.io/bitnami/os-shell:11-debian-11-r72
licenses: Apache-2.0
apiVersion: v2
appVersion: 6.0.10
dependencies:
- name: common
repository: oci://registry-1.docker.io/bitnamicharts
tags:
- bitnami-common
version: 2.x.x
description: MongoDB(R) is a relational open source NoSQL database. Easy to use, it
stores data in JSON-like documents. Automated scalability and high-performance.
Ideal for developing cloud native applications.
home: https://bitnami.com
icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png
keywords:
- mongodb
- database
- nosql
- cluster
- replicaset
- replication
maintainers:
- name: VMware, Inc.
url: https://github.com/bitnami/charts
name: mongodb
sources:
- https://github.com/bitnami/charts/tree/main/bitnami/mongodb
version: 13.18.5

View File

@@ -0,0 +1,864 @@
<!--- app-name: MongoDB&reg; -->
# MongoDB(R) packaged by Bitnami
MongoDB(R) is a relational open source NoSQL database. Easy to use, it stores data in JSON-like documents. Automated scalability and high-performance. Ideal for developing cloud native applications.
[Overview of MongoDB&reg;](http://www.mongodb.org)
Disclaimer: The respective trademarks mentioned in the offering are owned by the respective companies. We do not provide a commercial license for any of these products. This listing has an open-source license. MongoDB(R) is run and maintained by MongoDB, which is a completely separate project from Bitnami.
## TL;DR
```console
helm install my-release oci://registry-1.docker.io/bitnamicharts/mongodb
```
## Introduction
This chart bootstraps a [MongoDB(&reg;)](https://github.com/bitnami/containers/tree/main/bitnami/mongodb) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
Looking to use MongoDB&reg; in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
## Prerequisites
- Kubernetes 1.19+
- Helm 3.2.0+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install my-release oci://registry-1.docker.io/bitnamicharts/mongodb
```
The command deploys MongoDB(&reg;) on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Architecture
This chart allows installing MongoDB(&reg;) using two different architecture setups: `standalone` or `replicaset`. Use the `architecture` parameter to choose the one to use:
```console
architecture="standalone"
architecture="replicaset"
```
Refer to the [chart documentation for more information on each of these architectures](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/get-started/understand-architecture/).
## Parameters
### Global parameters
| Name | Description | Value |
| -------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ----- |
| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
| `global.namespaceOverride` | Override the namespace for resource deployed by the chart, but can itself be overridden by the local namespaceOverride | `""` |
### Common parameters
| Name | Description | Value |
| ------------------------- | --------------------------------------------------------------------------------------------------------- | --------------- |
| `nameOverride` | String to partially override mongodb.fullname template (will maintain the release name) | `""` |
| `fullnameOverride` | String to fully override mongodb.fullname template | `""` |
| `namespaceOverride` | String to fully override common.names.namespace | `""` |
| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` |
| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` |
| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
| `commonLabels` | Add labels to all the deployed resources (sub-charts are not considered). Evaluated as a template | `{}` |
| `commonAnnotations` | Common annotations to add to all Mongo resources (sub-charts are not considered). Evaluated as a template | `{}` |
| `topologyKey` | Override common lib default topology key. If empty - "kubernetes.io/hostname" is used | `""` |
| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` |
| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` |
| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` |
### MongoDB(&reg;) parameters
| Name | Description | Value |
| -------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- |
| `image.registry` | MongoDB(&reg;) image registry | `docker.io` |
| `image.repository`               | MongoDB(&reg;) image repository                                                                                                                               | `bitnami/mongodb`      |
| `image.tag` | MongoDB(&reg;) image tag (immutable tags are recommended) | `6.0.10-debian-11-r8` |
| `image.digest` | MongoDB(&reg;) image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `image.pullPolicy` | MongoDB(&reg;) image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `image.debug` | Set to true if you would like to see extra information on logs | `false` |
| `schedulerName` | Name of the scheduler (other than default) to dispatch pods | `""` |
| `architecture` | MongoDB(&reg;) architecture (`standalone` or `replicaset`) | `standalone` |
| `useStatefulSet` | Set to true to use a StatefulSet instead of a Deployment (only when `architecture=standalone`) | `false` |
| `auth.enabled` | Enable authentication | `true` |
| `auth.rootUser` | MongoDB(&reg;) root user | `root` |
| `auth.rootPassword` | MongoDB(&reg;) root password | `""` |
| `auth.usernames` | List of custom users to be created during the initialization | `[]` |
| `auth.passwords` | List of passwords for the custom users set at `auth.usernames` | `[]` |
| `auth.databases` | List of custom databases to be created during the initialization | `[]` |
| `auth.username` | DEPRECATED: use `auth.usernames` instead | `""` |
| `auth.password` | DEPRECATED: use `auth.passwords` instead | `""` |
| `auth.database` | DEPRECATED: use `auth.databases` instead | `""` |
| `auth.replicaSetKey` | Key used for authentication in the replicaset (only when `architecture=replicaset`) | `""` |
| `auth.existingSecret` | Existing secret with MongoDB(&reg;) credentials (keys: `mongodb-passwords`, `mongodb-root-password`, `mongodb-metrics-password`, `mongodb-replica-set-key`) | `""` |
| `tls.enabled` | Enable MongoDB(&reg;) TLS support between nodes in the cluster as well as between mongo clients and nodes | `false` |
| `tls.autoGenerated` | Generate a custom CA and self-signed certificates | `true` |
| `tls.existingSecret` | Existing secret with TLS certificates (keys: `mongodb-ca-cert`, `mongodb-ca-key`) | `""` |
| `tls.caCert`                     | Custom CA certificate (base64 encoded)                                                                                                                       | `""`                   |
| `tls.caKey` | CA certificate private key (base64 encoded) | `""` |
| `tls.pemChainIncluded` | Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. | `false` |
| `tls.standalone.existingSecret` | Existing secret with TLS certificates (`tls.key`, `tls.crt`, `ca.crt`) or (`tls.key`, `tls.crt`) with tls.pemChainIncluded set as enabled. | `""` |
| `tls.replicaset.existingSecrets` | Array of existing secrets with TLS certificates (`tls.key`, `tls.crt`, `ca.crt`) or (`tls.key`, `tls.crt`) with tls.pemChainIncluded set as enabled. | `[]` |
| `tls.hidden.existingSecrets` | Array of existing secrets with TLS certificates (`tls.key`, `tls.crt`, `ca.crt`) or (`tls.key`, `tls.crt`) with tls.pemChainIncluded set as enabled. | `[]` |
| `tls.arbiter.existingSecret` | Existing secret with TLS certificates (`tls.key`, `tls.crt`, `ca.crt`) or (`tls.key`, `tls.crt`) with tls.pemChainIncluded set as enabled. | `""` |
| `tls.image.registry` | Init container TLS certs setup image registry | `docker.io` |
| `tls.image.repository` | Init container TLS certs setup image repository | `bitnami/nginx` |
| `tls.image.tag` | Init container TLS certs setup image tag (immutable tags are recommended) | `1.25.2-debian-11-r32` |
| `tls.image.digest` | Init container TLS certs setup image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `tls.image.pullPolicy` | Init container TLS certs setup image pull policy | `IfNotPresent` |
| `tls.image.pullSecrets` | Init container TLS certs specify docker-registry secret names as an array | `[]` |
| `tls.extraDnsNames` | Add extra dns names to the CA, can solve x509 auth issue for pod clients | `[]` |
| `tls.mode` | Allows to set the tls mode which should be used when tls is enabled (options: `allowTLS`, `preferTLS`, `requireTLS`) | `requireTLS` |
| `tls.resources.limits` | Init container generate-tls-certs resource limits | `{}` |
| `tls.resources.requests` | Init container generate-tls-certs resource requests | `{}` |
| `hostAliases` | Add deployment host aliases | `[]` |
| `replicaSetName` | Name of the replica set (only when `architecture=replicaset`) | `rs0` |
| `replicaSetHostnames` | Enable DNS hostnames in the replicaset config (only when `architecture=replicaset`) | `true` |
| `enableIPv6` | Switch to enable/disable IPv6 on MongoDB(&reg;) | `false` |
| `directoryPerDB` | Switch to enable/disable DirectoryPerDB on MongoDB(&reg;) | `false` |
| `systemLogVerbosity` | MongoDB(&reg;) system log verbosity level | `0` |
| `disableSystemLog` | Switch to enable/disable MongoDB(&reg;) system log | `false` |
| `disableJavascript` | Switch to enable/disable MongoDB(&reg;) server-side JavaScript execution | `false` |
| `enableJournal` | Switch to enable/disable MongoDB(&reg;) Journaling | `true` |
| `configuration` | MongoDB(&reg;) configuration file to be used for Primary and Secondary nodes | `""` |
### replicaSetConfigurationSettings settings applied during runtime (not via configuration file)
| Name | Description | Value |
| ----------------------------------------------- | --------------------------------------------------------------------------------------------------- | ------- |
| `replicaSetConfigurationSettings.enabled`       | Switch to enable/disable configuring MongoDB(&reg;) run-time rs.conf settings                        | `false` |
| `replicaSetConfigurationSettings.configuration` | run-time rs.conf settings | `{}` |
| `existingConfigmap` | Name of existing ConfigMap with MongoDB(&reg;) configuration for Primary and Secondary nodes | `""` |
| `initdbScripts` | Dictionary of initdb scripts | `{}` |
| `initdbScriptsConfigMap` | Existing ConfigMap with custom initdb scripts | `""` |
| `command` | Override default container command (useful when using custom images) | `[]` |
| `args` | Override default container args (useful when using custom images) | `[]` |
| `extraFlags` | MongoDB(&reg;) additional command line flags | `[]` |
| `extraEnvVars` | Extra environment variables to add to MongoDB(&reg;) pods | `[]` |
| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` |
| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `""` |
### MongoDB(&reg;) statefulset parameters
| Name | Description | Value |
| --------------------------------------------------- | --------------------------------------------------------------------------------------------------------------- | ---------------- |
| `annotations`                                        | Additional annotations to be added to the MongoDB(&reg;) statefulset. Evaluated as a template                    | `{}`             |
| `labels`                                             | Additional labels to be added to the MongoDB(&reg;) statefulset. Evaluated as a template                         | `{}`             |
| `replicaCount` | Number of MongoDB(&reg;) nodes (only when `architecture=replicaset`) | `2` |
| `updateStrategy.type`                                | Strategy to use to replace existing MongoDB(&reg;) pods (applies to the Deployment when `architecture=standalone` and `useStatefulSet=false`, and to the StatefulSet otherwise) | `RollingUpdate`  |
| `podManagementPolicy` | Pod management policy for MongoDB(&reg;) | `OrderedReady` |
| `podAffinityPreset` | MongoDB(&reg;) Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `podAntiAffinityPreset` | MongoDB(&reg;) Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `nodeAffinityPreset.type` | MongoDB(&reg;) Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `nodeAffinityPreset.key`                             | MongoDB(&reg;) Node label key to match. Ignored if `affinity` is set.                                            | `""`             |
| `nodeAffinityPreset.values` | MongoDB(&reg;) Node label values to match. Ignored if `affinity` is set. | `[]` |
| `affinity` | MongoDB(&reg;) Affinity for pod assignment | `{}` |
| `nodeSelector` | MongoDB(&reg;) Node labels for pod assignment | `{}` |
| `tolerations` | MongoDB(&reg;) Tolerations for pod assignment | `[]` |
| `topologySpreadConstraints` | MongoDB(&reg;) Spread Constraints for Pods | `[]` |
| `lifecycleHooks` | LifecycleHook for the MongoDB(&reg;) container(s) to automate configuration before or after startup | `{}` |
| `terminationGracePeriodSeconds` | MongoDB(&reg;) Termination Grace Period | `""` |
| `podLabels` | MongoDB(&reg;) pod labels | `{}` |
| `podAnnotations` | MongoDB(&reg;) Pod annotations | `{}` |
| `priorityClassName` | Name of the existing priority class to be used by MongoDB(&reg;) pod(s) | `""` |
| `runtimeClassName` | Name of the runtime class to be used by MongoDB(&reg;) pod(s) | `""` |
| `podSecurityContext.enabled` | Enable MongoDB(&reg;) pod(s)' Security Context | `true` |
| `podSecurityContext.fsGroup` | Group ID for the volumes of the MongoDB(&reg;) pod(s) | `1001` |
| `podSecurityContext.sysctls` | sysctl settings of the MongoDB(&reg;) pod(s)' | `[]` |
| `containerSecurityContext.enabled` | Enable MongoDB(&reg;) container(s)' Security Context | `true` |
| `containerSecurityContext.runAsUser` | User ID for the MongoDB(&reg;) container | `1001` |
| `containerSecurityContext.runAsGroup` | Group ID for the MongoDB(&reg;) container | `0` |
| `containerSecurityContext.runAsNonRoot` | Set MongoDB(&reg;) container's Security Context runAsNonRoot | `true` |
| `containerSecurityContext.allowPrivilegeEscalation` | Is it possible to escalate MongoDB(&reg;) pod(s) privileges | `false` |
| `containerSecurityContext.seccompProfile.type` | Set MongoDB(&reg;) container's Security Context seccompProfile type | `RuntimeDefault` |
| `containerSecurityContext.capabilities.drop` | Set MongoDB(&reg;) container's Security Context capabilities to drop | `["ALL"]` |
| `resources.limits` | The resources limits for MongoDB(&reg;) containers | `{}` |
| `resources.requests` | The requested resources for MongoDB(&reg;) containers | `{}` |
| `containerPorts.mongodb` | MongoDB(&reg;) container port | `27017` |
| `livenessProbe.enabled` | Enable livenessProbe | `true` |
| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` |
| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `10` |
| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `readinessProbe.enabled` | Enable readinessProbe | `true` |
| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `startupProbe.enabled` | Enable startupProbe | `false` |
| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` |
| `startupProbe.periodSeconds` | Period seconds for startupProbe | `20` |
| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `10` |
| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` |
| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
| `customLivenessProbe` | Override default liveness probe for MongoDB(&reg;) containers | `{}` |
| `customReadinessProbe` | Override default readiness probe for MongoDB(&reg;) containers | `{}` |
| `customStartupProbe` | Override default startup probe for MongoDB(&reg;) containers | `{}` |
| `initContainers` | Add additional init containers for the hidden node pod(s) | `[]` |
| `sidecars` | Add additional sidecar containers for the MongoDB(&reg;) pod(s) | `[]` |
| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the MongoDB(&reg;) container(s) | `[]` |
| `extraVolumes` | Optionally specify extra list of additional volumes to the MongoDB(&reg;) statefulset | `[]` |
| `pdb.create` | Enable/disable a Pod Disruption Budget creation for MongoDB(&reg;) pod(s) | `false` |
| `pdb.minAvailable` | Minimum number/percentage of MongoDB(&reg;) pods that must still be available after the eviction | `1` |
| `pdb.maxUnavailable` | Maximum number/percentage of MongoDB(&reg;) pods that may be made unavailable after the eviction | `""` |
### Traffic exposure parameters
| Name | Description | Value |
| ------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- |
| `service.nameOverride` | MongoDB(&reg;) service name | `""` |
| `service.type` | Kubernetes Service type (only for standalone architecture) | `ClusterIP` |
| `service.portName` | MongoDB(&reg;) service port name (only for standalone architecture) | `mongodb` |
| `service.ports.mongodb` | MongoDB(&reg;) service port. | `27017` |
| `service.nodePorts.mongodb` | Port to bind to for NodePort and LoadBalancer service types (only for standalone architecture) | `""` |
| `service.clusterIP` | MongoDB(&reg;) service cluster IP (only for standalone architecture) | `""` |
| `service.externalIPs`                                          | Specify externalIPs for the ClusterIP service (only for standalone architecture)                                                                  | `[]`                   |
| `service.loadBalancerIP` | loadBalancerIP for MongoDB(&reg;) Service (only for standalone architecture) | `""` |
| `service.loadBalancerClass` | loadBalancerClass for MongoDB(&reg;) Service (only for standalone architecture) | `""` |
| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer (only for standalone architecture) | `[]` |
| `service.allocateLoadBalancerNodePorts`                        | Whether to allocate node ports when service type is LoadBalancer                                                                                  | `true`                 |
| `service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
| `service.annotations` | Provide any additional annotations that may be required | `{}` |
| `service.externalTrafficPolicy` | service external traffic policy (only for standalone architecture) | `Local` |
| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
| `service.headless.annotations` | Annotations for the headless service. | `{}` |
| `externalAccess.enabled` | Enable Kubernetes external cluster access to MongoDB(&reg;) nodes (only for replicaset architecture) | `false` |
| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs by querying the K8s API | `false` |
| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` |
| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` |
| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.14-debian-11-r5` |
| `externalAccess.autoDiscovery.image.digest` | Init container auto-discovery image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` |
| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` |
| `externalAccess.autoDiscovery.resources.limits` | Init container auto-discovery resource limits | `{}` |
| `externalAccess.autoDiscovery.resources.requests` | Init container auto-discovery resource requests | `{}` |
| `externalAccess.externalMaster.enabled` | Use external master for bootstrapping | `false` |
| `externalAccess.externalMaster.host` | External master host to bootstrap from | `""` |
| `externalAccess.externalMaster.port` | Port for MongoDB(&reg;) service external master host | `27017` |
| `externalAccess.service.type` | Kubernetes Service type for external access. Allowed values: NodePort, LoadBalancer or ClusterIP | `LoadBalancer` |
| `externalAccess.service.portName` | MongoDB(&reg;) port name used for external access when service type is LoadBalancer | `mongodb` |
| `externalAccess.service.ports.mongodb` | MongoDB(&reg;) port used for external access when service type is LoadBalancer | `27017` |
| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for MongoDB(&reg;) nodes | `[]` |
| `externalAccess.service.loadBalancerClass` | loadBalancerClass when service type is LoadBalancer | `""` |
| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` |
| `externalAccess.service.allocateLoadBalancerNodePorts`         | Whether to allocate node ports when service type is LoadBalancer                                                                                  | `true`                 |
| `externalAccess.service.externalTrafficPolicy` | MongoDB(&reg;) service external traffic policy | `Local` |
| `externalAccess.service.nodePorts` | Array of node ports used to configure MongoDB(&reg;) advertised hostname when service type is NodePort | `[]` |
| `externalAccess.service.domain` | Domain or external IP used to configure MongoDB(&reg;) advertised hostname when service type is NodePort | `""` |
| `externalAccess.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
| `externalAccess.service.annotations` | Service annotations for external access | `{}` |
| `externalAccess.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
| `externalAccess.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
| `externalAccess.hidden.enabled` | Enable Kubernetes external cluster access to MongoDB(&reg;) hidden nodes | `false` |
| `externalAccess.hidden.service.type` | Kubernetes Service type for external access. Allowed values: NodePort or LoadBalancer | `LoadBalancer` |
| `externalAccess.hidden.service.portName` | MongoDB(&reg;) port name used for external access when service type is LoadBalancer | `mongodb` |
| `externalAccess.hidden.service.ports.mongodb` | MongoDB(&reg;) port used for external access when service type is LoadBalancer | `27017` |
| `externalAccess.hidden.service.loadBalancerIPs` | Array of load balancer IPs for MongoDB(&reg;) nodes | `[]` |
| `externalAccess.hidden.service.loadBalancerClass` | loadBalancerClass when service type is LoadBalancer | `""` |
| `externalAccess.hidden.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` |
| `externalAccess.hidden.service.allocateLoadBalancerNodePorts`  | Whether to allocate node ports when service type is LoadBalancer                                                                                  | `true`                 |
| `externalAccess.hidden.service.externalTrafficPolicy` | MongoDB(&reg;) service external traffic policy | `Local` |
| `externalAccess.hidden.service.nodePorts` | Array of node ports used to configure MongoDB(&reg;) advertised hostname when service type is NodePort. Length must be the same as replicaCount | `[]` |
| `externalAccess.hidden.service.domain` | Domain or external IP used to configure MongoDB(&reg;) advertised hostname when service type is NodePort | `""` |
| `externalAccess.hidden.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
| `externalAccess.hidden.service.annotations` | Service annotations for external access | `{}` |
| `externalAccess.hidden.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` |
| `externalAccess.hidden.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
### Persistence parameters
| Name | Description | Value |
| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------- |
| `persistence.enabled` | Enable MongoDB(&reg;) data persistence using PVC | `true` |
| `persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` |
| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) | `""` |
| `persistence.resourcePolicy`                  | Set it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete the PVCs after the chart is deleted | `""`                |
| `persistence.storageClass` | PVC Storage Class for MongoDB(&reg;) data volume | `""` |
| `persistence.accessModes` | PV Access Mode | `["ReadWriteOnce"]` |
| `persistence.size` | PVC Storage Request for MongoDB(&reg;) data volume | `8Gi` |
| `persistence.annotations` | PVC annotations | `{}` |
| `persistence.mountPath` | Path to mount the volume at | `/bitnami/mongodb` |
| `persistence.subPath` | Subdirectory of the volume to mount at | `""` |
| `persistence.volumeClaimTemplates.selector` | A label query over volumes to consider for binding (e.g. when using local volumes) | `{}` |
| `persistence.volumeClaimTemplates.requests` | Custom PVC requests attributes | `{}` |
| `persistence.volumeClaimTemplates.dataSource` | Add dataSource to the VolumeClaimTemplate | `{}` |
### Backup parameters
| Name | Description | Value |
| ------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------- |
| `backup.enabled`                                                   | Enable regular logical dumps (backups) of the database                                                                                  | `false`             |
| `backup.cronjob.schedule` | Set the cronjob parameter schedule | `@daily` |
| `backup.cronjob.concurrencyPolicy` | Set the cronjob parameter concurrencyPolicy | `Allow` |
| `backup.cronjob.failedJobsHistoryLimit` | Set the cronjob parameter failedJobsHistoryLimit | `1` |
| `backup.cronjob.successfulJobsHistoryLimit` | Set the cronjob parameter successfulJobsHistoryLimit | `3` |
| `backup.cronjob.startingDeadlineSeconds` | Set the cronjob parameter startingDeadlineSeconds | `""` |
| `backup.cronjob.ttlSecondsAfterFinished` | Set the cronjob parameter ttlSecondsAfterFinished | `""` |
| `backup.cronjob.restartPolicy` | Set the cronjob parameter restartPolicy | `OnFailure` |
| `backup.cronjob.containerSecurityContext.runAsUser` | User ID for the backup container | `1001` |
| `backup.cronjob.containerSecurityContext.runAsGroup` | Group ID for the backup container | `0` |
| `backup.cronjob.containerSecurityContext.runAsNonRoot` | Set backup container's Security Context runAsNonRoot | `true` |
| `backup.cronjob.containerSecurityContext.readOnlyRootFilesystem` | Is the container itself readonly | `true` |
| `backup.cronjob.containerSecurityContext.allowPrivilegeEscalation` | Is it possible to escalate backup pod(s) privileges | `false` |
| `backup.cronjob.containerSecurityContext.seccompProfile.type` | Set backup container's Security Context seccompProfile type | `RuntimeDefault` |
| `backup.cronjob.containerSecurityContext.capabilities.drop` | Set backup container's Security Context capabilities to drop | `["ALL"]` |
| `backup.cronjob.command` | Set backup container's command to run | `[]` |
| `backup.cronjob.labels` | Set the cronjob labels | `{}` |
| `backup.cronjob.annotations` | Set the cronjob annotations | `{}` |
| `backup.cronjob.storage.existingClaim` | Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) | `""` |
| `backup.cronjob.storage.resourcePolicy`                            | Set it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete the PVCs after the chart is deleted | `""`                |
| `backup.cronjob.storage.storageClass` | PVC Storage Class for the backup data volume | `""` |
| `backup.cronjob.storage.accessModes` | PV Access Mode | `["ReadWriteOnce"]` |
| `backup.cronjob.storage.size` | PVC Storage Request for the backup data volume | `8Gi` |
| `backup.cronjob.storage.annotations` | PVC annotations | `{}` |
| `backup.cronjob.storage.mountPath` | Path to mount the volume at | `/backup/mongodb` |
| `backup.cronjob.storage.subPath` | Subdirectory of the volume to mount at | `""` |
| `backup.cronjob.storage.volumeClaimTemplates.selector` | A label query over volumes to consider for binding (e.g. when using local volumes) | `{}` |
### RBAC parameters
| Name | Description | Value |
| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `serviceAccount.create` | Enable creation of ServiceAccount for MongoDB(&reg;) pods | `true` |
| `serviceAccount.name` | Name of the created serviceAccount | `""` |
| `serviceAccount.annotations` | Additional Service Account annotations | `{}` |
| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` |
| `rbac.create` | Whether to create & use RBAC resources or not | `false` |
| `rbac.rules` | Custom rules to create following the role specification | `[]` |
| `podSecurityPolicy.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` |
| `podSecurityPolicy.allowPrivilegeEscalation` | Enable privilege escalation | `false` |
| `podSecurityPolicy.privileged` | Allow privileged | `false` |
| `podSecurityPolicy.spec` | Specify the full spec to use for Pod Security Policy | `{}` |
### Volume Permissions parameters
| Name | Description | Value |
| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | ------------------ |
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/os-shell` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r72` |
| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` |
| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` |
| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` |
| `volumePermissions.securityContext.runAsUser` | User ID for the volumePermissions container | `0` |
### Arbiter parameters
| Name | Description | Value |
| ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | ---------------- |
| `arbiter.enabled` | Enable deploying the arbiter | `true` |
| `arbiter.hostAliases` | Add deployment host aliases | `[]` |
| `arbiter.configuration` | Arbiter configuration file to be used | `""` |
| `arbiter.existingConfigmap` | Name of existing ConfigMap with Arbiter configuration | `""` |
| `arbiter.command` | Override default container command (useful when using custom images) | `[]` |
| `arbiter.args` | Override default container args (useful when using custom images) | `[]` |
| `arbiter.extraFlags` | Arbiter additional command line flags | `[]` |
| `arbiter.extraEnvVars` | Extra environment variables to add to Arbiter pods | `[]` |
| `arbiter.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` |
| `arbiter.extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `""` |
| `arbiter.annotations`                                        | Additional annotations to be added to the Arbiter statefulset                                      | `{}`             |
| `arbiter.labels`                                             | Additional labels to be added to the Arbiter statefulset                                           | `{}`             |
| `arbiter.topologySpreadConstraints` | MongoDB(&reg;) Spread Constraints for arbiter Pods | `[]` |
| `arbiter.lifecycleHooks` | LifecycleHook for the Arbiter container to automate configuration before or after startup | `{}` |
| `arbiter.terminationGracePeriodSeconds` | Arbiter Termination Grace Period | `""` |
| `arbiter.updateStrategy.type` | Strategy that will be employed to update Pods in the StatefulSet | `RollingUpdate` |
| `arbiter.podManagementPolicy` | Pod management policy for MongoDB(&reg;) | `OrderedReady` |
| `arbiter.schedulerName` | Name of the scheduler (other than default) to dispatch pods | `""` |
| `arbiter.podAffinityPreset` | Arbiter Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `arbiter.podAntiAffinityPreset` | Arbiter Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `arbiter.nodeAffinityPreset.type` | Arbiter Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `arbiter.nodeAffinityPreset.key`                             | Arbiter Node label key to match. Ignored if `affinity` is set.                                     | `""`             |
| `arbiter.nodeAffinityPreset.values` | Arbiter Node label values to match. Ignored if `affinity` is set. | `[]` |
| `arbiter.affinity` | Arbiter Affinity for pod assignment | `{}` |
| `arbiter.nodeSelector` | Arbiter Node labels for pod assignment | `{}` |
| `arbiter.tolerations` | Arbiter Tolerations for pod assignment | `[]` |
| `arbiter.podLabels` | Arbiter pod labels | `{}` |
| `arbiter.podAnnotations` | Arbiter Pod annotations | `{}` |
| `arbiter.priorityClassName` | Name of the existing priority class to be used by Arbiter pod(s) | `""` |
| `arbiter.runtimeClassName` | Name of the runtime class to be used by Arbiter pod(s) | `""` |
| `arbiter.podSecurityContext.enabled` | Enable Arbiter pod(s)' Security Context | `true` |
| `arbiter.podSecurityContext.fsGroup` | Group ID for the volumes of the Arbiter pod(s) | `1001` |
| `arbiter.podSecurityContext.sysctls` | sysctl settings of the Arbiter pod(s)' | `[]` |
| `arbiter.containerSecurityContext.enabled` | Enable Arbiter container(s)' Security Context | `true` |
| `arbiter.containerSecurityContext.runAsUser` | User ID for the Arbiter container | `1001` |
| `arbiter.containerSecurityContext.runAsGroup` | Group ID for the Arbiter container | `0` |
| `arbiter.containerSecurityContext.runAsNonRoot` | Set Arbiter containers' Security Context runAsNonRoot | `true` |
| `arbiter.containerSecurityContext.allowPrivilegeEscalation` | Is it possible to escalate Arbiter pod(s) privileges | `false` |
| `arbiter.containerSecurityContext.seccompProfile.type` | Set Arbiter container's Security Context seccompProfile type | `RuntimeDefault` |
| `arbiter.containerSecurityContext.capabilities.drop` | Set Arbiter container's Security Context capabilities to drop | `["ALL"]` |
| `arbiter.resources.limits` | The resources limits for Arbiter containers | `{}` |
| `arbiter.resources.requests` | The requested resources for Arbiter containers | `{}` |
| `arbiter.containerPorts.mongodb` | MongoDB(&reg;) arbiter container port | `27017` |
| `arbiter.livenessProbe.enabled` | Enable livenessProbe | `true` |
| `arbiter.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
| `arbiter.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` |
| `arbiter.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `10` |
| `arbiter.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `arbiter.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `arbiter.readinessProbe.enabled` | Enable readinessProbe | `true` |
| `arbiter.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `arbiter.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `20` |
| `arbiter.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `10` |
| `arbiter.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
| `arbiter.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `arbiter.startupProbe.enabled` | Enable startupProbe | `false` |
| `arbiter.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` |
| `arbiter.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
| `arbiter.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` |
| `arbiter.startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` |
| `arbiter.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
| `arbiter.customLivenessProbe` | Override default liveness probe for Arbiter containers | `{}` |
| `arbiter.customReadinessProbe` | Override default readiness probe for Arbiter containers | `{}` |
| `arbiter.customStartupProbe` | Override default startup probe for Arbiter containers | `{}` |
| `arbiter.initContainers` | Add additional init containers for the Arbiter pod(s) | `[]` |
| `arbiter.sidecars` | Add additional sidecar containers for the Arbiter pod(s) | `[]` |
| `arbiter.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Arbiter container(s) | `[]` |
| `arbiter.extraVolumes` | Optionally specify extra list of additional volumes to the Arbiter statefulset | `[]` |
| `arbiter.pdb.create` | Enable/disable a Pod Disruption Budget creation for Arbiter pod(s) | `false` |
| `arbiter.pdb.minAvailable` | Minimum number/percentage of Arbiter pods that should remain scheduled | `1` |
| `arbiter.pdb.maxUnavailable` | Maximum number/percentage of Arbiter pods that may be made unavailable | `""` |
| `arbiter.service.nameOverride` | The arbiter service name | `""` |
| `arbiter.service.ports.mongodb` | MongoDB(&reg;) service port | `27017` |
| `arbiter.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
| `arbiter.service.annotations` | Provide any additional annotations that may be required | `{}` |
| `arbiter.service.headless.annotations` | Annotations for the headless service. | `{}` |
### Hidden Node parameters
| Name | Description | Value |
| ---------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ------------------- |
| `hidden.enabled` | Enable deploying the hidden nodes | `false` |
| `hidden.hostAliases` | Add deployment host aliases | `[]` |
| `hidden.configuration` | Hidden node configuration file to be used | `""` |
| `hidden.existingConfigmap` | Name of existing ConfigMap with Hidden node configuration | `""` |
| `hidden.command` | Override default container command (useful when using custom images) | `[]` |
| `hidden.args` | Override default container args (useful when using custom images) | `[]` |
| `hidden.extraFlags` | Hidden node additional command line flags | `[]` |
| `hidden.extraEnvVars` | Extra environment variables to add to Hidden node pods | `[]` |
| `hidden.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars | `""` |
| `hidden.extraEnvVarsSecret` | Name of existing Secret containing extra env vars (in case of sensitive data) | `""` |
| `hidden.annotations`                                        | Additional annotations to be added to the hidden node statefulset                                      | `{}`                |
| `hidden.labels`                                             | Additional labels to be added to the hidden node statefulset                                           | `{}`                |
| `hidden.topologySpreadConstraints` | MongoDB(&reg;) Spread Constraints for hidden Pods | `[]` |
| `hidden.lifecycleHooks` | LifecycleHook for the Hidden container to automate configuration before or after startup | `{}` |
| `hidden.replicaCount` | Number of hidden nodes (only when `architecture=replicaset`) | `1` |
| `hidden.terminationGracePeriodSeconds` | Hidden Termination Grace Period | `""` |
| `hidden.updateStrategy.type` | Strategy that will be employed to update Pods in the StatefulSet | `RollingUpdate` |
| `hidden.podManagementPolicy` | Pod management policy for hidden node | `OrderedReady` |
| `hidden.schedulerName` | Name of the scheduler (other than default) to dispatch pods | `""` |
| `hidden.podAffinityPreset` | Hidden node Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `hidden.podAntiAffinityPreset` | Hidden node Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
| `hidden.nodeAffinityPreset.type` | Hidden Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
| `hidden.nodeAffinityPreset.key`                             | Hidden Node label key to match. Ignored if `affinity` is set.                                          | `""`                |
| `hidden.nodeAffinityPreset.values` | Hidden Node label values to match. Ignored if `affinity` is set. | `[]` |
| `hidden.affinity` | Hidden node Affinity for pod assignment | `{}` |
| `hidden.nodeSelector` | Hidden node Node labels for pod assignment | `{}` |
| `hidden.tolerations` | Hidden node Tolerations for pod assignment | `[]` |
| `hidden.podLabels` | Hidden node pod labels | `{}` |
| `hidden.podAnnotations` | Hidden node Pod annotations | `{}` |
| `hidden.priorityClassName` | Name of the existing priority class to be used by hidden node pod(s) | `""` |
| `hidden.runtimeClassName` | Name of the runtime class to be used by hidden node pod(s) | `""` |
| `hidden.podSecurityContext.enabled` | Enable Hidden pod(s)' Security Context | `true` |
| `hidden.podSecurityContext.fsGroup` | Group ID for the volumes of the Hidden pod(s) | `1001` |
| `hidden.podSecurityContext.sysctls` | sysctl settings of the Hidden pod(s)' | `[]` |
| `hidden.containerSecurityContext.enabled` | Enable Hidden container(s)' Security Context | `true` |
| `hidden.containerSecurityContext.runAsUser` | User ID for the Hidden container | `1001` |
| `hidden.containerSecurityContext.runAsGroup` | Group ID for the Hidden container | `0` |
| `hidden.containerSecurityContext.runAsNonRoot` | Set Hidden containers' Security Context runAsNonRoot | `true` |
| `hidden.containerSecurityContext.allowPrivilegeEscalation` | Set Hidden containers' Security Context allowPrivilegeEscalation | `false` |
| `hidden.containerSecurityContext.seccompProfile.type` | Set Hidden container's Security Context seccompProfile type | `RuntimeDefault` |
| `hidden.containerSecurityContext.capabilities.drop` | Set Hidden container's Security Context capabilities to drop | `["ALL"]` |
| `hidden.resources.limits` | The resources limits for hidden node containers | `{}` |
| `hidden.resources.requests` | The requested resources for hidden node containers | `{}` |
| `hidden.containerPorts.mongodb` | MongoDB(&reg;) hidden container port | `27017` |
| `hidden.livenessProbe.enabled` | Enable livenessProbe | `true` |
| `hidden.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` |
| `hidden.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `20` |
| `hidden.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `10` |
| `hidden.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` |
| `hidden.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `hidden.readinessProbe.enabled` | Enable readinessProbe | `true` |
| `hidden.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `hidden.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `20` |
| `hidden.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `10` |
| `hidden.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
| `hidden.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `hidden.startupProbe.enabled` | Enable startupProbe | `false` |
| `hidden.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` |
| `hidden.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
| `hidden.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` |
| `hidden.startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` |
| `hidden.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
| `hidden.customLivenessProbe` | Override default liveness probe for hidden node containers | `{}` |
| `hidden.customReadinessProbe` | Override default readiness probe for hidden node containers | `{}` |
| `hidden.customStartupProbe`                                 | Override default startup probe for hidden node containers                                              | `{}`                |
| `hidden.initContainers` | Add init containers to the MongoDB(&reg;) Hidden pods. | `[]` |
| `hidden.sidecars` | Add additional sidecar containers for the hidden node pod(s) | `[]` |
| `hidden.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the hidden node container(s) | `[]` |
| `hidden.extraVolumes` | Optionally specify extra list of additional volumes to the hidden node statefulset | `[]` |
| `hidden.pdb.create` | Enable/disable a Pod Disruption Budget creation for hidden node pod(s) | `false` |
| `hidden.pdb.minAvailable` | Minimum number/percentage of hidden node pods that should remain scheduled | `1` |
| `hidden.pdb.maxUnavailable` | Maximum number/percentage of hidden node pods that may be made unavailable | `""` |
| `hidden.persistence.enabled` | Enable hidden node data persistence using PVC | `true` |
| `hidden.persistence.medium` | Provide a medium for `emptyDir` volumes. | `""` |
| `hidden.persistence.storageClass` | PVC Storage Class for hidden node data volume | `""` |
| `hidden.persistence.accessModes` | PV Access Mode | `["ReadWriteOnce"]` |
| `hidden.persistence.size` | PVC Storage Request for hidden node data volume | `8Gi` |
| `hidden.persistence.annotations` | PVC annotations | `{}` |
| `hidden.persistence.mountPath` | The path the volume will be mounted at, useful when using different MongoDB(&reg;) images. | `/bitnami/mongodb` |
| `hidden.persistence.subPath` | The subdirectory of the volume to mount to, useful in dev environments | `""` |
| `hidden.persistence.volumeClaimTemplates.selector` | A label query over volumes to consider for binding (e.g. when using local volumes) | `{}` |
| `hidden.persistence.volumeClaimTemplates.requests` | Custom PVC requests attributes | `{}` |
| `hidden.persistence.volumeClaimTemplates.dataSource` | Set volumeClaimTemplate dataSource | `{}` |
| `hidden.service.portName` | MongoDB(&reg;) service port name | `mongodb` |
| `hidden.service.ports.mongodb` | MongoDB(&reg;) service port | `27017` |
| `hidden.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
| `hidden.service.annotations` | Provide any additional annotations that may be required | `{}` |
| `hidden.service.headless.annotations` | Annotations for the headless service. | `{}` |
### Metrics parameters
| Name | Description | Value |
| -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | -------------------------- |
| `metrics.enabled` | Enable using a sidecar Prometheus exporter | `false` |
| `metrics.image.registry` | MongoDB(&reg;) Prometheus exporter image registry | `docker.io` |
| `metrics.image.repository` | MongoDB(&reg;) Prometheus exporter image repository | `bitnami/mongodb-exporter` |
| `metrics.image.tag` | MongoDB(&reg;) Prometheus exporter image tag (immutable tags are recommended) | `0.39.0-debian-11-r106` |
| `metrics.image.digest`                       | MongoDB(&reg;) Prometheus exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""`                       |
| `metrics.image.pullPolicy` | MongoDB(&reg;) Prometheus exporter image pull policy | `IfNotPresent` |
| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
| `metrics.username` | String with username for the metrics exporter | `""` |
| `metrics.password` | String with password for the metrics exporter | `""` |
| `metrics.compatibleMode` | Enables old style mongodb-exporter metrics | `true` |
| `metrics.collector.all` | Enable all collectors. Same as enabling all individual metrics | `false` |
| `metrics.collector.diagnosticdata` | Boolean Enable collecting metrics from getDiagnosticData | `true` |
| `metrics.collector.replicasetstatus` | Boolean Enable collecting metrics from replSetGetStatus | `true` |
| `metrics.collector.dbstats` | Boolean Enable collecting metrics from dbStats | `false` |
| `metrics.collector.topmetrics` | Boolean Enable collecting metrics from top admin command | `false` |
| `metrics.collector.indexstats` | Boolean Enable collecting metrics from $indexStats | `false` |
| `metrics.collector.collstats` | Boolean Enable collecting metrics from $collStats | `false` |
| `metrics.collector.collstatsColls` | List of \<databases\>.\<collections\> to get $collStats | `[]` |
| `metrics.collector.indexstatsColls`          | List of \<databases\>.\<collections\> to get $indexStats                                                                        | `[]`                       |
| `metrics.collector.collstatsLimit` | Number - Disable collstats, dbstats, topmetrics and indexstats collector if there are more than \<n\> collections. 0=No limit | `0` |
| `metrics.extraFlags` | String with extra flags to the metrics exporter | `""` |
| `metrics.command` | Override default container command (useful when using custom images) | `[]` |
| `metrics.args` | Override default container args (useful when using custom images) | `[]` |
| `metrics.resources.limits` | The resources limits for Prometheus exporter containers | `{}` |
| `metrics.resources.requests` | The requested resources for Prometheus exporter containers | `{}` |
| `metrics.containerPort` | Port of the Prometheus metrics container | `9216` |
| `metrics.service.annotations` | Annotations for Prometheus Exporter pods. Evaluated as a template. | `{}` |
| `metrics.service.type` | Type of the Prometheus metrics service | `ClusterIP` |
| `metrics.service.ports.metrics` | Port of the Prometheus metrics service | `9216` |
| `metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
| `metrics.livenessProbe.enabled` | Enable livenessProbe | `true` |
| `metrics.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `15` |
| `metrics.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `5` |
| `metrics.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `10` |
| `metrics.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` |
| `metrics.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
| `metrics.readinessProbe.enabled` | Enable readinessProbe | `true` |
| `metrics.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
| `metrics.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `5` |
| `metrics.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `10` |
| `metrics.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` |
| `metrics.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
| `metrics.startupProbe.enabled` | Enable startupProbe | `false` |
| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` |
| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
| `metrics.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` |
| `metrics.startupProbe.failureThreshold` | Failure threshold for startupProbe | `30` |
| `metrics.startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
| `metrics.customLivenessProbe` | Override default liveness probe for MongoDB(&reg;) containers | `{}` |
| `metrics.customReadinessProbe` | Override default readiness probe for MongoDB(&reg;) containers | `{}` |
| `metrics.customStartupProbe` | Override default startup probe for MongoDB(&reg;) containers | `{}` |
| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the metrics container(s) | `[]` |
| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` |
| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` |
| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `30s` |
| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `""` |
| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` |
| `metrics.serviceMonitor.metricRelabelings` | MetricsRelabelConfigs to apply to samples before ingestion. | `[]` |
| `metrics.serviceMonitor.labels` | Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with | `{}` |
| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` |
| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` |
| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` |
| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` |
| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` |
| `metrics.prometheusRule.namespace` | Namespace where prometheusRules resource should be created | `""` |
| `metrics.prometheusRule.rules` | Rules to be created, check values for an example | `[]` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install my-release \
--set auth.rootPassword=secretpassword,auth.username=my-user,auth.password=my-password,auth.database=my-database \
oci://registry-1.docker.io/bitnamicharts/mongodb
```
The above command sets the MongoDB(&reg;) `root` account password to `secretpassword`. Additionally, it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`.
> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/mongodb
```
> **Tip**: You can use the default [values.yaml](values.yaml)
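As a rough sketch of such a file (keys come from the [Parameters](#parameters) tables; all values shown are placeholders):
```yaml
# values.yaml -- illustrative example only
architecture: replicaset
auth:
  rootPassword: secretpassword
  usernames: ["my-user"]
  passwords: ["my-password"]
  databases: ["my-database"]
persistence:
  size: 8Gi
```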
## Configuration and installation details
### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers whenever a new version of the main container is available, or when significant changes or critical vulnerabilities need to be addressed.
### Customize a new MongoDB instance
The [Bitnami MongoDB(&reg;) image](https://github.com/bitnami/containers/tree/main/bitnami/mongodb) supports the use of custom scripts to initialize a fresh instance. In order to execute the scripts, two options are available:
- Specify them using the `initdbScripts` parameter as dict.
- Define an external Kubernetes ConfigMap with all the initialization scripts by setting the `initdbScriptsConfigMap` parameter. Note that this will override the previous option.
The allowed script extensions are `.sh` and `.js`.
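For instance, a hypothetical `.js` init script can be passed inline through `initdbScripts` (the script name, database and collection below are examples only):
```yaml
initdbScripts:
  create-index.js: |
    // executed once when the instance is initialized
    db = db.getSiblingDB('my-database');
    db.mycollection.createIndex({ createdAt: 1 });
```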
### Replicaset: Access MongoDB(&reg;) nodes from outside the cluster
In order to access MongoDB(&reg;) nodes from outside the cluster when using a replicaset architecture, a specific service per MongoDB(&reg;) pod will be created. There are two ways of configuring external access:
- Using LoadBalancer services
- Using NodePort services.
Refer to the [chart documentation for more details and configuration examples](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/configuration/configure-external-access-replicaset/).
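As an illustrative starting point when using LoadBalancer services (see the linked documentation for complete, verified examples), the values could look like this:
```yaml
architecture: replicaset
externalAccess:
  enabled: true
  service:
    type: LoadBalancer
  autoDiscovery:
    enabled: true    # init container queries the Kubernetes API for the assigned external IPs
serviceAccount:
  create: true
rbac:
  create: true       # typically required so the auto-discovery init container can read Service objects
```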
### Bootstrapping with an External Cluster
This chart is equipped with the ability to bring online a set of Pods that connect to an existing MongoDB(&reg;) deployment that lies outside of Kubernetes. This effectively creates a hybrid MongoDB(&reg;) Deployment where both Pods in Kubernetes and Instances such as Virtual Machines can partake in a single MongoDB(&reg;) Deployment. This is helpful in situations where one may be migrating MongoDB(&reg;) from Virtual Machines into Kubernetes, for example. To take advantage of this, use the following as an example configuration:
```yaml
externalAccess:
externalMaster:
enabled: true
host: external-mongodb-0.internal
```
:warning: To bootstrap MongoDB(&reg;) with an external master that lies outside of Kubernetes, be sure to set up external access using any of the suggested methods in this chart to have connectivity between the MongoDB(&reg;) members. :warning:
### Add extra environment variables
To add extra environment variables (useful for advanced operations like custom init scripts), use the `extraEnvVars` property.
```yaml
extraEnvVars:
- name: LOG_LEVEL
value: error
```
Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` properties.
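For example, assuming a ConfigMap named `mongodb-extra-env` and a Secret named `mongodb-extra-secrets` already exist in the release namespace (both names are hypothetical):
```yaml
extraEnvVarsCM: mongodb-extra-env          # non-sensitive environment variables
extraEnvVarsSecret: mongodb-extra-secrets  # sensitive environment variables
```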
### Use Sidecars and Init Containers
If additional containers are needed in the same pod (such as additional metrics or logging exporters), they can be defined using the `sidecars` config parameter. Similarly, extra init containers can be added using the `initContainers` parameter.
Refer to the chart documentation for more information on, and examples of, configuring and using [sidecars and init containers](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/configuration/configure-sidecar-init-containers/).
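A minimal sketch of a sidecar definition (the container name, image and port are placeholders):
```yaml
sidecars:
  - name: log-forwarder                          # hypothetical sidecar container
    image: docker.io/my-org/log-forwarder:1.0
    ports:
      - name: http
        containerPort: 8080
```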
## Persistence
The [Bitnami MongoDB(&reg;)](https://github.com/bitnami/containers/tree/main/bitnami/mongodb) image stores the MongoDB(&reg;) data and configurations at the `/bitnami/mongodb` path of the container.
The chart mounts a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning.
If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
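For example, to request a specific StorageClass and size for the data volume (both values are illustrative), or to reuse a pre-created PVC in the standalone architecture:
```yaml
persistence:
  enabled: true
  storageClass: fast-ssd            # hypothetical StorageClass name
  size: 20Gi
  # existingClaim: my-mongodb-pvc   # alternative: reuse an existing PVC (standalone only)
```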
## Use custom Prometheus rules
Custom Prometheus rules can be defined for the Prometheus Operator by using the `prometheusRule` parameter.
Refer to the [chart documentation for an example of a custom rule](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/administration/use-prometheus-rules/).
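As a hedged sketch (the alert name, expression and threshold are examples and should be adapted to your environment):
```yaml
metrics:
  enabled: true
  prometheusRule:
    enabled: true
    rules:
      - alert: MongodbDown                 # example alert
        expr: absent(mongodb_up == 1)
        for: 5m
        labels:
          severity: critical
        annotations:
          summary: "MongoDB instance is down"
```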
## Enable SSL/TLS
This chart supports enabling SSL/TLS between nodes in the cluster, as well as between MongoDB(&reg;) clients and nodes, by setting the `MONGODB_EXTRA_FLAGS` and `MONGODB_CLIENT_EXTRA_FLAGS` container environment variables, together with the correct `MONGODB_ADVERTISED_HOSTNAME`. To enable full TLS encryption, set the `tls.enabled` parameter to `true`.
Refer to the [chart documentation for more information on enabling TLS](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/administration/enable-tls/).
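A minimal sketch for enabling TLS with auto-generated certificates (these parameters are described in the [Parameters](#parameters) section):
```yaml
tls:
  enabled: true
  autoGenerated: true     # generate a CA and self-signed certificates at install time
  mode: requireTLS        # one of: allowTLS, preferTLS, requireTLS
```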
### Set Pod affinity
This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
As an alternative, you can use the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
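For example, to spread replica set members across nodes and express a soft zone preference using the presets (the node label and zone value are illustrative; replace the top-level keys with `arbiter.*` or `hidden.*` for those components):
```yaml
podAntiAffinityPreset: hard              # never co-schedule two MongoDB pods on the same node
nodeAffinityPreset:
  type: soft
  key: topology.kubernetes.io/zone       # example node label
  values:
    - eu-west-1a
```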
## Troubleshooting
Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
## Upgrading
If authentication is enabled, it's necessary to set `auth.rootPassword` (and also `auth.replicaSetKey` when using a replicaset architecture) when upgrading so that the readiness/liveness probes work properly. When you install this chart for the first time, some notes will be displayed providing the credentials you must use under the 'Credentials' section. Please note down the password and run the command below to upgrade your chart:
```console
helm upgrade my-release oci://registry-1.docker.io/bitnamicharts/mongodb --set auth.rootPassword=[PASSWORD] (--set auth.replicaSetKey=[REPLICASETKEY])
```
> Note: you need to substitute the placeholders [PASSWORD] and [REPLICASETKEY] with the values obtained in the installation notes.
### To 12.0.0
This major release renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository.
Affected values:
- `strategyType` is replaced by `updateStrategy`
- `service.port` is renamed to `service.ports.mongodb`
- `service.nodePort` is renamed to `service.nodePorts.mongodb`
- `externalAccess.service.port` is renamed to `externalAccess.service.ports.mongodb`
- `rbac.role.rules` is renamed to `rbac.rules`
- `externalAccess.hidden.service.port` is renamed to `externalAccess.hidden.service.ports.mongodb`
- `hidden.strategyType` is replaced by `hidden.updateStrategy`
- `metrics.serviceMonitor.relabellings` is renamed to `metrics.serviceMonitor.relabelings` (typo fixed)
- `metrics.serviceMonitor.additionalLabels` is renamed to `metrics.serviceMonitor.labels`
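As a sketch, a values file using the old names would migrate as follows (only renamed keys are shown):
```yaml
# Before (11.x)
service:
  port: 27017
  nodePort: 30017
strategyType: RollingUpdate
# After (12.x)
service:
  ports:
    mongodb: 27017
  nodePorts:
    mongodb: 30017
updateStrategy:
  type: RollingUpdate
```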
Additionally, this version updates the MongoDB(&reg;) image dependency to its newest major version, 5.0.
### To 11.0.0
In this version, the mongodb-exporter bundled as part of this Helm chart was updated to a new version which, even though it is not a major change, can contain breaking changes (from `0.11.X` to `0.30.X`).
Please review the release notes of the upstream project at <https://github.com/percona/mongodb_exporter/releases>
### To 10.0.0
[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/mongodb/administration/upgrade-helm3/).
### To 9.0.0
MongoDB(&reg;) container images were updated to `4.4.x`, which can affect compatibility with older versions of MongoDB(&reg;). Refer to the following guides to upgrade your applications:
- [Standalone](https://docs.mongodb.com/manual/release-notes/4.4-upgrade-standalone/)
- [Replica Set](https://docs.mongodb.com/manual/release-notes/4.4-upgrade-replica-set/)
### To 8.0.0
- The architecture used to configure MongoDB(&reg;) as a replicaset was completely refactored. Now, both primary and secondary nodes are part of the same statefulset.
- Chart labels were adapted to follow the Helm charts best practices.
- This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
- Several parameters were renamed or removed in favor of new ones in this major version. These are the most important ones:
- `replicas` is renamed to `replicaCount`.
- Authentication parameters are reorganized under the `auth.*` parameter (see the sketch at the end of this section):
- `usePassword` is renamed to `auth.enabled`.
- `mongodbRootPassword`, `mongodbUsername`, `mongodbPassword`, `mongodbDatabase`, and `replicaSet.key` are now `auth.rootPassword`, `auth.username`, `auth.password`, `auth.database`, and `auth.replicaSetKey` respectively.
- `securityContext.*` is deprecated in favor of `podSecurityContext` and `containerSecurityContext`.
- Parameters prefixed with `mongodb` are renamed removing the prefix. E.g. `mongodbEnableIPv6` is renamed to `enableIPv6`.
- Parameters affecting Arbiter nodes are reorganized under the `arbiter.*` parameter.
Consequences:
- Backwards compatibility is not guaranteed. To upgrade to `8.0.0`, install a new release of the MongoDB(&reg;) chart, and migrate your data by creating a backup of the database, and restoring it on the new release.
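To illustrate the authentication parameter reorganization described above, a before/after sketch with placeholder values:
```yaml
# Before (7.x)
usePassword: true
mongodbRootPassword: root-password  # placeholder value
mongodbUsername: my-user
mongodbPassword: my-password
mongodbDatabase: my-database
replicaSet:
  key: my-replicaset-key
# After (8.x)
auth:
  enabled: true
  rootPassword: root-password
  username: my-user
  password: my-password
  database: my-database
  replicaSetKey: my-replicaset-key
```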
### To 7.0.0
From this version, the way of setting the ingress rules has changed. Instead of using `ingress.paths` and `ingress.hosts` as separate objects, you should now define the rules as objects inside the `ingress.hosts` value, for example:
```yaml
ingress:
  hosts:
    - name: mongodb.local
      path: /
```
### To 6.0.0
From this version, `mongodbEnableIPv6` is set to `false` by default so the chart works properly in most Kubernetes clusters. If you want IPv6 support, set this variable to `true` by adding `--set mongodbEnableIPv6=true` to your `helm` command.
You can find more information in the [`bitnami/mongodb` image README](https://github.com/bitnami/containers/tree/main/bitnami/mongodb#readme).
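If you prefer a values file over `--set`, the equivalent entry (using the parameter name that applies to this chart version) is simply:
```yaml
mongodbEnableIPv6: true
```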
### To 5.0.0
When enabling replicaset configuration, backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets.
Use the workaround below to upgrade from versions previous to 5.0.0. The following example assumes that the release name is `my-release`:
```console
kubectl delete statefulset my-release-mongodb-arbiter my-release-mongodb-primary my-release-mongodb-secondary --cascade=false
```
### Add extra deployment options
To add extra deployments (useful for advanced features like sidecars), use the `extraDeploy` property.
The example below shows how to expose the primary node through an extra `NodePort` Service, relying on the [MongoDB replica set pod labeler sidecar](https://github.com/combor/k8s-mongo-labeler-sidecar) to identify the primary pod and dynamically label it as the primary node:
```yaml
extraDeploy:
  - apiVersion: v1
    kind: Service
    metadata:
      name: mongodb-primary
      namespace: default
      labels:
        app.kubernetes.io/component: mongodb
        app.kubernetes.io/instance: mongodb
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: mongodb
    spec:
      type: NodePort
      externalTrafficPolicy: Cluster
      ports:
        - name: mongodb-primary
          port: 30001
          nodePort: 30001
          protocol: TCP
          targetPort: mongodb
      selector:
        app.kubernetes.io/component: mongodb
        app.kubernetes.io/instance: mongodb
        app.kubernetes.io/name: mongodb
        primary: "true"
```
## License
Copyright &copy; 2023 VMware, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,23 @@
annotations:
category: Infrastructure
licenses: Apache-2.0
apiVersion: v2
appVersion: 2.11.1
description: A Library Helm Chart for grouping common logic between bitnami charts.
This chart is not deployable by itself.
home: https://bitnami.com
icon: https://bitnami.com/downloads/logos/bitnami-mark.png
keywords:
- common
- helper
- template
- function
- bitnami
maintainers:
- name: VMware, Inc.
url: https://github.com/bitnami/charts
name: common
sources:
- https://github.com/bitnami/charts
type: library
version: 2.11.1

View File

@@ -0,0 +1,235 @@
# Bitnami Common Library Chart
A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts.
## TL;DR
```yaml
dependencies:
  - name: common
    version: 2.x.x
    repository: oci://registry-1.docker.io/bitnamicharts
```
```console
helm dependency update
```
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "common.names.fullname" . }}
data:
  myvalue: "Hello World"
```
## Introduction
This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
## Prerequisites
- Kubernetes 1.19+
- Helm 3.2.0+
## Parameters
## Special input schemas
### ImageRoot
```yaml
registry:
  type: string
  description: Docker registry where the image is located
  example: docker.io
repository:
  type: string
  description: Repository and image name
  example: bitnami/nginx
tag:
  type: string
  description: image tag
  example: 1.16.1-debian-10-r63
pullPolicy:
  type: string
  description: Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
pullSecrets:
  type: array
  items:
    type: string
  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
debug:
  type: boolean
  description: Set to true if you would like to see extra information on logs
  example: false
## An instance would be:
# registry: docker.io
# repository: bitnami/nginx
# tag: 1.16.1-debian-10-r63
# pullPolicy: IfNotPresent
# debug: false
```
### Persistence
```yaml
enabled:
  type: boolean
  description: Whether to enable persistence.
  example: true
storageClass:
  type: string
  description: Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning.
  example: "-"
accessMode:
  type: string
  description: Access mode for the Persistent Volume Storage.
  example: ReadWriteOnce
size:
  type: string
  description: Size of the Persistent Volume Storage.
  example: 8Gi
path:
  type: string
  description: Path to be persisted.
  example: /bitnami
## An instance would be:
# enabled: true
# storageClass: "-"
# accessMode: ReadWriteOnce
# size: 8Gi
# path: /bitnami
```
### ExistingSecret
```yaml
name:
  type: string
  description: Name of the existing secret.
  example: mySecret
keyMapping:
  description: Mapping between the expected key name and the name of the key in the existing secret.
  type: object
## An instance would be:
# name: mySecret
# keyMapping:
#   password: myPasswordKey
```
#### Example of use
When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their own existing secrets.
```yaml
# templates/secret.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "common.names.fullname" . }}
  labels:
    app: {{ include "common.names.fullname" . }}
type: Opaque
data:
  password: {{ .Values.password | b64enc | quote }}

# templates/dpl.yaml
---
...
      env:
        - name: PASSWORD
          valueFrom:
            secretKeyRef:
              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
...

# values.yaml
---
name: mySecret
keyMapping:
  password: myPasswordKey
```
### ValidateValue
#### NOTES.txt
```console
{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
```
If we force those values to be empty, we will see some alerts:
```console
helm install test mychart --set path.to.value00="",path.to.value01=""
'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
```
## Upgrading
### To 1.0.0
[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
#### What changes were introduced in this major version?
- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
#### Considerations when upgrading to this version
- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
#### Useful links
- <https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/>
- <https://helm.sh/docs/topics/v2_v3_migration/>
- <https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/>
## License
Copyright &copy; 2023 VMware, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
<http://www.apache.org/licenses/LICENSE-2.0>
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,139 @@
{{/*
Copyright VMware, Inc.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return a soft nodeAffinity definition
{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
*/}}
{{- define "common.affinities.nodes.soft" -}}
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: {{ .key }}
operator: In
values:
{{- range .values }}
- {{ . | quote }}
{{- end }}
weight: 1
{{- end -}}
{{/*
Return a hard nodeAffinity definition
{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}}
*/}}
{{- define "common.affinities.nodes.hard" -}}
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: {{ .key }}
operator: In
values:
{{- range .values }}
- {{ . | quote }}
{{- end }}
{{- end -}}
{{/*
Return a nodeAffinity definition
{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
*/}}
{{- define "common.affinities.nodes" -}}
{{- if eq .type "soft" }}
{{- include "common.affinities.nodes.soft" . -}}
{{- else if eq .type "hard" }}
{{- include "common.affinities.nodes.hard" . -}}
{{- end -}}
{{- end -}}
{{/*
Return a topologyKey definition
{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}}
*/}}
{{- define "common.affinities.topologyKey" -}}
{{ .topologyKey | default "kubernetes.io/hostname" -}}
{{- end -}}
{{/*
Return a soft podAffinity/podAntiAffinity definition
{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}}
*/}}
{{- define "common.affinities.pods.soft" -}}
{{- $component := default "" .component -}}
{{- $customLabels := default (dict) .customLabels -}}
{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}}
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }}
{{- if not (empty $component) }}
{{ printf "app.kubernetes.io/component: %s" $component }}
{{- end }}
{{- range $key, $value := $extraMatchLabels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
weight: 1
{{- range $extraPodAffinityTerms }}
- podAffinityTerm:
labelSelector:
matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }}
{{- if not (empty $component) }}
{{ printf "app.kubernetes.io/component: %s" $component }}
{{- end }}
{{- range $key, $value := .extraMatchLabels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
weight: {{ .weight | default 1 -}}
{{- end -}}
{{- end -}}
{{/*
Return a hard podAffinity/podAntiAffinity definition
{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}}
*/}}
{{- define "common.affinities.pods.hard" -}}
{{- $component := default "" .component -}}
{{- $customLabels := default (dict) .customLabels -}}
{{- $extraMatchLabels := default (dict) .extraMatchLabels -}}
{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}}
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }}
{{- if not (empty $component) }}
{{ printf "app.kubernetes.io/component: %s" $component }}
{{- end }}
{{- range $key, $value := $extraMatchLabels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
{{- range $extraPodAffinityTerms }}
- labelSelector:
matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }}
{{- if not (empty $component) }}
{{ printf "app.kubernetes.io/component: %s" $component }}
{{- end }}
{{- range $key, $value := .extraMatchLabels }}
{{ $key }}: {{ $value | quote }}
{{- end }}
topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }}
{{- end -}}
{{- end -}}
{{/*
Return a podAffinity/podAntiAffinity definition
{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}}
*/}}
{{- define "common.affinities.pods" -}}
{{- if eq .type "soft" }}
{{- include "common.affinities.pods.soft" . -}}
{{- else if eq .type "hard" }}
{{- include "common.affinities.pods.hard" . -}}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,185 @@
{{/*
Copyright VMware, Inc.
SPDX-License-Identifier: APACHE-2.0
*/}}
{{/* vim: set filetype=mustache: */}}
{{/*
Return the target Kubernetes version
*/}}
{{- define "common.capabilities.kubeVersion" -}}
{{- if .Values.global }}
{{- if .Values.global.kubeVersion }}
{{- .Values.global.kubeVersion -}}
{{- else }}
{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
{{- end -}}
{{- else }}
{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for poddisruptionbudget.
*/}}
{{- define "common.capabilities.policy.apiVersion" -}}
{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "policy/v1beta1" -}}
{{- else -}}
{{- print "policy/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for networkpolicy.
*/}}
{{- define "common.capabilities.networkPolicy.apiVersion" -}}
{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "networking.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for cronjob.
*/}}
{{- define "common.capabilities.cronjob.apiVersion" -}}
{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "batch/v1beta1" -}}
{{- else -}}
{{- print "batch/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for daemonset.
*/}}
{{- define "common.capabilities.daemonset.apiVersion" -}}
{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for deployment.
*/}}
{{- define "common.capabilities.deployment.apiVersion" -}}
{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "extensions/v1beta1" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for statefulset.
*/}}
{{- define "common.capabilities.statefulset.apiVersion" -}}
{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "apps/v1beta1" -}}
{{- else -}}
{{- print "apps/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for ingress.
*/}}
{{- define "common.capabilities.ingress.apiVersion" -}}
{{- if .Values.ingress -}}
{{- if .Values.ingress.apiVersion -}}
{{- .Values.ingress.apiVersion -}}
{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "extensions/v1beta1" -}}
{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "networking.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "networking.k8s.io/v1" -}}
{{- end }}
{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "extensions/v1beta1" -}}
{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "networking.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "networking.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for RBAC resources.
*/}}
{{- define "common.capabilities.rbac.apiVersion" -}}
{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "rbac.authorization.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "rbac.authorization.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for CRDs.
*/}}
{{- define "common.capabilities.crd.apiVersion" -}}
{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "apiextensions.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "apiextensions.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for APIService.
*/}}
{{- define "common.capabilities.apiService.apiVersion" -}}
{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
{{- print "apiregistration.k8s.io/v1beta1" -}}
{{- else -}}
{{- print "apiregistration.k8s.io/v1" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for Horizontal Pod Autoscaler.
*/}}
{{- define "common.capabilities.hpa.apiVersion" -}}
{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
{{- if .beta2 -}}
{{- print "autoscaling/v2beta2" -}}
{{- else -}}
{{- print "autoscaling/v2beta1" -}}
{{- end -}}
{{- else -}}
{{- print "autoscaling/v2" -}}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for Vertical Pod Autoscaler.
*/}}
{{- define "common.capabilities.vpa.apiVersion" -}}
{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
{{- if .beta2 -}}
{{- print "autoscaling/v2beta2" -}}
{{- else -}}
{{- print "autoscaling/v2beta1" -}}
{{- end -}}
{{- else -}}
{{- print "autoscaling/v2" -}}
{{- end -}}
{{- end -}}
{{/*
Returns true if the used Helm version is 3.3+.
A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error.
**To be removed when the catalog's minimum Helm version is 3.3**
*/}}
{{- define "common.capabilities.supportsHelmVersion" -}}
{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
{{- true -}}
{{- end -}}
{{- end -}}

Some files were not shown because too many files have changed in this diff.