Compare commits

...

186 Commits

Author SHA1 Message Date
1c60146f33 use amd64 for redis 2025-06-30 22:34:55 +02:00
23adf5a6be remove pve exporter 2025-06-07 22:03:08 +02:00
790d2152d3 update prometheus jobs 2025-06-07 20:58:49 +02:00
3f156984d9 fix prometheus 2025-06-07 20:37:44 +02:00
d834e58879 fix secret 2025-06-07 20:34:56 +02:00
3a4a28598a fix file name 2025-06-07 20:33:14 +02:00
cc00e8de8b update prometheus 2025-06-07 20:26:32 +02:00
6404f7772b update prometheus for proxmox 2025-06-07 20:24:46 +02:00
aee2d151a3 switch to stable 2025-06-02 02:49:47 +02:00
15dd965c7c revert to beta 2025-06-02 02:38:56 +02:00
04be76502d fix pvc 2025-06-02 02:36:06 +02:00
620b5ee9b1 move migration and server into same thing 2025-06-02 02:32:28 +02:00
b3ce3d5067 update tags 2025-06-02 02:28:38 +02:00
cefc5e5565 fix tls 2025-06-02 02:11:13 +02:00
e25b5947fc fix fqdn 2025-06-02 02:04:10 +02:00
cde2250d27 fix redis 2025-06-02 01:45:38 +02:00
4946ee57c1 fix port 2025-06-02 01:43:41 +02:00
a4ebfa259c fix port 2025-06-02 01:42:00 +02:00
c447b1be48 fix port 2025-06-02 01:40:48 +02:00
35f6eaf618 fix image 2025-06-02 01:37:07 +02:00
f6603d55a7 use rwm 2025-06-02 01:35:30 +02:00
742396fd8e fix image 2025-06-02 01:32:49 +02:00
fd004a7479 add affine 2025-06-02 01:31:34 +02:00
08331f6ae3 update outline 2025-06-02 01:14:53 +02:00
89173418cd update outline 2025-06-02 01:14:13 +02:00
69356bb160 disable signup 2025-06-02 01:05:58 +02:00
cf6f3546d4 update key 2025-06-02 01:03:32 +02:00
89a5d04c42 update saml 2025-06-02 01:02:23 +02:00
d0e9acf392 update fqdn 2025-06-02 00:50:42 +02:00
7f22d664bb fix ingress 2025-06-02 00:24:26 +02:00
624b2bb1b8 fix ingress 2025-06-02 00:23:05 +02:00
1105260935 fix ingress 2025-06-02 00:20:10 +02:00
ef850cd4f1 fix ingress 2025-06-02 00:19:33 +02:00
333490d4c2 fix ingress 2025-06-02 00:19:05 +02:00
083891c884 fix ingress 2025-06-02 00:06:05 +02:00
655e0691c2 fix ingress 2025-06-02 00:05:40 +02:00
9d236e1f97 fix readiness probe 2025-06-01 23:51:00 +02:00
c4d4098b99 fix ingress 2025-06-01 23:43:56 +02:00
ff96741d23 fix api 2025-06-01 23:39:35 +02:00
9cfab300d0 fix deploy 2025-06-01 23:37:04 +02:00
c655dec0bf fix ingress 2025-06-01 23:35:48 +02:00
dfe8eb3d46 revert psql edit 2025-06-01 23:27:03 +02:00
6e4a07076a fix cluster type 2025-06-01 23:23:05 +02:00
b740b48782 add vector psql ext 2025-06-01 23:22:07 +02:00
d1acf204ce fix port 2025-06-01 23:16:19 +02:00
73714929f9 fix env 2025-06-01 23:13:29 +02:00
81177b18d5 add appflowy 2025-06-01 23:11:51 +02:00
e2c84e0bf8 update prometheus 2025-05-31 15:31:36 +02:00
dea4045dc6 delete bitpoke mysql because it's bad 2025-04-14 19:01:26 +02:00
e37aac251a fix s3 2025-04-14 18:59:22 +02:00
2f06076990 fix s3 2025-04-14 18:58:04 +02:00
0c3cce909b add minio for cloning 2025-04-14 18:43:17 +02:00
c11a777700 fix nodeSelector 2025-04-14 18:18:09 +02:00
9e6467f6bb fix nodeselector 2025-04-14 18:14:53 +02:00
029918de44 update mysql cluster 2025-04-14 18:11:05 +02:00
7697f2f36e fix arch 2025-04-14 14:41:32 +02:00
84a03a6eac fix chart.yaml 2025-04-14 14:38:42 +02:00
209b21c83f fix yaml 2025-04-14 14:32:26 +02:00
2b032964a2 move mysql operator and cluster 2025-04-14 14:30:26 +02:00
6e2597ffa7 fix yaml 2025-04-14 14:28:00 +02:00
dc6f4a0555 update chart 2025-04-14 14:15:43 +02:00
7f2240ff6a add mysql ha 2025-04-14 14:12:07 +02:00
3e33b17c2c fix ingress 2025-04-14 12:41:27 +02:00
aed1806127 fix missing clickhouse svc 2025-04-14 12:37:45 +02:00
57db805f10 fix clickhouse db url 2025-04-14 12:35:28 +02:00
8dd4f30803 fix mailer exec format error 2025-04-14 12:31:10 +02:00
fcf2450a8e fix mailer exec format error 2025-04-14 12:29:13 +02:00
c2818b1c8c fix yaml 2025-04-14 12:04:12 +02:00
eda474ec92 add plausible 2025-04-14 11:59:56 +02:00
f987f9f3ec update longhorn default replicas 2025-04-13 16:00:08 +02:00
e3364afe28 update values 2025-04-13 11:55:23 +02:00
5d8a4e1791 update values 2025-04-13 11:54:01 +02:00
b783db47b9 update descheduler values 2025-04-13 11:49:46 +02:00
2a623cf21f update descheduler values 2025-04-13 11:47:54 +02:00
be7e80d716 fix descheduler values 2025-04-13 11:44:57 +02:00
8e42b7f782 Merge remote-tracking branch 'origin/main' 2025-04-13 11:31:40 +02:00
db25c37cde add descheduler helm 2025-04-13 11:27:16 +02:00
f7144a7cdf update jsonnet for prometheus volume 2025-04-13 00:37:51 +02:00
687b6585e6 fix minio svc 2025-04-13 00:15:05 +02:00
52dd463de1 fix minio svc 2025-04-13 00:12:30 +02:00
1ac6d41e26 remove buggy probes 2025-04-12 23:59:44 +02:00
e41c5e8ca7 fix host 2025-04-12 23:54:49 +02:00
fc203268bc fix host 2025-04-12 23:51:48 +02:00
a64f2b4bfe update probes 2025-04-12 23:48:15 +02:00
768f29680b update probes 2025-04-12 23:43:12 +02:00
f57ea1afd9 update bind addr 2025-04-12 23:39:41 +02:00
43860d2464 fix probes 2025-04-12 23:36:27 +02:00
85f6f81e23 try to make outline HA 2025-04-12 23:30:06 +02:00
55bbc6d2d4 increase body size 2025-04-12 23:04:59 +02:00
3744e9fb82 increase body size 2025-04-12 23:04:17 +02:00
94d313666a update url 2025-04-12 22:58:06 +02:00
47d88bdf99 update keycloak pointers 2025-04-12 22:37:59 +02:00
ee6b0c8ab3 fix ingress 2025-04-12 22:34:04 +02:00
9034ec500b remove volumes 2025-04-12 22:32:15 +02:00
8fbe87890c add outline wiki 2025-04-12 22:28:21 +02:00
837371313b add region to minio config 2025-04-12 21:20:35 +02:00
f854919802 add minio secrets 2025-04-12 21:04:55 +02:00
3f58967ebd fix redirect url 2025-04-06 21:40:59 +02:00
9d769840b7 fix ingress 2025-04-06 21:38:55 +02:00
d75dd0fca4 fix ingress 2025-04-06 21:36:01 +02:00
f55875bc8f fix console port 2025-04-06 21:32:55 +02:00
3cd7a391a1 fix svc 2025-04-06 21:30:27 +02:00
189a664a23 fix secret 2025-04-06 21:27:49 +02:00
c52bfb3045 fix ingress 2025-04-06 21:24:35 +02:00
50dc452b0d fix tenant 2025-04-06 21:20:36 +02:00
ece670c77e fix minio 2025-04-06 21:16:23 +02:00
a40c495e26 fix rbac 2025-04-06 21:14:40 +02:00
0ccb40b8fc fix tls for minio 2025-04-06 21:13:03 +02:00
70019d0775 fix tls 2025-04-06 21:09:30 +02:00
8553165048 fix tls name 2025-04-06 21:06:48 +02:00
a8058e745d fix tls 2025-04-06 21:03:32 +02:00
c8501f505b fix tls termination 2025-04-06 20:58:12 +02:00
4709f6ba84 fix typo 2025-04-06 20:50:20 +02:00
d61af7e58c fix s3 endpoint 2025-04-06 20:46:57 +02:00
e93179896b use clusterip 2025-04-06 20:28:59 +02:00
dbb6381898 fix minio svc 2025-04-06 20:24:56 +02:00
1e4a007d72 update tenant name 2025-04-06 20:16:59 +02:00
5d62486f55 fix minio tenant 2025-04-06 20:12:46 +02:00
7558f369c5 add minio tenant 2025-04-06 20:08:40 +02:00
38e230a9a5 add minio operator 2025-04-06 19:56:09 +02:00
e20674bd1d fix elk for grafana 2025-04-06 12:32:17 +02:00
f98fad2e88 make nexus run on arm 2025-04-06 00:44:54 +02:00
69f9ff7bfb fix ingress 2025-04-05 23:58:37 +02:00
2c849582c9 fix secret name 2025-04-05 23:56:04 +02:00
3ddf7b22b1 update nexus deploy 2025-04-05 23:51:35 +02:00
79349f9743 fix yaml 2025-04-05 23:48:44 +02:00
9a3f4bac60 add nexus oss 2025-04-05 23:45:57 +02:00
556617ece5 update ingress 2025-04-05 22:59:25 +02:00
d35c568250 update values.yaml 2025-04-05 22:58:23 +02:00
878eeb1c4b add templates 2025-04-05 22:55:08 +02:00
815255d4da add ingress 2025-04-05 22:53:20 +02:00
96a4e310c8 update values.yaml 2025-04-05 22:47:14 +02:00
bb2dc111a0 fix yaml 2025-04-05 22:37:43 +02:00
d63ee71bfa add uptime kuma 2025-04-05 22:35:49 +02:00
ebc754b4af fix n8n domain 2025-04-04 21:23:44 +02:00
76fcc1da6c fix n8n svc 2025-04-04 21:06:14 +02:00
0f012ef2ad fix yaml 2025-04-04 21:02:23 +02:00
ccf14644a7 fix yaml 2025-04-04 21:01:35 +02:00
2ae346e8ef add n8n 2025-04-04 20:59:32 +02:00
c2cfef7397 update values 2025-04-03 15:28:53 +02:00
c95f585e80 fix secret 2025-04-03 15:26:54 +02:00
aa6a51e4b5 update values.yaml 2025-04-03 15:25:50 +02:00
71b6b05c41 fix values.yaml 2025-04-03 15:24:09 +02:00
17678c914a update values.yaml 2025-04-03 15:22:03 +02:00
d073c01735 update values 2025-04-03 15:19:07 +02:00
571c3a4dbb update rocket-chat 2025-04-03 15:16:10 +02:00
31f6b361ac add deps 2025-04-03 15:11:37 +02:00
5127028f6d add rocketchat helm 2025-04-03 15:02:31 +02:00
50847afaa0 add rocketchat 2025-04-03 14:54:17 +02:00
e788b770d9 update oauth2 proxy 2025-04-03 11:32:28 +02:00
ed0878a9bc temp fix kibana 2025-03-31 10:47:57 +02:00
ad4708fdff update kibana 2025-03-31 10:41:56 +02:00
cf707d1887 fix kibana 2025-03-31 10:39:33 +02:00
f111b6337d fix kibana 2025-03-31 10:38:15 +02:00
7a33d8212b make kibana use oauth2 2025-03-31 10:30:57 +02:00
e9a4de02cc Revert "try oauth2 proxy for prom"
This reverts commit 027c9edb6d.
2025-03-31 10:23:12 +02:00
027c9edb6d try oauth2 proxy for prom 2025-03-31 10:21:56 +02:00
6ad3654593 fix elastic 2025-03-31 10:06:38 +02:00
f0e10ed035 add elastic data source 2025-03-31 10:04:29 +02:00
25946ad705 fix nginx kibana 2025-03-31 04:24:36 +02:00
c7a97af6a9 add kibana ingress 2025-03-31 04:15:19 +02:00
ea82169286 add kibana 2025-03-31 04:10:52 +02:00
38c43eee1d temp disable longhorn 2025-03-31 04:05:34 +02:00
ae368b504c reduce volume 2025-03-31 03:59:57 +02:00
15c3ca05f8 fix typo 2025-03-31 03:56:59 +02:00
3ebbd128e1 add elasticsearch 2025-03-31 03:56:39 +02:00
3eec7c3d02 add elasticsearch 2025-03-31 03:56:03 +02:00
29bad892fc fix elk 2025-03-31 03:53:40 +02:00
4d6a5c4984 fix typo 2025-03-31 03:50:40 +02:00
9105673f57 move elk stack 2025-03-31 03:48:51 +02:00
727cc862bd update elk stack 2025-03-31 03:48:44 +02:00
42151c6eb6 add elk stack 2025-03-31 03:43:37 +02:00
03acc85016 disable redis for oauth2 proxy 2025-03-31 01:47:27 +02:00
779b2f8255 remove regex 2025-03-31 01:34:18 +02:00
56ca246d9b fix oauth2 longhorn 2025-03-31 01:23:25 +02:00
a8df93957f update oauth2 longhorn 2025-03-31 01:18:07 +02:00
04b4424ceb fix oauth2 proxy 2025-03-31 01:13:27 +02:00
c1c8b93632 update oauth2 proxy 2025-03-31 01:09:51 +02:00
d8c72b579b protect longhorn 2025-03-31 01:06:29 +02:00
d4e3f82013 Revert "speedup renovate"
This reverts commit 5bb7ac4c36.
2025-03-31 00:50:31 +02:00
0d96daac03 slow down renovate 2025-03-31 00:38:54 +02:00
62f039141c update postgres pvc 2025-03-31 00:30:23 +02:00
a7dca761f2 rollback prometheus 2025-03-30 23:45:12 +02:00
5663e93c2d update prometheus 2025-03-30 19:42:41 +02:00
d86d651c4f Revert "fix prometheus"
This reverts commit a3d3dd8336.
2025-03-30 19:34:16 +02:00
088c534cde Revert "update prometheus"
This reverts commit 1e7ef34b5c.
2025-03-30 19:34:16 +02:00
285 changed files with 43900 additions and 33 deletions

BIN
.DS_Store vendored

Binary file not shown.

3
.gitignore vendored

@@ -1 +1,2 @@
**/secret.yaml
**/.DS_Store
.idea/

BIN
deploy/.DS_Store vendored

Binary file not shown.


@@ -0,0 +1,93 @@
# --------------------------------------------------------------------
# 5b) Deployment: affine-server (serves HTTP on port 3010)
# --------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
name: affine-server
namespace: affine
labels:
app: affine-server
spec:
replicas: 1
selector:
matchLabels:
app: affine-server
template:
metadata:
labels:
app: affine-server
spec:
initContainers:
- name: affine-migrate
image: ghcr.io/toeverything/affine-graphql:stable-9e7280c
command: ["sh", "-c", "node ./scripts/self-host-predeploy.js"]
env:
- name: REDIS_SERVER_HOST
value: "redis-lb.redis.svc.cluster.local"
- name: REDIS_SERVER_PORT
value: "6379"
- name: DATABASE_URL
value: >
postgresql://$(DB_USERNAME):$(DB_PASSWORD)@postgres-base-rw.postgres.svc.cluster.local:5432/$(DB_DATABASE)
- name: AFFINE_SERVER_PORT
value: "3010"
envFrom:
- secretRef:
name: affine-db-secret
volumeMounts:
- name: affine-storage
mountPath: /root/.affine/storage
- name: affine-config
mountPath: /root/.affine/config
containers:
- name: affine
image: ghcr.io/toeverything/affine-graphql:stable-9e7280c
ports:
- containerPort: 3010
name: http
env:
- name: NODE_TLS_REJECT_UNAUTHORIZED
value: "0"
- name: AFFINE_SERVER_HTTPS
value: "true"
- name: AFFINE_SERVER_HOST
value: "affine.prod.panic.haus"
- name: REDIS_SERVER_HOST
value: "redis-lb.redis.svc.cluster.local"
- name: REDIS_SERVER_PORT
value: "6379"
- name: DATABASE_URL
value: >-
postgresql://$(DB_USERNAME):$(DB_PASSWORD)@postgres-base-rw.postgres.svc.cluster.local:5432/$(DB_DATABASE)
- name: AFFINE_SERVER_EXTERNAL_URL
value: "https://affine.prod.panic.haus"
- name: AFFINE_SERVER_PORT
value: "3010"
envFrom:
- secretRef:
name: affine-db-secret
readinessProbe:
httpGet:
path: /health
port: 3010
initialDelaySeconds: 10
periodSeconds: 10
livenessProbe:
httpGet:
path: /health
port: 3010
initialDelaySeconds: 30
periodSeconds: 20
volumeMounts:
- name: affine-storage
mountPath: /root/.affine/storage
- name: affine-config
mountPath: /root/.affine/config
volumes:
- name: affine-storage
persistentVolumeClaim:
claimName: affine-storage-pvc
- name: affine-config
persistentVolumeClaim:
claimName: affine-config-pvc


@@ -0,0 +1,27 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: affine-ingress
namespace: affine
annotations:
# (If you're using cert-manager + Let's Encrypt)
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "50m"
spec:
ingressClassName: nginx
tls:
- hosts:
- affine.prod.panic.haus # ← replace with your desired Affine hostname
secretName: affine-tls # ← must match an existing TLS Secret for that host
rules:
- host: affine.prod.panic.haus # ← change to whatever subdomain you choose
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: affine-server
port:
number: 3010


@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: affine
resources:
- secret.yaml
- pvc.yaml
- service.yaml
- deployment.yaml
- ingress.yaml
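
The kustomization above wires the Affine manifests together. A minimal sketch of rendering and applying it, assuming the files sit under deploy/affine as the other paths in this diff suggest:

```shell
# Preview the rendered manifests without applying them
kubectl kustomize deploy/affine

# Apply the whole stack; the affine namespace is assumed to exist already,
# since it is referenced but not created by these manifests
kubectl apply -k deploy/affine
```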

28
deploy/affine/pvc.yaml Normal file

@@ -0,0 +1,28 @@
# 3a) PVC for Affine's upload storage (/root/.affine/storage)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: affine-storage-pvc
namespace: affine
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
# 3b) PVC for Affine's config (/root/.affine/config)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: affine-config-pvc
namespace: affine
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

10
deploy/affine/secret.yaml Normal file

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: affine-db-secret
namespace: affine
stringData:
# Database credentials for Affine
DB_USERNAME: "affine"
DB_PASSWORD: "tqMB9UjJ7GZrWnux4sJ9nDPR4xQLq6Vz"
DB_DATABASE: "affine_db"


@@ -0,0 +1,15 @@
# This Service exposes Affine on port 3010 within the cluster
apiVersion: v1
kind: Service
metadata:
name: affine-server
namespace: affine
spec:
selector:
app: affine-server
ports:
- name: http
port: 3010
targetPort: 3010
protocol: TCP
type: ClusterIP


@@ -0,0 +1,350 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: gotrue
namespace: appflowy
spec:
replicas: 1
selector:
matchLabels:
app: gotrue
template:
metadata:
labels:
app: gotrue
spec:
containers:
- name: gotrue
image: appflowyinc/gotrue:latest
ports:
- containerPort: 9999
env:
- name: GOTRUE_SAML_ENABLED
value: "true"
- name: GOTRUE_SAML_PRIVATE_KEY
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: GOTRUE_SAML_PRIVATE_KEY
# ----- DB (Postgres HA) -----
- name: GOTRUE_DB_DRIVER
value: postgres
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: GOTRUE_DATABASE_URL
- name: GOTRUE_ADMIN_EMAIL
value: hello@beatrice.wtf
- name: GOTRUE_ADMIN_PASSWORD
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: GOTRUE_ADMIN_PASSWORD
- name: GOTRUE_DISABLE_SIGNUP
value: "true"
- name: GOTRUE_SITE_URL
value: "appflowy-flutter://"
- name: GOTRUE_URI_ALLOW_LIST
value: "**"
- name: GOTRUE_JWT_SECRET
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: GOTRUE_JWT_SECRET
- name: GOTRUE_JWT_EXP
value: "7200"
- name: GOTRUE_SMTP_HOST
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: SMTP_HOST
- name: GOTRUE_SMTP_PORT
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: SMTP_PORT
- name: GOTRUE_SMTP_USER
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: SMTP_USER
- name: GOTRUE_SMTP_PASS
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: SMTP_PASS
- name: GOTRUE_SMTP_ADMIN_EMAIL
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: SMTP_USER
- name: PORT
value: "9999"
- name: GOTRUE_JWT_ADMIN_GROUP_NAME
value: supabase_admin
- name: API_EXTERNAL_URL
value: https://orbit.panic.haus/gotrue
- name: GOTRUE_MAILER_URLPATHS_CONFIRMATION
value: /gotrue/verify
- name: GOTRUE_MAILER_URLPATHS_INVITE
value: /gotrue/verify
- name: GOTRUE_MAILER_URLPATHS_RECOVERY
value: /gotrue/verify
- name: GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE
value: /gotrue/verify
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: appflowy-cloud
namespace: appflowy
spec:
replicas: 1
selector:
matchLabels:
app: appflowy-cloud
template:
metadata:
labels:
app: appflowy-cloud
spec:
containers:
- name: appflowy-cloud
image: appflowyinc/appflowy_cloud:latest
ports:
- containerPort: 8000
env:
# ----- Database -----
- name: APPFLOWY_DATABASE_URL
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: APPFLOWY_DATABASE_URL
- name: APPFLOWY_REDIS_URI
value: "redis://redis-lb.redis.svc.cluster.local:6379"
# ----- GoTrue (Auth) -----
- name: APPFLOWY_GOTRUE_BASE_URL
value: "http://gotrue.appflowy.svc.cluster.local:9999"
- name: APPFLOWY_GOTRUE_JWT_SECRET
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: GOTRUE_JWT_SECRET
- name: APPFLOWY_GOTRUE_JWT_EXP
value: "7200"
# ----- S3 / Minio -----
- name: APPFLOWY_S3_USE_MINIO
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: APPFLOWY_S3_USE_MINIO
- name: APPFLOWY_S3_MINIO_URL
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: APPFLOWY_S3_MINIO_URL
- name: APPFLOWY_S3_BUCKET
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: APPFLOWY_S3_BUCKET
- name: APPFLOWY_S3_REGION
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: AWS_REGION
- name: APPFLOWY_S3_ACCESS_KEY
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: AWS_ACCESS_KEY
- name: APPFLOWY_S3_SECRET_KEY
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: AWS_SECRET_KEY
#- name: APPFLOWY_S3_PRESIGNED_URL_ENDPOINT
# value: "https://minio.example.com"
# ← Replace with your actual public Minio endpoint if different
# ----- Mailer (AppFlowy Cloud) -----
- name: APPFLOWY_MAILER_SMTP_HOST
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: SMTP_HOST
- name: APPFLOWY_MAILER_SMTP_PORT
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: SMTP_PORT
- name: APPFLOWY_MAILER_SMTP_USERNAME
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: SMTP_USER
- name: APPFLOWY_MAILER_SMTP_PASSWORD
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: SMTP_PASS
- name: APPFLOWY_MAILER_SMTP_EMAIL
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: SMTP_USER
- name: APPFLOWY_MAILER_SMTP_TLS_KIND
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: SMTP_TLS_KIND
# ----- General -----
- name: APPFLOWY_ACCESS_CONTROL
value: "true"
- name: RUST_LOG
value: info
- name: APPFLOWY_ENVIRONMENT
value: production
- name: APPFLOWY_WEB_URL
value: "https://orbit.panic.haus" # ← your public AppFlowy URL
readinessProbe:
httpGet:
path: /health
port: 8000
initialDelaySeconds: 10
periodSeconds: 10
livenessProbe:
httpGet:
path: /health
port: 8000
initialDelaySeconds: 20
periodSeconds: 20
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: admin-frontend
namespace: appflowy
spec:
replicas: 1
selector:
matchLabels:
app: admin-frontend
template:
metadata:
labels:
app: admin-frontend
spec:
containers:
- name: admin-frontend
image: appflowyinc/admin_frontend:latest
ports:
- containerPort: 80
env:
- name: ADMIN_FRONTEND_REDIS_URL
value: "redis://redis-lb.redis.svc.cluster.local:6379"
- name: ADMIN_FRONTEND_GOTRUE_URL
value: "http://gotrue.appflowy.svc.cluster.local:9999"
- name: ADMIN_FRONTEND_APPFLOWY_CLOUD_URL
value: "http://appflowy-cloud.appflowy.svc.cluster.local:8000"
- name: ADMIN_FRONTEND_PATH_PREFIX
value: "/console"
- name: ADMIN_FRONTEND_PORT
value: "80"
readinessProbe:
httpGet:
path: /console
port: 80
initialDelaySeconds: 5
periodSeconds: 10
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: appflowy-worker
namespace: appflowy
spec:
replicas: 1
selector:
matchLabels:
app: appflowy-worker
template:
metadata:
labels:
app: appflowy-worker
spec:
containers:
- name: appflowy-worker
image: appflowyinc/appflowy_worker:latest
env:
- name: RUST_LOG
value: info
- name: APPFLOWY_ENVIRONMENT
value: production
- name: APPFLOWY_WORKER_REDIS_URL
value: "redis://redis-lb.redis.svc.cluster.local:6379"
- name: APPFLOWY_WORKER_DATABASE_URL
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: GOTRUE_DATABASE_URL
- name: APPFLOWY_WORKER_DATABASE_NAME
value: appflowy_db
- name: APPFLOWY_S3_USE_MINIO
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: APPFLOWY_S3_USE_MINIO
- name: APPFLOWY_S3_MINIO_URL
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: APPFLOWY_S3_MINIO_URL
- name: APPFLOWY_S3_ACCESS_KEY
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: AWS_ACCESS_KEY
- name: APPFLOWY_S3_SECRET_KEY
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: AWS_SECRET_KEY
- name: APPFLOWY_S3_BUCKET
valueFrom:
secretKeyRef:
name: appflowy-secrets
key: APPFLOWY_S3_BUCKET
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: appflowy-web
namespace: appflowy
spec:
replicas: 1
selector:
matchLabels:
app: appflowy-web
template:
metadata:
labels:
app: appflowy-web
spec:
containers:
- name: appflowy-web
image: appflowyinc/appflowy_web:latest
ports:
- containerPort: 80
env:
- name: APPFLOWY_CLOUD_URL
value: "http://appflowy-cloud.appflowy.svc.cluster.local:8000"


@@ -0,0 +1,31 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: appflowy-gotrue-ingress
namespace: appflowy
annotations:
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/rewrite-target: /$2
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "50m"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
ingressClassName: nginx
tls:
- hosts:
- orbit.panic.haus
secretName: appflowy-tls
rules:
- host: orbit.panic.haus
http:
paths:
# GoTrue: rewrite /gotrue(/|$)(.*) → /$2
- path: /gotrue(/|$)(.*)
pathType: ImplementationSpecific
backend:
service:
name: gotrue
port:
number: 9999
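
The use-regex / rewrite-target annotations above strip the /gotrue prefix before proxying: the second capture group of /gotrue(/|$)(.*) becomes the upstream path. A hypothetical request, only to illustrate the mapping (the /verify path is the one already used by the GOTRUE_MAILER_URLPATHS_* settings in the deployment):

```shell
# Matched by /gotrue(/|$)(.*) and rewritten to /$2 ...
curl -i https://orbit.panic.haus/gotrue/verify
# ... so the gotrue Service (port 9999) receives the request as GET /verify,
# with the /gotrue prefix dropped by the ingress controller.
```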


@@ -0,0 +1,56 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: appflowy-ingress
namespace: appflowy
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "50m"
spec:
ingressClassName: nginx
tls:
- hosts:
- orbit.panic.haus # ← replace with your public domain
secretName: appflowy-tls
rules:
- host: orbit.panic.haus
http:
paths:
# ┌──────────────────────────────────────────────────────────────────────────────┐
# │ 1) Admin UI (served under /console) │
# └──────────────────────────────────────────────────────────────────────────────┘
- path: /console
pathType: Prefix
backend:
service:
name: admin-frontend
port:
number: 80
# ┌──────────────────────────────────────────────────────────────────────────────┐
# │ 3) AppFlowy-Cloud API & Web │
# • If you want API served on /api, and the static Web on / │
# • You could also send all traffic to appflowy-web and let it call │
# • the backend at /api internally. │
# └──────────────────────────────────────────────────────────────────────────────┘
# a) Direct all `/api/*` calls to the backend service
- path: /api
pathType: Prefix
backend:
service:
name: appflowy-cloud
port:
number: 8000
# b) Everything else (root path) → appflowy-web (static UI)
- path: /
pathType: Prefix
backend:
service:
name: appflowy-web
port:
number: 80


@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: appflowy
resources:
- secret.yaml
- deployment.yaml
- service.yaml
- ingress.yaml
- gotrue-ingress.yaml


@@ -0,0 +1,46 @@
apiVersion: v1
kind: Secret
metadata:
name: appflowy-secrets
namespace: appflowy
stringData:
FQDN: "orbit.panic.haus"
SCHEME: "https"
APPFLOWY_BASE_URL: "https://orbit.panic.haus"
APPFLOWY_WEB_URL: "https://orbit.panic.haus"
# ==== PostgreSQL credentials ====
GOTRUE_DATABASE_URL: "postgres://appflowy:AjUIkz5lcaEGpCrO9KHYAvaKbLsH2Q0e@postgres-base-rw.postgres.svc.cluster.local:5432/appflowy_db?search_path=auth"
APPFLOWY_DATABASE_URL: "postgres://appflowy:AjUIkz5lcaEGpCrO9KHYAvaKbLsH2Q0e@postgres-base-rw.postgres.svc.cluster.local:5432/appflowy_db"
# ==== GoTrue (Auth) keys ====
GOTRUE_JWT_SECRET: "5IqQzMmpRPoeParMsgoWIphrCYdhFhxz9NSyEQYlwGyTrRSsjInyMSaM44ZCH"
GOTRUE_ADMIN_PASSWORD: "KaTPKUXiDUVIcUYWjqSy5SFdqrIl5csS"
GOTRUE_SAML_PRIVATE_KEY: "MIIEpAIBAAKCAQEAz625FMeC/kzE3M1PcX9klYmq4cNJKCXFl3UAiu/VR+RsPoMskVloyNaeESx+C/XhjMySxOGyeAepO6haIybMFqbEiBPjkQNASYUcEdp+pfGliTkgkiffiq3qSIt+ZylVUniGEEnM3JznIoFlW9ikNDlCTRObasIQ0io3bwaRP5pnzMDAPc7i8xBlkybj8Mu3HZmGU+xqiv1zNP9kSWINsMm4wt6Lwqbqt+LNr0q+F3H9yORbErFEGRsAsPMTtPIwX8eUb241MU5WmQ8n7Ea/U1+E3scaPr44TSZg9Xl+KwEVhdX9yX6/QKBefv6d/IwgNVkHxyRmRh9dkONxhWZ/8wIDAQABAoIBAAQjEEhHLydUrSk+18HJiV3nN6W2p6rqkbSSKpgZ7fQ4KyXVpBojH1C84boy2jHvzHXrD1NnsY/tiyP6lw0TNUaQPOL/Dm3xlCLCyYvbf+FbXnJM1obCz5OqIjwetz5j1uTFLNp/NdsBLyODU1sQhjjaGSWC6fom8oHVQHRwO416Qz11ZrOzXB8WDUyPImFkT7hU5F2MJFLU94MY6dBC0NKQBWIvFZQMN8WHoTeTlDcdljN9qduqDFAdMZi6JW0YNr0Ycvgt5qn/Me5EFN3+s3RVRnTL/rSENKeKJFcDXes3XEKxbwtzMVqa6sHZrt6LJtN8jx3tpryD2priCjC0TU0CgYEA7RdDpqmgtkeWHeu5nfzJyE1TEvl2qIezhpNIBwYuzACWRWWbzK3XKaLMq91JJSacHLB9kYpp08Rzsk33m6OLk+Q7g1E8ltHyMvR8avX7kczbL4/FV50Ydb14MOrPPlL/xemL0/faIRmfhGaQ3XgOAIqCoYIPY3HHjCUAMRDpZI8CgYEA4D3xJ9+qCtqzwf6afBHfymkCkEn8mO+4dB6kdXIjppor0EW8Xvg4zDYMq9RmO/ROUQypljCLrwx9ZiElNPTwmIAFPWjuSpAEyzZdxEz0H01PhwERvdMtt6FFTSGRQsTUzWTa7oYAn8K/Fu4VkKBVdbmqQhfUdsk+/RqUHRw/iF0CgYEA400th6gSsw7YpeDr+MJ09brkTUmrcBGBlSC4qjtMPDrH1sp+XvG/WWSCErc5PAvTGVI/YHwxz1wFi8lh/O4DkAr8333Pt8yaBi4M5kLkJ7kd3nBYwxGSdLbsdwF3JQpPuv+YFeUGVDuLilUGx70kt3IToSHe/PkFVZ/XmjLbf5MCgYAHAQhKRYsrIZ+hvJEYtPo3eUYyOY1hPYOWZOqgHHuOlZwuui7jDH/BqSKGL3EuCDh2AZ4+aa/DPPGhwgFGgSwOp1kCjQd8Xrk3m7AcFIc/fwuv3NGwCyuPY8MlYJoH6tv2umK4NolIdC3Bypfz134z2iO+Qr5JI4oLH8xmiF5XpQKBgQDM+vmlxMcHfl0OcnAJuQ0SaqVk6ufrMKRg8dPSvn2G84LdF3Vbr0Qx0vCRrmz85Netj5RdqkQh1dhi/QWMGegMw+bPmrDM6/CCEhT+9e6v5r2iKt3BbskbWdyhTm/nX98Er139/0xllF5Cyx54Xq2cTnDEM/Zaq+UXREHTr/L61Q=="
# ==== Minio (S3) ====
APPFLOWY_S3_MINIO_URL: "https://s3.minio.panic.haus"
MINIO_HOST: "s3.minio.panic.haus"
MINIO_PORT: "443"
AWS_ACCESS_KEY: "rjtPFRp52DgmWb4kdsyiFKjtBMxYSaow" # must match your Minio secret
AWS_SECRET_KEY: "kabSK8RXcONjO8I7GNfJ03WMueJ7fk6z" # must match your Minio secret
APPFLOWY_S3_BUCKET: "appflowy" # your bucket name
APPFLOWY_S3_USE_MINIO: "true"
AWS_REGION: "cluster-panic-haus"
# If you use AWS S3 instead of Minio, set APPFLOWY_S3_CREATE_BUCKET / AWS_REGION here.
# ==== GoTrue SMTP (optional) ====
SMTP_HOST: "mail.mind-overflow.net"
SMTP_PORT: "465"
SMTP_USER: "cloud@mind-overflow.net"
SMTP_PASS: "PcYchuLLUyfT2gvY4Tx7wQ575Tnqjx84zVNoP6Mb"
SMTP_ADMIN_EMAIL: "hello@beatrice.wtf"
# ==== AppFlowy Mailer (Cloud) ====
SMTP_EMAIL: "cloud@mind-overflow.net"
SMTP_TLS_KIND: "wrapper" # "none" "wrapper" "required" "opportunistic"
# ==== Additional secrets for AppFlowy AI (if used) ====
AI_OPENAI_API_KEY: ""
# (Optional) any other secrets you need can go here.


@@ -0,0 +1,95 @@
apiVersion: v1
kind: Service
metadata:
name: gotrue
namespace: appflowy
spec:
ports:
- port: 9999
targetPort: 9999
protocol: TCP
name: http
selector:
app: gotrue
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: appflowy-cloud
namespace: appflowy
spec:
ports:
- port: 8000
targetPort: 8000
protocol: TCP
name: http
selector:
app: appflowy-cloud
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: admin-frontend
namespace: appflowy
spec:
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app: admin-frontend
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: appflowy-worker
namespace: appflowy
spec:
ports:
- port: 8081
targetPort: 8081
protocol: TCP
name: http
selector:
app: appflowy-worker
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: appflowy-web
namespace: appflowy
spec:
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http
selector:
app: appflowy-web
type: ClusterIP
# (If you added appflowy-ai)
---
apiVersion: v1
kind: Service
metadata:
name: appflowy-ai
namespace: appflowy
spec:
ports:
- port: 5001
targetPort: 5001
protocol: TCP
name: http
selector:
app: appflowy-ai
type: ClusterIP


@@ -6,6 +6,6 @@ metadata:
spec:
instances: 3
storage:
size: 10Gi
size: 20Gi
storageClass: longhorn
enableSuperuserAccess: true
enableSuperuserAccess: true


@@ -0,0 +1,19 @@
apiVersion: v1
appVersion: 0.32.2
description: Descheduler for Kubernetes is used to rebalance clusters by evicting
pods that can potentially be scheduled on better nodes. In the current implementation,
descheduler does not schedule replacement of evicted pods but relies on the default
scheduler for that.
home: https://github.com/kubernetes-sigs/descheduler
icon: https://kubernetes.io/images/favicon.png
keywords:
- kubernetes
- descheduler
- kube-scheduler
maintainers:
- email: kubernetes-sig-scheduling@googlegroups.com
name: Kubernetes SIG Scheduling
name: descheduler
sources:
- https://github.com/kubernetes-sigs/descheduler
version: 0.32.2


@@ -0,0 +1,91 @@
# Descheduler for Kubernetes
[Descheduler](https://github.com/kubernetes-sigs/descheduler/) for Kubernetes is used to rebalance clusters by evicting pods that can potentially be scheduled on better nodes. In the current implementation, descheduler does not schedule replacement of evicted pods but relies on the default scheduler for that.
## TL;DR:
```shell
helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/
helm install my-release --namespace kube-system descheduler/descheduler
```
## Introduction
This chart bootstraps a [descheduler](https://github.com/kubernetes-sigs/descheduler/) cron job on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.14+
## Installing the Chart
To install the chart with the release name `my-release`:
```shell
helm install --namespace kube-system my-release descheduler/descheduler
```
The command deploys _descheduler_ on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```shell
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the _descheduler_ chart and their default values.
| Parameter | Description | Default |
| ----------------------------------- | --------------------------------------------------------------------------------------------------------------------- | ----------------------------------------- |
| `kind` | Use as CronJob or Deployment | `CronJob` |
| `image.repository` | Docker repository to use | `registry.k8s.io/descheduler/descheduler` |
| `image.tag` | Docker tag to use | `v[chart appVersion]` |
| `image.pullPolicy` | Docker image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Docker repository secrets | `[]` |
| `nameOverride` | String to partially override `descheduler.fullname` template (will prepend the release name) | `""` |
| `fullnameOverride` | String to fully override `descheduler.fullname` template | `""` |
| `namespaceOverride` | Override the deployment namespace; defaults to .Release.Namespace | `""` |
| `cronJobApiVersion` | CronJob API Group Version | `"batch/v1"` |
| `schedule` | The cron schedule to run the _descheduler_ job on | `"*/2 * * * *"` |
| `startingDeadlineSeconds` | If set, configure `startingDeadlineSeconds` for the _descheduler_ job | `nil` |
| `timeZone` | configure `timeZone` for CronJob | `nil` |
| `successfulJobsHistoryLimit` | If set, configure `successfulJobsHistoryLimit` for the _descheduler_ job | `3` |
| `failedJobsHistoryLimit` | If set, configure `failedJobsHistoryLimit` for the _descheduler_ job | `1` |
| `ttlSecondsAfterFinished` | If set, configure `ttlSecondsAfterFinished` for the _descheduler_ job | `nil` |
| `deschedulingInterval` | If using kind:Deployment, sets time between consecutive descheduler executions. | `5m` |
| `replicas` | The replica count for Deployment | `1` |
| `leaderElection` | The options for high availability when running replicated components | _see values.yaml_ |
| `cmdOptions` | The options to pass to the _descheduler_ command | _see values.yaml_ |
| `priorityClassName` | The name of the priority class to add to pods | `system-cluster-critical` |
| `rbac.create` | If `true`, create & use RBAC resources | `true` |
| `resources` | Descheduler container CPU and memory requests/limits | _see values.yaml_ |
| `serviceAccount.create` | If `true`, create a service account for the cron job | `true` |
| `serviceAccount.name` | The name of the service account to use, if not set and create is true a name is generated using the fullname template | `nil` |
| `serviceAccount.annotations` | Specifies custom annotations for the serviceAccount | `{}` |
| `podAnnotations` | Annotations to add to the descheduler Pods | `{}` |
| `podLabels` | Labels to add to the descheduler Pods | `{}` |
| `nodeSelector` | Node selectors to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `service.enabled` | If `true`, create a service for deployment | `false` |
| `serviceMonitor.enabled` | If `true`, create a ServiceMonitor for deployment | `false` |
| `serviceMonitor.namespace` | The namespace where Prometheus expects to find service monitors | `nil` |
| `serviceMonitor.additionalLabels` | Add custom labels to the ServiceMonitor resource | `{}` |
| `serviceMonitor.interval` | The scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `serviceMonitor.honorLabels` | Keeps the scraped data's labels when labels are on collisions with target labels. | `true` |
| `serviceMonitor.insecureSkipVerify` | Skip TLS certificate validation when scraping | `true` |
| `serviceMonitor.serverName` | Name of the server to use when validating TLS certificate | `nil` |
| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples after scraping, but before ingestion | `[]` |
| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` |
| `affinity` | Node affinity to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `topologySpreadConstraints` | Topology Spread Constraints to spread the descheduler cronjob/deployment across the cluster | `[]` |
| `tolerations` | tolerations to run the descheduler cronjob/deployment on specific nodes | `nil` |
| `suspend` | Set spec.suspend in descheduler cronjob | `false` |
| `commonLabels` | Labels to apply to all resources | `{}` |
| `livenessProbe` | Liveness probe configuration for the descheduler container | _see values.yaml_ |


@@ -0,0 +1,12 @@
Descheduler installed as a {{ .Values.kind }}.
{{- if eq .Values.kind "Deployment" }}
{{- if eq (.Values.replicas | int) 1 }}
WARNING: You set replica count as 1 and workload kind as Deployment however leaderElection is not enabled. Consider enabling Leader Election for HA mode.
{{- end}}
{{- if .Values.leaderElection }}
{{- if and (hasKey .Values.cmdOptions "dry-run") (eq (get .Values.cmdOptions "dry-run") true) }}
WARNING: You enabled DryRun mode, you can't use Leader Election.
{{- end}}
{{- end}}
{{- end}}
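
The notes above flag two constraints: running the chart as a Deployment with more than one replica requires leader election, and leader election cannot be combined with dry-run. A minimal install sketch that satisfies them, reusing the repo URL and chart name from the README earlier in this diff (namespace and replica count are illustrative):

```shell
helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/
helm install descheduler descheduler/descheduler \
  --namespace kube-system \
  --set kind=Deployment \
  --set replicas=2 \
  --set leaderElection.enabled=true
```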


@@ -0,0 +1,104 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "descheduler.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "descheduler.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Expand the namespace of the release.
Allows overriding it for multi-namespace deployments in combined charts.
*/}}
{{- define "descheduler.namespace" -}}
{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "descheduler.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "descheduler.labels" -}}
app.kubernetes.io/name: {{ include "descheduler.name" . }}
helm.sh/chart: {{ include "descheduler.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.commonLabels}}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "descheduler.selectorLabels" -}}
app.kubernetes.io/name: {{ include "descheduler.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "descheduler.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "descheduler.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Leader Election
*/}}
{{- define "descheduler.leaderElection"}}
{{- if .Values.leaderElection -}}
- --leader-elect={{ .Values.leaderElection.enabled }}
{{- if .Values.leaderElection.leaseDuration }}
- --leader-elect-lease-duration={{ .Values.leaderElection.leaseDuration }}
{{- end }}
{{- if .Values.leaderElection.renewDeadline }}
- --leader-elect-renew-deadline={{ .Values.leaderElection.renewDeadline }}
{{- end }}
{{- if .Values.leaderElection.retryPeriod }}
- --leader-elect-retry-period={{ .Values.leaderElection.retryPeriod }}
{{- end }}
{{- if .Values.leaderElection.resourceLock }}
- --leader-elect-resource-lock={{ .Values.leaderElection.resourceLock }}
{{- end }}
{{- if .Values.leaderElection.resourceName }}
- --leader-elect-resource-name={{ .Values.leaderElection.resourceName }}
{{- end }}
{{/* resource namespace value starts with a typo so keeping resourceNamescape for backwards compatibility */}}
{{- $resourceNamespace := default .Values.leaderElection.resourceNamespace .Values.leaderElection.resourceNamescape -}}
{{- if $resourceNamespace -}}
- --leader-elect-resource-namespace={{ $resourceNamespace }}
{{- end -}}
{{- end }}
{{- end }}


@@ -0,0 +1,44 @@
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "descheduler.fullname" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["create", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "watch", "list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "watch", "list", "delete"]
- apiGroups: [""]
resources: ["pods/eviction"]
verbs: ["create"]
- apiGroups: ["scheduling.k8s.io"]
resources: ["priorityclasses"]
verbs: ["get", "watch", "list"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["get", "watch", "list"]
{{- if .Values.leaderElection.enabled }}
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["{{ .Values.leaderElection.resourceName | default "descheduler" }}"]
verbs: ["get", "patch", "delete"]
{{- end }}
{{- if and .Values.deschedulerPolicy .Values.deschedulerPolicy.metricsCollector .Values.deschedulerPolicy.metricsCollector.enabled }}
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list"]
{{- end }}
{{- end -}}


@@ -0,0 +1,16 @@
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "descheduler.fullname" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "descheduler.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ include "descheduler.namespace" . }}
{{- end -}}


@@ -0,0 +1,14 @@
{{- if .Values.deschedulerPolicy }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
data:
policy.yaml: |
apiVersion: "{{ .Values.deschedulerPolicyAPIVersion }}"
kind: "DeschedulerPolicy"
{{ toYaml .Values.deschedulerPolicy | trim | indent 4 }}
{{- end }}


@@ -0,0 +1,111 @@
{{- if eq .Values.kind "CronJob" }}
apiVersion: {{ .Values.cronJobApiVersion | default "batch/v1" }}
kind: CronJob
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
schedule: {{ .Values.schedule | quote }}
{{- if .Values.suspend }}
suspend: {{ .Values.suspend }}
{{- end }}
concurrencyPolicy: "Forbid"
{{- if .Values.startingDeadlineSeconds }}
startingDeadlineSeconds: {{ .Values.startingDeadlineSeconds }}
{{- end }}
{{- if ne .Values.successfulJobsHistoryLimit nil }}
successfulJobsHistoryLimit: {{ .Values.successfulJobsHistoryLimit }}
{{- end }}
{{- if ne .Values.failedJobsHistoryLimit nil }}
failedJobsHistoryLimit: {{ .Values.failedJobsHistoryLimit }}
{{- end }}
{{- if .Values.timeZone }}
timeZone: {{ .Values.timeZone }}
{{- end }}
jobTemplate:
spec:
{{- if .Values.ttlSecondsAfterFinished }}
ttlSecondsAfterFinished: {{ .Values.ttlSecondsAfterFinished }}
{{- end }}
template:
metadata:
name: {{ template "descheduler.fullname" . }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{- .Values.podAnnotations | toYaml | nindent 12 }}
{{- end }}
labels:
{{- include "descheduler.selectorLabels" . | nindent 12 }}
{{- if .Values.podLabels }}
{{- .Values.podLabels | toYaml | nindent 12 }}
{{- end }}
spec:
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
restartPolicy: "Never"
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 10 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
{{- toYaml .Values.command | nindent 16 }}
args:
- --policy-config-file=/policy-dir/policy.yaml
{{- range $key, $value := .Values.cmdOptions }}
{{- if ne $value nil }}
- {{ printf "--%s=%s" $key (toString $value) }}
{{- else }}
- {{ printf "--%s" $key }}
{{- end }}
{{- end }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 16 }}
ports:
{{- toYaml .Values.ports | nindent 16 }}
resources:
{{- toYaml .Values.resources | nindent 16 }}
{{- if .Values.securityContext }}
securityContext:
{{- toYaml .Values.securityContext | nindent 16 }}
{{- end }}
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 12 }}
{{- end }}
volumes:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- end }}


@@ -0,0 +1,100 @@
{{- if eq .Values.kind "Deployment" }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
spec:
{{- if gt (.Values.replicas | int) 1 }}
{{- if not .Values.leaderElection.enabled }}
{{- fail "You must set leaderElection to use more than 1 replica"}}
{{- end}}
replicas: {{ required "leaderElection required for running more than one replica" .Values.replicas }}
{{- else }}
replicas: 1
{{- end }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "descheduler.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- .Values.podLabels | toYaml | nindent 8 }}
{{- end }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- if .Values.podAnnotations }}
{{- .Values.podAnnotations | toYaml | nindent 8 }}
{{- end }}
spec:
{{- if .Values.dnsConfig }}
dnsConfig:
{{- .Values.dnsConfig | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
serviceAccountName: {{ template "descheduler.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 6 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default (printf "v%s" .Chart.AppVersion) }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
{{- toYaml .Values.command | nindent 12 }}
args:
- --policy-config-file=/policy-dir/policy.yaml
- --descheduling-interval={{ required "deschedulingInterval required for running as Deployment" .Values.deschedulingInterval }}
{{- range $key, $value := .Values.cmdOptions }}
{{- if ne $value nil }}
- {{ printf "--%s=%s" $key (toString $value) }}
{{- else }}
- {{ printf "--%s" $key }}
{{- end }}
{{- end }}
{{- include "descheduler.leaderElection" . | nindent 12 }}
ports:
{{- toYaml .Values.ports | nindent 12 }}
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if .Values.securityContext }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
volumeMounts:
- mountPath: /policy-dir
name: policy-volume
{{- if .Values.podSecurityContext }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
{{- end }}
volumes:
- name: policy-volume
configMap:
name: {{ template "descheduler.fullname" . }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}


@@ -0,0 +1,27 @@
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.service.enabled true }}
apiVersion: v1
kind: Service
metadata:
labels:
{{- include "descheduler.labels" . | nindent 4 }}
name: {{ template "descheduler.fullname" . }}
namespace: {{ include "descheduler.namespace" . }}
spec:
clusterIP: None
{{- if .Values.service.ipFamilyPolicy }}
ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
{{- end }}
{{- if .Values.service.ipFamilies }}
ipFamilies: {{ toYaml .Values.service.ipFamilies | nindent 4 }}
{{- end }}
ports:
- name: http-metrics
port: 10258
protocol: TCP
targetPort: 10258
selector:
{{- include "descheduler.selectorLabels" . | nindent 4 }}
type: ClusterIP
{{- end }}
{{- end }}


@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "descheduler.serviceAccountName" . }}
namespace: {{ include "descheduler.namespace" . }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceAccount.annotations }}
annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
{{- end }}
{{- end -}}


@@ -0,0 +1,44 @@
{{- if eq .Values.kind "Deployment" }}
{{- if eq .Values.serviceMonitor.enabled true }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "descheduler.fullname" . }}-servicemonitor
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
labels:
{{- include "descheduler.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- {{ include "descheduler.namespace" . }}
selector:
matchLabels:
{{- include "descheduler.selectorLabels" . | nindent 6 }}
endpoints:
- honorLabels: {{ .Values.serviceMonitor.honorLabels | default true }}
port: http-metrics
{{- if .Values.serviceMonitor.interval }}
interval: {{ .Values.serviceMonitor.interval }}
{{- end }}
scheme: https
tlsConfig:
{{- if eq .Values.serviceMonitor.insecureSkipVerify true }}
insecureSkipVerify: true
{{- end }}
{{- if .Values.serviceMonitor.serverName }}
serverName: {{ .Values.serviceMonitor.serverName }}
{{- end}}
{{- if .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{ tpl (toYaml .Values.serviceMonitor.metricRelabelings | indent 4) . }}
{{- end }}
{{- if .Values.serviceMonitor.relabelings }}
relabelings:
{{ tpl (toYaml .Values.serviceMonitor.relabelings | indent 4) . }}
{{- end }}
{{- end }}
{{- end }}


@@ -0,0 +1,17 @@
suite: Test Descheduler CronJob
templates:
- "*.yaml"
release:
name: descheduler
set:
kind: CronJob
tests:
- it: creates CronJob when kind is set
template: templates/cronjob.yaml
asserts:
- isKind:
of: CronJob


@@ -0,0 +1,49 @@
suite: Test Descheduler Deployment
templates:
- "*.yaml"
release:
name: descheduler
set:
kind: Deployment
tests:
- it: creates Deployment when kind is set
template: templates/deployment.yaml
asserts:
- isKind:
of: Deployment
- it: enables leader-election
set:
leaderElection:
enabled: true
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect=true
- it: support leader-election resourceNamespace
set:
leaderElection:
enabled: true
resourceNamespace: test
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect-resource-namespace=test
- it: support legacy leader-election resourceNamescape
set:
leaderElection:
enabled: true
resourceNamescape: typo
template: templates/deployment.yaml
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: --leader-elect-resource-namespace=typo
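
These suites use the helm-unittest plugin layout (suite / templates / tests / asserts). A rough sketch of running them locally; the chart directory name below is a placeholder, since this diff does not show where the chart was vendored:

```shell
# One-time plugin install
helm plugin install https://github.com/helm-unittest/helm-unittest

# Run the *_test.yaml suites under the chart's tests/ directory (path is hypothetical)
helm unittest deploy/descheduler
```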


@@ -0,0 +1,252 @@
# Default values for descheduler.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# CronJob or Deployment
kind: CronJob
image:
repository: registry.k8s.io/descheduler/descheduler
# Overrides the image tag whose default is the chart version
tag: ""
pullPolicy: IfNotPresent
imagePullSecrets:
# - name: container-registry-secret
resources:
requests:
cpu: 500m
memory: 256Mi
limits:
cpu: 500m
memory: 256Mi
ports:
- containerPort: 10258
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
# podSecurityContext -- [Security context for pod](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
podSecurityContext: {}
# fsGroup: 1000
nameOverride: ""
fullnameOverride: ""
# -- Override the deployment namespace; defaults to .Release.Namespace
namespaceOverride: ""
# labels that'll be applied to all resources
commonLabels: {}
cronJobApiVersion: "batch/v1"
schedule: "*/2 * * * *"
suspend: false
# startingDeadlineSeconds: 200
# successfulJobsHistoryLimit: 3
# failedJobsHistoryLimit: 1
# ttlSecondsAfterFinished 600
# timeZone: Etc/UTC
# Required when running as a Deployment
deschedulingInterval: 5m
# Specifies the replica count for Deployment
# Set leaderElection if you want to use more than 1 replica
# Set affinity.podAntiAffinity rule if you want to schedule onto a node
# only if that node is in the same zone as at least one already-running descheduler
replicas: 1
# Specifies whether Leader Election resources should be created
# Required when running as a Deployment
# NOTE: Leader election can't be activated if DryRun enabled
leaderElection: {}
# enabled: true
# leaseDuration: 15s
# renewDeadline: 10s
# retryPeriod: 2s
# resourceLock: "leases"
# resourceName: "descheduler"
# resourceNamespace: "kube-system"
command:
- "/bin/descheduler"
cmdOptions:
v: 3
# Recommended to use the latest Policy API version supported by the Descheduler app version
deschedulerPolicyAPIVersion: "descheduler/v1alpha2"
# deschedulerPolicy contains the policies the descheduler will execute.
# To use policies stored in an existing configMap use:
# NOTE: The name of the cm should comply to {{ template "descheduler.fullname" . }}
# deschedulerPolicy: {}
deschedulerPolicy:
# nodeSelector: "key1=value1,key2=value2"
# maxNoOfPodsToEvictPerNode: 10
# maxNoOfPodsToEvictPerNamespace: 10
# metricsCollector:
# enabled: true
# ignorePvcPods: true
# evictLocalStoragePods: true
# evictDaemonSetPods: true
# tracing:
# collectorEndpoint: otel-collector.observability.svc.cluster.local:4317
# transportCert: ""
# serviceName: ""
# serviceNamespace: ""
# sampleRate: 1.0
# fallbackToNoOpProviderOnError: true
profiles:
- name: default
pluginConfig:
- name: DefaultEvictor
args:
ignorePvcPods: true
evictLocalStoragePods: true
- name: RemoveDuplicates
- name: RemovePodsHavingTooManyRestarts
args:
podRestartThreshold: 100
includingInitContainers: true
- name: RemovePodsViolatingNodeAffinity
args:
nodeAffinityType:
- requiredDuringSchedulingIgnoredDuringExecution
- name: RemovePodsViolatingNodeTaints
- name: RemovePodsViolatingInterPodAntiAffinity
- name: RemovePodsViolatingTopologySpreadConstraint
- name: LowNodeUtilization
args:
thresholds:
cpu: 40
memory: 30
pods: 30
targetThresholds:
cpu: 50
memory: 60
pods: 50
plugins:
balance:
enabled:
- RemoveDuplicates
- RemovePodsViolatingTopologySpreadConstraint
- LowNodeUtilization
deschedule:
enabled:
- RemovePodsHavingTooManyRestarts
- RemovePodsViolatingNodeTaints
- RemovePodsViolatingNodeAffinity
- RemovePodsViolatingInterPodAntiAffinity
priorityClassName: system-cluster-critical
nodeSelector: {}
# foo: bar
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: kubernetes.io/e2e-az-name
# operator: In
# values:
# - e2e-az1
# - e2e-az2
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - descheduler
# topologyKey: "kubernetes.io/hostname"
topologySpreadConstraints: []
# - maxSkew: 1
# topologyKey: kubernetes.io/hostname
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app.kubernetes.io/name: descheduler
tolerations: []
# - key: 'management'
# operator: 'Equal'
# value: 'tool'
# effect: 'NoSchedule'
rbac:
# Specifies whether RBAC resources should be created
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
# Specifies custom annotations for the serviceAccount
annotations: {}
podAnnotations: {}
podLabels: {}
dnsConfig: {}
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 3
periodSeconds: 10
service:
enabled: false
# @param service.ipFamilyPolicy [string], support SingleStack, PreferDualStack and RequireDualStack
#
ipFamilyPolicy: ""
# @param service.ipFamilies [array] List of IP families (e.g. IPv4, IPv6) assigned to the service.
# Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/
# E.g.
# ipFamilies:
# - IPv6
# - IPv4
ipFamilies: []
serviceMonitor:
enabled: false
# The namespace where Prometheus expects to find service monitors.
# namespace: ""
# Add custom labels to the ServiceMonitor resource
additionalLabels: {}
# prometheus: kube-prometheus-stack
interval: ""
# honorLabels: true
insecureSkipVerify: true
serverName: null
metricRelabelings: []
# - action: keep
# regex: 'descheduler_(build_info|pods_evicted)'
# sourceLabels: [__name__]
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace
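A note on the "policies stored in an existing configMap" option commented above: the referenced ConfigMap would look roughly like the sketch below. This is a minimal, untested example; the name "descheduler" and the kube-system namespace are assumptions (the name must resolve to the chart's fullname), the policy.yaml key mirrors what the chart's own ConfigMap uses, and the policy body simply restates a subset of the inline deschedulerPolicy.

apiVersion: v1
kind: ConfigMap
metadata:
  name: descheduler              # assumed fullname of this release
  namespace: kube-system         # assumed install namespace
data:
  policy.yaml: |
    apiVersion: "descheduler/v1alpha2"
    kind: "DeschedulerPolicy"
    profiles:
      - name: default
        pluginConfig:
          - name: DefaultEvictor
            args:
              ignorePvcPods: true
              evictLocalStoragePods: true
        plugins:
          deschedule:
            enabled:
              - RemovePodsViolatingNodeTaints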

deploy/elk-stack/crds.yaml

File diff suppressed because it is too large.


@@ -0,0 +1,21 @@
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
name: elasticsearch-ha
spec:
version: 8.17.4
nodeSets:
- name: default
count: 3
config:
node.store.allow_mmap: false
# volumeClaimTemplates:
# - metadata:
# name: elasticsearch-data
# spec:
# accessModes:
# - ReadWriteOnce
# storageClassName: longhorn
# resources:
# requests:
# storage: 5Gi
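For context: with volumeClaimTemplates left commented out, ECK provisions its default elasticsearch-data claim (a small PVC on the cluster's default StorageClass), so the Longhorn-backed 5Gi sizing only takes effect if the block is uncommented under the nodeSet, roughly as follows (same values as the comment above):

  volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data
      spec:
        accessModes:
          - ReadWriteOnce
        storageClassName: longhorn
        resources:
          requests:
            storage: 5Gi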


@@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: kibana-ingress
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
ingressClassName: nginx
tls:
- hosts:
- query.prod.panic.haus
secretName: kibana-tls
rules:
- host: query.prod.panic.haus
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kibana-ha-kb-http
port:
number: 5601


@@ -0,0 +1,9 @@
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
name: kibana-ha
spec:
version: 8.17.4
count: 2
elasticsearchRef:
name: elasticsearch-ha


@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: elastic-system
resources:
# - crds.yaml
- operator.yaml
- elasticsearch.yaml
- kibana.yaml
- kibana-ingress.yaml
# - longstash.yaml


@@ -0,0 +1,58 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: logstash
namespace: elasticsearch
spec:
replicas: 2
selector:
matchLabels:
app: logstash
template:
metadata:
labels:
app: logstash
spec:
containers:
- name: logstash
image: docker.io/bitnami/logstash:7.17.0
# Customize environment variables and command-line args as needed
env:
- name: ELASTICSEARCH_HOST
value: "elasticsearch-ha-es-http" # Adjust based on your ES service DNS name
ports:
- containerPort: 9600
volumeMounts:
- name: logstash-data
mountPath: /bitnami
volumes:
- name: logstash-data
persistentVolumeClaim:
claimName: logstash-pvc
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: logstash-pvc
namespace: elasticsearch
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: Service
metadata:
name: logstash
namespace: elasticsearch
spec:
selector:
app: logstash
ports:
- protocol: TCP
port: 9600
targetPort: 9600


@@ -0,0 +1,797 @@
# Source: eck-operator/templates/operator-namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: elastic-system
labels:
name: elastic-system
---
# Source: eck-operator/templates/service-account.yaml
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: true
metadata:
name: elastic-operator
namespace: elastic-system
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.16.1"
---
# Source: eck-operator/templates/webhook.yaml
apiVersion: v1
kind: Secret
metadata:
name: elastic-webhook-server-cert
namespace: elastic-system
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.16.1"
---
# Source: eck-operator/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: elastic-operator
namespace: elastic-system
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.16.1"
data:
eck.yaml: |-
log-verbosity: 0
metrics-port: 0
metrics-secure: false
container-registry: docker.elastic.co
max-concurrent-reconciles: 3
ca-cert-validity: 8760h
ca-cert-rotate-before: 24h
cert-validity: 8760h
cert-rotate-before: 24h
disable-config-watch: false
exposed-node-labels: [topology.kubernetes.io/.*,failure-domain.beta.kubernetes.io/.*]
set-default-security-context: auto-detect
kube-client-timeout: 60s
elasticsearch-client-timeout: 180s
disable-telemetry: false
distribution-channel: all-in-one
validate-storage-class: true
enable-webhook: true
webhook-name: elastic-webhook.k8s.elastic.co
webhook-port: 9443
operator-namespace: elastic-system
enable-leader-election: true
elasticsearch-observation-interval: 10s
ubi-only: false
---
# Source: eck-operator/templates/cluster-roles.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: elastic-operator
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.16.1"
rules:
- apiGroups:
- "authorization.k8s.io"
resources:
- subjectaccessreviews
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- elastic-operator-leader
verbs:
- get
- watch
- update
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods
- events
- persistentvolumeclaims
- secrets
- services
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- apps
resources:
- deployments
- statefulsets
- daemonsets
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- elasticsearch.k8s.elastic.co
resources:
- elasticsearches
- elasticsearches/status
- elasticsearches/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- autoscaling.k8s.elastic.co
resources:
- elasticsearchautoscalers
- elasticsearchautoscalers/status
- elasticsearchautoscalers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- kibana.k8s.elastic.co
resources:
- kibanas
- kibanas/status
- kibanas/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- apm.k8s.elastic.co
resources:
- apmservers
- apmservers/status
- apmservers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- enterprisesearch.k8s.elastic.co
resources:
- enterprisesearches
- enterprisesearches/status
- enterprisesearches/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- beat.k8s.elastic.co
resources:
- beats
- beats/status
- beats/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- agent.k8s.elastic.co
resources:
- agents
- agents/status
- agents/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- maps.k8s.elastic.co
resources:
- elasticmapsservers
- elasticmapsservers/status
- elasticmapsservers/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- stackconfigpolicy.k8s.elastic.co
resources:
- stackconfigpolicies
- stackconfigpolicies/status
- stackconfigpolicies/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- logstash.k8s.elastic.co
resources:
- logstashes
- logstashes/status
- logstashes/finalizers # needed for ownerReferences with blockOwnerDeletion on OCP
verbs:
- get
- list
- watch
- create
- update
- patch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
---
# Source: eck-operator/templates/cluster-roles.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "elastic-operator-view"
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
control-plane: elastic-operator
app.kubernetes.io/version: "2.16.1"
rules:
- apiGroups: ["elasticsearch.k8s.elastic.co"]
resources: ["elasticsearches"]
verbs: ["get", "list", "watch"]
- apiGroups: ["autoscaling.k8s.elastic.co"]
resources: ["elasticsearchautoscalers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["apm.k8s.elastic.co"]
resources: ["apmservers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["kibana.k8s.elastic.co"]
resources: ["kibanas"]
verbs: ["get", "list", "watch"]
- apiGroups: ["enterprisesearch.k8s.elastic.co"]
resources: ["enterprisesearches"]
verbs: ["get", "list", "watch"]
- apiGroups: ["beat.k8s.elastic.co"]
resources: ["beats"]
verbs: ["get", "list", "watch"]
- apiGroups: ["agent.k8s.elastic.co"]
resources: ["agents"]
verbs: ["get", "list", "watch"]
- apiGroups: ["maps.k8s.elastic.co"]
resources: ["elasticmapsservers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["stackconfigpolicy.k8s.elastic.co"]
resources: ["stackconfigpolicies"]
verbs: ["get", "list", "watch"]
- apiGroups: ["logstash.k8s.elastic.co"]
resources: ["logstashes"]
verbs: ["get", "list", "watch"]
---
# Source: eck-operator/templates/cluster-roles.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: "elastic-operator-edit"
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
control-plane: elastic-operator
app.kubernetes.io/version: "2.16.1"
rules:
- apiGroups: ["elasticsearch.k8s.elastic.co"]
resources: ["elasticsearches"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["autoscaling.k8s.elastic.co"]
resources: ["elasticsearchautoscalers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["apm.k8s.elastic.co"]
resources: ["apmservers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["kibana.k8s.elastic.co"]
resources: ["kibanas"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["enterprisesearch.k8s.elastic.co"]
resources: ["enterprisesearches"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["beat.k8s.elastic.co"]
resources: ["beats"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["agent.k8s.elastic.co"]
resources: ["agents"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["maps.k8s.elastic.co"]
resources: ["elasticmapsservers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["stackconfigpolicy.k8s.elastic.co"]
resources: ["stackconfigpolicies"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["logstash.k8s.elastic.co"]
resources: ["logstashes"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
---
# Source: eck-operator/templates/role-bindings.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: elastic-operator
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.16.1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: elastic-operator
subjects:
- kind: ServiceAccount
name: elastic-operator
namespace: elastic-system
---
# Source: eck-operator/templates/webhook.yaml
apiVersion: v1
kind: Service
metadata:
name: elastic-webhook-server
namespace: elastic-system
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.16.1"
spec:
ports:
- name: https
port: 443
targetPort: 9443
selector:
control-plane: elastic-operator
---
# Source: eck-operator/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elastic-operator
namespace: elastic-system
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.16.1"
spec:
selector:
matchLabels:
control-plane: elastic-operator
serviceName: elastic-operator
replicas: 1
template:
metadata:
annotations:
# Rename the fields "error" to "error.message" and "source" to "event.source"
# This is to avoid a conflict with the ECS "error" and "source" documents.
"co.elastic.logs/raw": "[{\"type\":\"container\",\"json.keys_under_root\":true,\"paths\":[\"/var/log/containers/*${data.kubernetes.container.id}.log\"],\"processors\":[{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"error\",\"to\":\"_error\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_error\",\"to\":\"error.message\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"source\",\"to\":\"_source\"}]}},{\"convert\":{\"mode\":\"rename\",\"ignore_missing\":true,\"fields\":[{\"from\":\"_source\",\"to\":\"event.source\"}]}}]}]"
"checksum/config": 7c44077bea6cc3ad577d0f45159ddc8c6096c69128668786fc104a4ce081d4d2
labels:
control-plane: elastic-operator
spec:
terminationGracePeriodSeconds: 10
serviceAccountName: elastic-operator
automountServiceAccountToken: true
securityContext:
runAsNonRoot: true
containers:
- image: "docker.elastic.co/eck/eck-operator:2.16.1"
imagePullPolicy: IfNotPresent
name: manager
args:
- "manager"
- "--config=/conf/eck.yaml"
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
env:
- name: OPERATOR_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: WEBHOOK_SECRET
value: elastic-webhook-server-cert
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 100m
memory: 150Mi
ports:
- containerPort: 9443
name: https-webhook
protocol: TCP
volumeMounts:
- mountPath: "/conf"
name: conf
readOnly: true
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
volumes:
- name: conf
configMap:
name: elastic-operator
- name: cert
secret:
defaultMode: 420
secretName: elastic-webhook-server-cert
---
# Source: eck-operator/templates/webhook.yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: elastic-webhook.k8s.elastic.co
labels:
control-plane: elastic-operator
app.kubernetes.io/version: "2.16.1"
webhooks:
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-agent-k8s-elastic-co-v1alpha1-agent
failurePolicy: Ignore
name: elastic-agent-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- agent.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- agents
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-apm-k8s-elastic-co-v1-apmserver
failurePolicy: Ignore
name: elastic-apm-validation-v1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- apm.k8s.elastic.co
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- apmservers
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-apm-k8s-elastic-co-v1beta1-apmserver
failurePolicy: Ignore
name: elastic-apm-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- apm.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- apmservers
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-beat-k8s-elastic-co-v1beta1-beat
failurePolicy: Ignore
name: elastic-beat-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- beat.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- beats
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-enterprisesearch-k8s-elastic-co-v1-enterprisesearch
failurePolicy: Ignore
name: elastic-ent-validation-v1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- enterprisesearch.k8s.elastic.co
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- enterprisesearches
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-enterprisesearch-k8s-elastic-co-v1beta1-enterprisesearch
failurePolicy: Ignore
name: elastic-ent-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- enterprisesearch.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- enterprisesearches
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-elasticsearch-k8s-elastic-co-v1-elasticsearch
failurePolicy: Ignore
name: elastic-es-validation-v1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- elasticsearch.k8s.elastic.co
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- elasticsearches
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-elasticsearch-k8s-elastic-co-v1beta1-elasticsearch
failurePolicy: Ignore
name: elastic-es-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- elasticsearch.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- elasticsearches
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-ems-k8s-elastic-co-v1alpha1-mapsservers
failurePolicy: Ignore
name: elastic-ems-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- maps.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- mapsservers
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-kibana-k8s-elastic-co-v1-kibana
failurePolicy: Ignore
name: elastic-kb-validation-v1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- kibana.k8s.elastic.co
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- kibanas
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-kibana-k8s-elastic-co-v1beta1-kibana
failurePolicy: Ignore
name: elastic-kb-validation-v1beta1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- kibana.k8s.elastic.co
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- kibanas
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-autoscaling-k8s-elastic-co-v1alpha1-elasticsearchautoscaler
failurePolicy: Ignore
name: elastic-esa-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- autoscaling.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- elasticsearchautoscalers
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-scp-k8s-elastic-co-v1alpha1-stackconfigpolicies
failurePolicy: Ignore
name: elastic-scp-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- stackconfigpolicy.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- stackconfigpolicies
- clientConfig:
service:
name: elastic-webhook-server
namespace: elastic-system
path: /validate-logstash-k8s-elastic-co-v1alpha1-logstash
failurePolicy: Ignore
name: elastic-logstash-validation-v1alpha1.k8s.elastic.co
matchPolicy: Exact
admissionReviewVersions: [v1, v1beta1]
sideEffects: None
rules:
- apiGroups:
- logstash.k8s.elastic.co
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- logstashes

deploy/gitea/values.yaml

@@ -0,0 +1,5 @@
persistence:
enabled: true
storageClass: longhorn
accessMode: ReadWriteOnce
size: 20Gi


@@ -13,3 +13,12 @@ data:
url: http://prometheus-k8s.monitoring:9090
isDefault: true
editable: false
- name: Elasticsearch
type: elasticsearch
access: proxy
url: https://elasticsearch-ha-es-http.elastic-system.svc:9200
jsonData:
esVersion: 8.17.4
timeField: "@timestamp"
tlsSkipVerify: true
editable: false


@@ -5,4 +5,4 @@ resources:
- grafana-deploy.yaml
- grafana-ingress.yaml
- grafana-svc.yaml
- prometheus-ds.yaml
- data-sources.yaml


@@ -0,0 +1,18 @@
apiVersion: v1
kind: Secret
metadata:
name: grafana-db-secret
namespace: grafana
type: Opaque
data:
username: Z3JhZmFuYQ==
password: dndyMGc5aWpoMGIzaXJka3ZqMG1ndXZoM3I=
---
apiVersion: v1
kind: Secret
metadata:
name: grafana-oauth-secret
namespace: grafana
type: Opaque
data:
client-secret: VFVEYU5uY091b1Y1QzFmeUJaeXN3ZzNEU3VYWU9laEQ=
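Aside: base64 data blocks like the ones above are hard to review; an equivalent form uses stringData, which the API server encodes on admission. A sketch with a placeholder password (not the real credential):

apiVersion: v1
kind: Secret
metadata:
  name: grafana-db-secret
  namespace: grafana
type: Opaque
stringData:
  username: grafana
  password: <grafana-db-password>   # placeholder value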


@@ -114,13 +114,13 @@ spec:
httpGet:
path: /health/live
port: 9000 # Use management port for liveness
initialDelaySeconds: 60
initialDelaySeconds: 90
periodSeconds: 30
readinessProbe:
httpGet:
path: /health/ready
port: 9000 # Use management port for readiness
initialDelaySeconds: 30
initialDelaySeconds: 60
periodSeconds: 15
affinity:
# Spread pods across different nodes for higher availability


@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: keycloak-db-secret
namespace: keycloak
type: Opaque
data:
username: a2V5Y2xvYWs= # base64 encoded
password: dTgyNXFDTnhmckJTY0tUb1RkM1c5ektWUHhwVnNpN0w= # base64 encoded


@@ -2,3 +2,6 @@ namespace: longhorn-system
resources:
- longhorn-deploy.yaml
- longhorn-ingress.yaml
- oauth2-proxy-longhorn-ingress.yaml
- oauth2-proxy-longhorn-service.yaml
- oauth2-proxy-longhorn.yaml


@@ -103,7 +103,7 @@ data:
reclaimPolicy: "Delete"
volumeBindingMode: Immediate
parameters:
numberOfReplicas: "3"
numberOfReplicas: "1"
staleReplicaTimeout: "30"
fromBackup: ""
fsType: "ext4"


@@ -4,15 +4,15 @@ metadata:
name: longhorn-ingress
namespace: longhorn-system
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/auth-signin: https://$host/oauth2/start?rd=$escaped_request_uri
nginx.ingress.kubernetes.io/auth-url: https://$host/oauth2/auth
spec:
ingressClassName: nginx
tls:
- hosts:
- longhorn.prod.panic.haus
secretName: longhorn-tls
rules:
- host: longhorn.prod.panic.haus
http:


@@ -0,0 +1,19 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: oauth2-proxy-longhorn-ingress
namespace: longhorn-system
annotations:
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: longhorn.prod.panic.haus
http:
paths:
- path: /oauth2
pathType: Prefix
backend:
service:
name: oauth2-proxy-longhorn-service
port:
number: 4180


@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: oauth2-proxy-longhorn-service
namespace: longhorn-system
spec:
ports:
- port: 4180
targetPort: 4180
protocol: TCP
name: http
selector:
app: oauth2-proxy-longhorn


@@ -0,0 +1,38 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: oauth2-proxy-longhorn
namespace: longhorn-system
spec:
replicas: 1
selector:
matchLabels:
app: oauth2-proxy-longhorn
template:
metadata:
labels:
app: oauth2-proxy-longhorn
spec:
containers:
- name: oauth2-proxy-longhorn
image: quay.io/oauth2-proxy/oauth2-proxy:v7.8.2
args:
- --provider=keycloak
- --client-id=longhorn
- --client-secret=gxyMUP89svnEXnz128ZqNBTLxjlLpBxM
- --cookie-secret=1arlufhiIIvTT3iPexXVREeo8YDX-ZLk3k33-98FPRM=
- --oidc-issuer-url=https://sso.panic.haus/realms/panic-haus
- --cookie-domain=longhorn.prod.panic.haus
- --email-domain=*
# - --session-store-type=redis
# - --redis-connection-url=redis://redis-lb.redis.svc.cluster.local:6379
- --http-address=0.0.0.0:4180
- --redirect-url=https://longhorn.prod.panic.haus/oauth2/callback
- --upstream=http://longhorn-frontend.longhorn-system.svc.cluster.local:80
- --scope=openid
- --login-url=https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/auth
- --validate-url=https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/userinfo
- --redeem-url=https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/token
ports:
- containerPort: 4180
name: http
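The Keycloak client secret and the cookie secret are passed as plain command-line args above. oauth2-proxy also reads OAUTH2_PROXY_* environment variables, so they could instead be sourced from a Kubernetes Secret; a sketch, where oauth2-proxy-longhorn-secret is a hypothetical Secret holding client-secret and cookie-secret keys:

        env:
          - name: OAUTH2_PROXY_CLIENT_SECRET
            valueFrom:
              secretKeyRef:
                name: oauth2-proxy-longhorn-secret   # hypothetical Secret
                key: client-secret
          - name: OAUTH2_PROXY_COOKIE_SECRET
            valueFrom:
              secretKeyRef:
                name: oauth2-proxy-longhorn-secret   # hypothetical Secret
                key: cookie-secret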


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: minio-operator-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: minio-operator-role
subjects:
- kind: ServiceAccount
name: minio-operator
namespace: default


@@ -0,0 +1,178 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: minio-operator-role
rules:
- apiGroups:
- "apiextensions.k8s.io"
resources:
- customresourcedefinitions
verbs:
- get
- update
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- update
- list
- apiGroups:
- ""
resources:
- namespaces
- nodes
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- pods
- services
- events
- configmaps
verbs:
- get
- watch
- create
- list
- delete
- deletecollection
- update
- patch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- watch
- create
- update
- list
- delete
- deletecollection
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
- rolebindings
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- statefulsets
- deployments
- deployments/finalizers
verbs:
- get
- create
- list
- patch
- watch
- update
- delete
- apiGroups:
- batch
resources:
- jobs
verbs:
- get
- create
- list
- patch
- watch
- update
- delete
- apiGroups:
- "certificates.k8s.io"
resources:
- "certificatesigningrequests"
- "certificatesigningrequests/approval"
- "certificatesigningrequests/status"
verbs:
- update
- create
- get
- delete
- list
- apiGroups:
- certificates.k8s.io
resourceNames:
- kubernetes.io/legacy-unknown
- kubernetes.io/kube-apiserver-client
- kubernetes.io/kubelet-serving
- beta.eks.amazonaws.com/app-serving
resources:
- signers
verbs:
- approve
- sign
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- minio.min.io
- sts.min.io
- job.min.io
resources:
- "*"
verbs:
- "*"
- apiGroups:
- min.io
resources:
- "*"
verbs:
- "*"
- apiGroups:
- monitoring.coreos.com
resources:
- prometheuses
verbs:
- '*'
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
- get
- update
- create
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- create
- delete
- get
- list
- patch
- update
- deletecollection


@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- minio.min.io_tenants.yaml
- sts.min.io_policybindings.yaml

File diff suppressed because it is too large.


@@ -0,0 +1,133 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.17.2
operator.min.io/version: v7.0.1
name: policybindings.sts.min.io
spec:
group: sts.min.io
names:
kind: PolicyBinding
listKind: PolicyBindingList
plural: policybindings
shortNames:
- policybinding
singular: policybinding
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .status.currentState
name: State
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
type: string
kind:
type: string
metadata:
type: object
spec:
properties:
application:
properties:
namespace:
type: string
serviceaccount:
type: string
required:
- namespace
- serviceaccount
type: object
policies:
items:
type: string
type: array
required:
- application
- policies
type: object
status:
properties:
currentState:
type: string
usage:
nullable: true
properties:
authotizations:
format: int64
type: integer
type: object
required:
- currentState
- usage
type: object
type: object
served: true
storage: false
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .status.currentState
name: State
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta1
schema:
openAPIV3Schema:
properties:
apiVersion:
type: string
kind:
type: string
metadata:
type: object
spec:
properties:
application:
properties:
namespace:
type: string
serviceaccount:
type: string
required:
- namespace
- serviceaccount
type: object
policies:
items:
type: string
type: array
required:
- application
- policies
type: object
status:
properties:
currentState:
type: string
usage:
nullable: true
properties:
authotizations:
format: int64
type: integer
type: object
required:
- currentState
- usage
type: object
type: object
served: true
storage: true
subresources:
status: {}


@@ -0,0 +1,59 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: minio-operator
namespace: minio-operator
labels:
app.kubernetes.io/instance: minio-operator
app.kubernetes.io/name: operator
spec:
replicas: 2
selector:
matchLabels:
name: minio-operator
strategy:
type: Recreate
template:
metadata:
labels:
name: minio-operator
app.kubernetes.io/instance: minio-operator
app.kubernetes.io/name: operator
spec:
serviceAccountName: minio-operator
containers:
- name: minio-operator
image: minio/operator:v7.0.1
imagePullPolicy: IfNotPresent
args:
- controller
resources:
requests:
cpu: 200m
memory: 256Mi
ephemeral-storage: 500Mi
securityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
env:
- name: MINIO_CONSOLE_TLS_ENABLE
value: "off"
- name: OPERATOR_STS_ENABLED
value: "on"
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: name
operator: In
values:
- minio-operator
topologyKey: kubernetes.io/hostname


@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- crds/


@@ -0,0 +1,11 @@
apiVersion: v1
kind: Namespace
metadata:
name: minio-operator
labels:
pod-security.kubernetes.io/enforce: restricted
pod-security.kubernetes.io/enforce-version: latest
pod-security.kubernetes.io/audit: restricted
pod-security.kubernetes.io/audit-version: latest
pod-security.kubernetes.io/warn: restricted
pod-security.kubernetes.io/warn-version: latest


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: minio-operator
namespace: default


@@ -0,0 +1,33 @@
apiVersion: v1
kind: Service
metadata:
name: operator # Please do not change this value
labels:
name: minio-operator
app.kubernetes.io/instance: minio-operator
app.kubernetes.io/name: operator
namespace: minio-operator
spec:
type: ClusterIP
ports:
- port: 4221
name: http
selector:
name: minio-operator
operator: leader
---
apiVersion: v1
kind: Service
metadata:
name: sts # Please do not change this value
labels:
name: minio-operator
namespace: minio-operator
spec:
type: ClusterIP
ports:
- port: 4223
targetPort: 4223
name: https
selector:
name: minio-operator


@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: minio-operator
commonAnnotations:
operator.min.io/authors: "MinIO, Inc."
operator.min.io/license: "AGPLv3"
operator.min.io/support: "https://subnet.min.io"
operator.min.io/version: v7.0.1
commonLabels:
app.kubernetes.io/name: operator
resources:
- base/namespace.yaml
- base/service-account.yaml
- base/cluster-role.yaml
- base/cluster-role-binding.yaml
- base/crds/
- base/service.yaml
- base/deployment.yaml


@@ -0,0 +1,37 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: minio-ingress
namespace: minio-tenant
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
nginx.ingress.kubernetes.io/proxy-body-size: "100m"
spec:
rules:
- host: s3.minio.panic.haus
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: minio
port:
number: 9000
- host: console.minio.panic.haus
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: minio-console
port:
number: 9090
tls:
- hosts:
- s3.minio.panic.haus
- console.minio.panic.haus
secretName: minio-tls


@@ -0,0 +1,12 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: minio-tenant
resources:
- namespace.yaml
- secret.yaml
- tenant.yaml
- ingress.yaml
- svc-minio.yaml
- svc-minio-console.yaml


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: minio-tenant


@@ -0,0 +1,23 @@
apiVersion: v1
kind: Secret
metadata:
name: storage-configuration
namespace: minio-tenant
stringData:
config.env: |-
export MINIO_ROOT_USER="rjtPFRp52DgmWb4kdsyiFKjtBMxYSaow"
export MINIO_ROOT_PASSWORD="kabSK8RXcONjO8I7GNfJ03WMueJ7fk6z"
export MINIO_DOMAIN="s3.minio.panic.haus"
export MINIO_BROWSER_REDIRECT_URL="https://console.minio.panic.haus"
export MINIO_REGION_NAME="cluster-panic-haus"
type: Opaque
---
apiVersion: v1
data:
CONSOLE_ACCESS_KEY: Y29uc29sZQ==
CONSOLE_SECRET_KEY: ZGRhTDBZSHhlTnR2ZDM4SVI5TVdtS3VFU21ONE00NG4=
kind: Secret
metadata:
name: storage-user
namespace: minio-tenant
type: Opaque


@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: minio-console
namespace: minio-tenant
labels:
app: minio
spec:
type: ClusterIP
selector:
v1.min.io/tenant: panic-minio
ports:
- name: http
port: 9090
targetPort: 9090
protocol: TCP


@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: minio
namespace: minio-tenant
labels:
app: minio
spec:
type: ClusterIP
selector:
v1.min.io/tenant: panic-minio
ports:
- name: http-minio
port: 80
targetPort: 9000
protocol: TCP


@@ -0,0 +1,79 @@
apiVersion: minio.min.io/v2
kind: Tenant
metadata:
annotations:
prometheus.io/path: /minio/v2/metrics/cluster
prometheus.io/port: "9000"
prometheus.io/scrape: "true"
labels:
app: minio
name: panic-minio
namespace: minio-tenant
spec:
exposeServices: {}
imagePullPolicy: IfNotPresent
certConfig: {}
configuration:
name: storage-configuration
env: []
requestAutoCert: false
externalCertSecret: []
externalCaCertSecret: []
externalClientCertSecrets: []
features:
bucketDNS: false
domains: {}
image: quay.io/minio/minio:RELEASE.2025-04-03T14-56-28Z
imagePullSecret: {}
mountPath: /export
podManagementPolicy: Parallel
pools:
- name: pool-0
affinity:
nodeAffinity: {}
podAffinity: {}
podAntiAffinity: {}
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
nodeSelector: {}
resources: {}
securityContext:
fsGroup: 1000
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
servers: 4
tolerations: []
topologySpreadConstraints: []
volumeClaimTemplate:
apiVersion: v1
kind: persistentvolumeclaims
metadata: {}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: longhorn
status: {}
volumesPerServer: 1
priorityClassName: ""
serviceAccountName: ""
serviceMetadata:
consoleServiceAnnotations: {}
consoleServiceLabels: {}
minioServiceAnnotations: {}
minioServiceLabels: {}
subPath: ""
users:
- name: storage-user


@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: n8n
resources:
- n8n-claim0-persistentvolumeclaim.yaml
- n8n-ingress.yaml
- namespace.yaml
- n8n-deployment.yaml
- n8n-service.yaml


@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
labels:
service: n8n-claim0
name: n8n-claim0
namespace: n8n
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 2Gi


@@ -0,0 +1,82 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
service: n8n
name: n8n
namespace: n8n
spec:
replicas: 1
selector:
matchLabels:
service: n8n
strategy:
type: Recreate
template:
metadata:
labels:
service: n8n
spec:
initContainers:
- name: volume-permissions
image: busybox:1.36
command: ["sh", "-c", "chown 1000:1000 /data"]
volumeMounts:
- name: n8n-claim0
mountPath: /data
containers:
- command:
- /bin/sh
args:
- -c
- sleep 5; n8n start
env:
- name: N8N_EDITOR_BASE_URL
value: https://n8n.prod.panic.haus/
- name: WEBHOOK_URL
value: https://n8n.prod.panic.haus/
- name: DB_TYPE
value: postgresdb
- name: DB_POSTGRESDB_HOST
value: postgres-base-rw.postgres.svc.cluster.local
- name: DB_POSTGRESDB_PORT
value: "5432"
- name: DB_POSTGRESDB_DATABASE
value: n8ndb
- name: DB_POSTGRESDB_USER
valueFrom:
secretKeyRef:
name: n8n-db-secret
key: username
- name: DB_POSTGRESDB_PASSWORD
valueFrom:
secretKeyRef:
name: n8n-db-secret
key: password
- name: N8N_PROTOCOL
value: http
- name: N8N_PORT
value: "5678"
image: n8nio/n8n
name: n8n
ports:
- containerPort: 5678
resources:
requests:
memory: "250Mi"
limits:
memory: "500Mi"
volumeMounts:
- mountPath: /home/node/.n8n
name: n8n-claim0
restartPolicy: Always
volumes:
- name: n8n-claim0
persistentVolumeClaim:
claimName: n8n-claim0
- name: n8n-secret
secret:
secretName: n8n-secret
- name: n8n-db-secret
secret:
secretName: n8n-db-secret


@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: n8n
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
# nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- n8n.prod.panic.haus
secretName: n8n-tls
rules:
- host: n8n.prod.panic.haus
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: n8n
port:
number: 5678


@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
labels:
service: n8n
name: n8n
namespace: n8n
spec:
type: ClusterIP
ports:
- name: "5678"
port: 5678
targetPort: 5678
protocol: TCP
selector:
service: n8n


@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: n8n

deploy/n8n/secret.yaml

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: n8n-db-secret
namespace: n8n
type: Opaque
data:
username: bjhu # base64 encoded
password: SHFCTkdHcndzN1VFSk5tUDJRa3lIWGF6YkJaN3lTUkY= # base64 encoded


@@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: outline
namespace: outline-wiki
labels:
app: outline
spec:
replicas: 3
selector:
matchLabels:
app: outline
template:
metadata:
labels:
app: outline
spec:
containers:
- name: outline
image: outlinewiki/outline:0.84.0
ports:
- containerPort: 8089
envFrom:
- secretRef:
name: outline-secrets
env:
- name: PORT
value: "8089"


@@ -0,0 +1,27 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: outline-ingress
namespace: outline-wiki
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "100m"
spec:
ingressClassName: nginx
tls:
- hosts:
- outline.panic.haus
secretName: outline-wiki-tls
rules:
- host: outline.panic.haus
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: outline
port:
number: 8089


@@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: outline-wiki
resources:
- deploy.yaml
- service.yaml
- secret.yaml
- ingress.yaml


@@ -0,0 +1,34 @@
apiVersion: v1
kind: Secret
metadata:
name: outline-secrets
namespace: outline-wiki
type: Opaque
stringData:
SECRET_KEY: eae7766055bb20e0b6fb6838cc889121697e59a2b82fd1590dc47a91489acd95
UTILS_SECRET: f9e0e1158b7ec2239b465c602172493ee2d1b0765ca6b659b35f64959408492d
DATABASE_URL: postgres://outline:ULYpprqxQeS2rSBXF8NxEr4FhJkUAwhWJtkwZij6XwBDSvUUKeAifBBG885fPSmd@postgres-base-rw.postgres/outlinedb
REDIS_URL: redis://redis-lb.redis.svc.cluster.local:6379
URL: https://outline.panic.haus
PGSSLMODE: disable
AWS_ACCESS_KEY_ID: rjtPFRp52DgmWb4kdsyiFKjtBMxYSaow
AWS_SECRET_ACCESS_KEY: kabSK8RXcONjO8I7GNfJ03WMueJ7fk6z
AWS_S3_UPLOAD_BUCKET_URL: https://s3.minio.panic.haus/
AWS_REGION: cluster-panic-haus
AWS_S3_UPLOAD_BUCKET_NAME: outline
FILE_STORAGE_UPLOAD_MAX_SIZE: "26214400"
AWS_S3_FORCE_PATH_STYLE: "true"
AWS_S3_ACL: private
OIDC_DISPLAY_NAME: panicSSO
OIDC_CLIENT_ID: outline
OIDC_CLIENT_SECRET: W4KxpMkWiRL5EU8yknamRkkZpFFQ1rKN
OIDC_AUTH_URI: https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/auth?scope=openid
OIDC_TOKEN_URI: https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/token?scope=openid
OIDC_USERINFO_URI: https://sso.panic.haus/realms/panic-haus/protocol/openid-connect/userinfo?scope=openid
SMTP_HOST: mail.mind-overflow.net
SMTP_PORT: "465"
SMTP_USERNAME: cloud@mind-overflow.net
SMTP_PASSWORD: PcYchuLLUyfT2gvY4Tx7wQ575Tnqjx84zVNoP6Mb
SMTP_FROM_EMAIL: cloud@mind-overflow.net
SMTP_REPLY_EMAIL: cloud@mind-overflow.net
SMTP_SECURE: "true"


@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: outline
namespace: outline-wiki
spec:
selector:
app: outline
ports:
- name: http
port: 80
targetPort: 8089
type: ClusterIP


@@ -0,0 +1,22 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: clickhouse-config
data:
clickhouse-config.xml: |
<clickhouse>
<logger>
<level>warning</level>
<console>true</console>
</logger>
<query_thread_log remove="remove"/>
<query_log remove="remove"/>
<text_log remove="remove"/>
<trace_log remove="remove"/>
<metric_log remove="remove"/>
<asynchronous_metric_log remove="remove"/>
<!-- Update: required for newer versions of ClickHouse -->
<session_log remove="remove"/>
<part_log remove="remove"/>
</clickhouse>


@@ -0,0 +1,41 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: clickhouse
spec:
replicas: 1
selector:
matchLabels:
app: clickhouse
template:
metadata:
labels:
app: clickhouse
spec:
containers:
- name: clickhouse
image: clickhouse/clickhouse-server:22.6-alpine
# You may expose ports if needed (for example, HTTP on 8123)
ports:
- containerPort: 8123
volumeMounts:
- name: event-data
mountPath: /var/lib/clickhouse
- name: clickhouse-config
mountPath: /etc/clickhouse-server/config.d/logging.xml
subPath: clickhouse-config.xml
readOnly: true
- name: clickhouse-user-config
mountPath: /etc/clickhouse-server/users.d/logging.xml
subPath: clickhouse-user-config.xml
readOnly: true
volumes:
- name: event-data
persistentVolumeClaim:
claimName: event-data-pvc
- name: clickhouse-config
configMap:
name: clickhouse-config
- name: clickhouse-user-config
configMap:
name: clickhouse-user-config


@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: event-data-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: longhorn


@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: clickhouse
labels:
app: clickhouse
spec:
ports:
- name: http
protocol: TCP
port: 8123
targetPort: 8123
selector:
app: clickhouse


@@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: clickhouse-user-config
data:
clickhouse-user-config.xml: |
<clickhouse>
<profiles>
<default>
<log_queries>0</log_queries>
<log_query_threads>0</log_query_threads>
</default>
</profiles>
</clickhouse>


@@ -0,0 +1,17 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: plausible
resources:
- clickhouse-config.yaml
- clickhouse-pvc.yaml
- clickhouse-svc.yaml
- mail-svc.yaml
- plausible-secret.yaml
- clickhouse-deploy.yaml
- clickhouse-user-config.yaml
- mail-deploy.yaml
- plausible-deploy.yaml
- plausible-ingress.yaml
- plausible-svc.yaml


@@ -0,0 +1,21 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mail
spec:
replicas: 1
selector:
matchLabels:
app: mail
template:
metadata:
labels:
app: mail
spec:
nodeSelector:
kubernetes.io/arch: "amd64"
containers:
- name: mail
image: bytemark/smtp
ports:
- containerPort: 25


@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
name: mail
spec:
selector:
app: mail
ports:
- protocol: TCP
port: 25
targetPort: 25


@@ -0,0 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: plausible
spec:
replicas: 1
selector:
matchLabels:
app: plausible
template:
metadata:
labels:
app: plausible
spec:
containers:
- name: plausible
image: plausible/analytics:latest
command:
- sh
- -c
- "sleep 10 && /entrypoint.sh db createdb && /entrypoint.sh db migrate && /entrypoint.sh run"
ports:
- containerPort: 8000
envFrom:
- secretRef:
name: plausible-env


@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: plausible-ingress
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
spec:
ingressClassName: nginx
tls:
- hosts:
- webstats.beatrice.wtf
secretName: plausible-tls
rules:
- host: webstats.beatrice.wtf
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: plausible
port:
number: 8000


@@ -0,0 +1,22 @@
apiVersion: v1
kind: Secret
metadata:
name: plausible-env
data:
ADMIN_USER_EMAIL: aGVsbG9AYmVhdHJpY2Uud3Rm
ADMIN_USER_NAME: YmVhdHJpY2U=
ADMIN_USER_PWD: Xl55Z1d4UGtEMiRQSlF1JXZAQ1Q1ZF5lNnRDbmhBXk5qZnpTVlYyISNTN2U3N25wU25wZkpUYWF6RGVWRFVSTA==
BASE_URL: aHR0cHM6Ly93ZWJzdGF0cy5iZWF0cmljZS53dGY=
DATABASE_URL: cG9zdGdyZXM6Ly9wbGF1c2libGU6cnY5Mzhnd2d3ZzQzNGYyZjRoZzNnN2gzMDg5N2czaDVnMDk4akBwb3N0Z3Jlcy1iYXNlLXJ3LnBvc3RncmVzOjU0MzIvcGxhdXNpYmxlX2Ri
CLICKHOUSE_DATABASE_URL: aHR0cDovL2NsaWNraG91c2U6ODEyMy9wbGF1c2libGVfZXZlbnRzX2Ri
DISABLE_REGISTRATION: dHJ1ZQ==
MAILER_EMAIL: Y2xvdWRAbWluZC1vdmVyZmxvdy5uZXQ=
PORT: ODAwMA==
SECRET_KEY_BASE: M1FRQS9EdEdmR3c3cytjMzF2dnlmZ3lVc2F4RStNOWsxSWIvNVBjTUJIQjVHNWdpek00a2tSQ2lvbUFkU0lKR3FybGJ5R2h6VEFOcUJLWWZyeFZ0eHc9PQ==
SMTP_HOST_ADDR: bWFpbC5taW5kLW92ZXJmbG93Lm5ldA==
SMTP_HOST_PORT: NTg3
SMTP_HOST_SSL_ENABLED: ZmFsc2U=
SMTP_USER_NAME: Y2xvdWRAbWluZC1vdmVyZmxvdy5uZXQ=
SMTP_USER_PWD: UGNZY2h1TExVeWZUMmd2WTRUeDd3UTU3NVRucWp4ODR6Vk5vUDZNYg==


@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
name: plausible
spec:
selector:
app: plausible
ports:
- protocol: TCP
port: 8000
targetPort: 8000


@@ -9,12 +9,18 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {
// Disable the built-in Grafana component
grafana+:: {},
// Prometheus customizations: external URL, persistent storage, and self-scrape enabled
// Prometheus customizations: external URL and persistent storage
prometheus+:: {
prometheus+: {
spec+: {
serviceMonitorSelector: {},
externalUrl: 'https://metrics.prod.panic.haus',
retention: '30d',
retentionSize: '16GB',
additionalScrapeConfigs: {
name: 'prometheus-additional-scrape-configs',
key: 'additional-scrape-configs.yaml',
},
storage: {
volumeClaimTemplate: {
spec: {
@@ -24,20 +30,6 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {
},
},
},
// Set a ServiceMonitor selector that matches the Prometheus service's labels
serviceMonitorSelector: {
matchLabels: {
"k8s-app": "prometheus",
},
},
},
},
// Ensure the Prometheus service gets the label so that the selector above matches it
service+: {
metadata+: {
labels: {
"k8s-app": "prometheus",
},
},
},
},
@@ -141,4 +133,4 @@ local kp = (import 'kube-prometheus/main.libsonnet') + {
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ [name + '-ingress']: kp.ingress[name] for name in std.objectFields(kp.ingress) }
{ [name + '-ingress']: kp.ingress[name] for name in std.objectFields(kp.ingress) }


@@ -2,7 +2,11 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
k8s-app: prometheus
app.kubernetes.io/component: prometheus
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 3.2.1
name: prometheus-k8s
namespace: monitoring
spec:


@@ -10,6 +10,9 @@ metadata:
name: k8s
namespace: monitoring
spec:
additionalScrapeConfigs:
key: additional-scrape-configs.yaml
name: prometheus-additional-scrape-configs
alerting:
alertmanagers:
- apiVersion: v2
@@ -38,6 +41,7 @@ spec:
requests:
memory: 400Mi
retention: 30d
retentionSize: 16GB
ruleNamespaceSelector: {}
ruleSelector: {}
scrapeConfigNamespaceSelector: {}
@@ -48,9 +52,7 @@ spec:
runAsUser: 1000
serviceAccountName: prometheus-k8s
serviceMonitorNamespaceSelector: {}
serviceMonitorSelector:
matchLabels:
k8s-app: prometheus
serviceMonitorSelector: {}
storage:
volumeClaimTemplate:
spec:


@@ -2,7 +2,11 @@ apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: prometheus
app.kubernetes.io/component: prometheus
app.kubernetes.io/instance: k8s
app.kubernetes.io/name: prometheus
app.kubernetes.io/part-of: kube-prometheus
app.kubernetes.io/version: 3.2.1
name: prometheus-k8s
namespace: monitoring
spec:


@@ -0,0 +1,12 @@
apiVersion: v1
kind: Secret
metadata:
name: prometheus-additional-scrape-configs
namespace: monitoring
stringData:
additional-scrape-configs.yaml: |
- job_name: 'proxmox-holly-node-exporter'
scheme: https
metrics_path: /metrics
static_configs:
- targets: ['node-exporter.holly.panic.haus']
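The additional-scrape-configs.yaml payload is ordinary Prometheus scrape_config YAML, so more exporters can be appended to the same Secret as extra jobs or targets, for example (the hostname below is a placeholder, not a real target):

    - job_name: 'proxmox-extra-node-exporter'
      scheme: https
      metrics_path: /metrics
      static_configs:
        - targets: ['node-exporter.example.internal']   # placeholder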


@@ -16,6 +16,8 @@ spec:
labels:
app: redis
spec:
nodeSelector:
kubernetes.io/arch: amd64
containers:
- name: redis
image: redis:7.4-alpine


@@ -4,7 +4,7 @@ metadata:
name: renovate
namespace: renovate
spec:
schedule: "*/15 * * * *"
schedule: '@hourly'
concurrencyPolicy: Forbid
jobTemplate:
spec:


@@ -0,0 +1 @@
.git

Some files were not shown because too many files have changed in this diff.