# tenant-demo/postgres/app/helmrelease.yaml
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: postgres
  namespace: ${TENANT_NAMESPACE}
spec:
  serviceAccountName: ${TECHNICAL_ACCOUNT}
  interval: 30m
  chart:
    spec:
      chart: cluster
      version: 0.3.1
      sourceRef:
        kind: HelmRepository
        name: cloudnative-pg
        namespace: ${TENANT_NAMESPACE}
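  # The sourceRef above assumes a HelmRepository named 'cloudnative-pg'
  # already exists in the tenant namespace. A minimal sketch of that
  # source (the upstream chart URL and the 1h interval are assumptions,
  # adjust to your setup):
  #
  # ---
  # apiVersion: source.toolkit.fluxcd.io/v1
  # kind: HelmRepository
  # metadata:
  #   name: cloudnative-pg
  #   namespace: ${TENANT_NAMESPACE}
  # spec:
  #   interval: 1h
  #   url: https://cloudnative-pg.github.io/charts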
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      strategy: rollback
      retries: 3
  values:
    # Check the complete configuration options at
    # https://raw.githubusercontent.com/cloudnative-pg/charts/refs/tags/cluster-v0.3.1/charts/cluster/values.yaml
    type: postgresql
    mode: standalone
    version:
      postgresql: "17.5"
    cluster:
      instances: 3
      storage:
        size: 10Gi
        storageClass: ocs-storagecluster-ceph-rbd
      walStorage:
        # Splitting the WAL from the main data volume is not mandatory.
        # However, it protects the main data volume when WAL archiving
        # to the backup server runs into trouble: under network
        # congestion or outages, WAL segments can accumulate until the
        # volume fills up, blocking the cluster from operating properly.
        enabled: true
        size: 10Gi
        storageClass: ocs-storagecluster-ceph-rbd
      resources:
        requests:
          cpu: "500m"
          memory: 1Gi
        limits:
          cpu: "1"
          memory: 1Gi
      enableSuperuserAccess: true
      superuserSecret: postgres-superuser
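      # 'postgres-superuser' must be a pre-existing basic-auth secret
      # holding the superuser credentials. A minimal sketch (the
      # password value is a placeholder; source it from a secret
      # manager in practice):
      #
      # ---
      # apiVersion: v1
      # kind: Secret
      # metadata:
      #   name: postgres-superuser
      #   namespace: ${TENANT_NAMESPACE}
      # type: kubernetes.io/basic-auth
      # stringData:
      #   username: postgres
      #   password: change-me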
      affinity:
        # Use the stable topology label; the legacy
        # failure-domain.beta.kubernetes.io/zone label is deprecated.
        topologyKey: topology.kubernetes.io/zone
      postgresql:
        parameters:
          shared_buffers: 256MB
          max_connections: "400"
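      # Once the cluster is running, applied settings can be verified
      # from any instance pod (pod and namespace names below are
      # placeholders):
      #   kubectl exec -n <tenant-namespace> <cluster-pod-1> -c postgres -- psql -c 'SHOW shared_buffers'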
      initdb:
        database: app
        owner: app
        options: []
        encoding: UTF8
    backups:
      # Backups are disabled in this example, but the values below show
      # how to configure this cluster to export backups to an S3 bucket
      # hosted on a MinIO server.
      #
      # For more information, refer to the helm chart's values.yaml
      # or the official documentation at
      # https://cloudnative-pg.io/documentation/1.26/backup/
      enabled: false
      endpointURL: https://glacier-1.kvant.cloud
      provider: s3
      s3:
        bucket: phoenix-openshift-backups
        path: /demo-postgres
        # Never commit credentials in plain text; these values are here
        # just for illustration. To load them properly from Kubernetes
        # secrets, refer to the commented-out 'valuesFrom' section right
        # below.
        accessKey: your-access-key
        secretKey: your-secret-key
      secret:
        create: true
      wal:
        encryption: ""
      data:
        encryption: ""
      scheduledBackups:
        - name: daily-minio
          schedule: "@daily"
          backupOwnerReference: self
          method: barmanObjectStore
      retentionPolicy: "180d" # must match the bucket's retention period
  # valuesFrom:
  #   - kind: Secret
  #     name: postgres-backup-s3 # name of the pre-existing secret that holds the key pair
  #     valuesKey: ACCESS_KEY_ID # name of the key inside the secret that holds the access key value
  #     targetPath: backups.s3.accessKey # path of the configuration that will be assigned the access key value
  #     optional: false
  #   - kind: Secret
  #     name: postgres-backup-s3 # name of the pre-existing secret that holds the key pair
  #     valuesKey: ACCESS_SECRET_KEY # name of the key inside the secret that holds the secret key value
  #     targetPath: backups.s3.secretKey # path of the configuration that will be assigned the secret key value
  #     optional: false
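  # The valuesFrom sketch above assumes a pre-existing secret shaped
  # like the following (key names must match the valuesKey entries):
  #
  # ---
  # apiVersion: v1
  # kind: Secret
  # metadata:
  #   name: postgres-backup-s3
  #   namespace: ${TENANT_NAMESPACE}
  # stringData:
  #   ACCESS_KEY_ID: your-access-key
  #   ACCESS_SECRET_KEY: your-secret-key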