# generated from phoenix-oss/tenant-tpl
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: postgres
  namespace: ${TENANT_NAMESPACE}
spec:
  serviceAccountName: ${TECHNICAL_ACCOUNT}
  interval: 30m
  chart:
    spec:
      chart: cluster
      version: 0.3.1
      sourceRef:
        kind: HelmRepository
        name: cloudnative-pg
        namespace: ${TENANT_NAMESPACE}
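        # The sourceRef above assumes a HelmRepository named 'cloudnative-pg'
        # already exists in this namespace. A minimal sketch of what it could
        # look like, assuming the upstream CloudNativePG chart repository
        # (not part of the generated template):
        #
        # apiVersion: source.toolkit.fluxcd.io/v1
        # kind: HelmRepository
        # metadata:
        #   name: cloudnative-pg
        #   namespace: ${TENANT_NAMESPACE}
        # spec:
        #   interval: 30m
        #   url: https://cloudnative-pg.github.io/charts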
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      strategy: rollback
      retries: 3
  values:
    # Check the complete configuration options at
    # https://raw.githubusercontent.com/cloudnative-pg/charts/refs/tags/cluster-v0.3.1/charts/cluster/values.yaml
    type: postgresql
    mode: standalone
    version:
      postgresql: "17.5"
    cluster:
      instances: 3
      storage:
        size: 10Gi
        # Default storage class on the ai-2 cluster; on basel or staging you
        # should use 'ocs-storagecluster-ceph-rbd' instead.
        storageClass: ibm-spectrum-scale-fileset
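        # Tip: you can list the storage classes available on the target
        # cluster with 'kubectl get storageclass'.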
      walStorage:
        # Splitting the WAL from the main data volume is not mandatory.
        # However, it protects the main data volume when WAL archiving to the
        # backup server runs into trouble: under network congestion or
        # outright failures, WAL segments can accumulate until the volume
        # fills up, blocking the cluster from operating properly.
        enabled: true
        size: 10Gi
        storageClass: ibm-spectrum-scale-fileset
      resources:
        requests:
          cpu: "500m"
          memory: 1Gi
        limits:
          cpu: "1"
          memory: 1Gi
      enableSuperuserAccess: true
      superuserSecret: postgres-superuser
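      # The line above assumes a pre-existing Secret named 'postgres-superuser'
      # in the tenant namespace. A minimal sketch of what it could look like
      # (CloudNativePG expects a basic-auth Secret with 'username' and
      # 'password' keys; the values below are placeholders):
      #
      # apiVersion: v1
      # kind: Secret
      # metadata:
      #   name: postgres-superuser
      #   namespace: ${TENANT_NAMESPACE}
      # type: kubernetes.io/basic-auth
      # stringData:
      #   username: postgres
      #   password: change-me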
      affinity:
        topologyKey: failure-domain.beta.kubernetes.io/zone
      postgresql:
        parameters:
          shared_buffers: 256MB
          max_connections: "400"
      initdb:
        database: app
        owner: app
        options: []
        encoding: UTF8
    backups:
      # As indicated by the 'enabled' flag, backups are disabled on this
      # deployment. The rest of the block serves as an example of how to
      # configure this cluster to export backups to an S3 bucket hosted on
      # a MinIO server.
      #
      # For more information, refer to the helm chart's values.yaml
      # or the official documentation at
      # https://cloudnative-pg.io/documentation/1.26/backup/
      enabled: false
      endpointURL: https://glacier-1.kvant.cloud
      provider: s3
      s3:
        bucket: phoenix-openshift-backups
        path: /demo-postgres
        # Ideally, you should never commit credentials in plain text; these
        # values are here just for illustration. To load them properly from
        # Kubernetes Secrets, refer to the commented-out 'valuesFrom' section
        # right below.
        accessKey: your-access-key
        secretKey: your-secret-key
      secret:
        create: true
      wal:
        # If exporting to MinIO S3, you may have to disable encryption.
        # This is how you achieve it:
        encryption: ""
      data:
        encryption: ""
      scheduledBackups:
        # You can give it any name and change the scheduled time to whatever
        # fits your strategy. This serves as an example of how to configure
        # the cluster to export a daily backup to the S3 bucket using barman
        # object storage. You can also back up volumes instead. Check the
        # backup documentation for more information on which option suits
        # you best.
        - name: daily-minio
          schedule: "@daily"
          backupOwnerReference: self
          method: barmanObjectStore
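          # If you prefer an explicit time over the '@daily' shorthand,
          # CloudNativePG's ScheduledBackup takes a Go cron expression with a
          # leading seconds field, e.g.:
          #
          # schedule: "0 0 2 * * *"  # every day at 02:00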
      retentionPolicy: "180d" # must match the bucket's retention period
  # valuesFrom:
  #   - kind: Secret
  #     name: postgres-backup-s3 # name of the pre-existing secret that holds the key pair
  #     valuesKey: ACCESS_KEY_ID # key inside the secret that holds the access key value
  #     targetPath: backups.s3.accessKey # configuration path that will receive the access key value
  #     optional: false
  #   - kind: Secret
  #     name: postgres-backup-s3 # name of the pre-existing secret that holds the key pair
  #     valuesKey: ACCESS_SECRET_KEY # key inside the secret that holds the secret key value
  #     targetPath: backups.s3.secretKey # configuration path that will receive the secret key value
  #     optional: false
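  # The entries above assume a pre-existing Secret named 'postgres-backup-s3'
  # in the tenant namespace. A minimal sketch of what it could look like
  # (key names taken from the comments above; the values are placeholders):
  #
  # apiVersion: v1
  # kind: Secret
  # metadata:
  #   name: postgres-backup-s3
  #   namespace: ${TENANT_NAMESPACE}
  # type: Opaque
  # stringData:
  #   ACCESS_KEY_ID: your-access-key
  #   ACCESS_SECRET_KEY: your-secret-key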