# Default values for gargantua.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# A profile to be used for quicker deployments on SIATEL-managed or local development clusters.
# Known profiles: "siatelRo", "siatelCom", "localDev". These profiles include:
# - PostgreSQL database (effective options: "database.password", "database.postgresql.resources")
# - 'hostPath' volumes under <profile>.volumesRoot, at <volumesRoot>/<namespace>/<name>-[database|home|storage]
# - ingress with tls (siatelRo/siatelCom profiles only)
profile: "" # Custom profile by default; carefully analyze the parameters in this file.
plugins: [ "admininstaller", "browserplugins", "emailplugins", "officeplugins", "cvtoivt" ]
localDev: # applies only to 'localDev' deployment profile
# for Rancher Desktop clusters this would look like "/mnt/<driveLetter>/path/to/volumes"
  # for Docker Desktop clusters the path should be similar, but it has not been determined yet
volumesRoot: "/mnt/f/rancher/volumes"
siatelRo: # applies only to 'siatelRo' deployment profile
cloud: "kubix.siatel.ro"
volumesRoot: "/srv/pv"
ingress:
enabled: true
clusterIssuer: letsencrypt-staging
siatelCom: # applies only to 'siatelCom' deployment profile
cloud: "node0.siatel.com"
volumesRoot: "/srv/pv"
ingress:
enabled: true
clusterIssuer: letsencrypt-staging
# Database parameters
database:
  # The JDBC URL of the database; e.g.: "jdbc:postgresql://someservice.somenamespace.svc.cluster.local:5432/somedbname"
  # The database MUST have a user named 'siatel' with ownership-equivalent access to the target database.
  # If left empty, a database meeting these requirements is deployed using the engine selected by 'type' below.
jdbcUrl: "" #"jdbc:postgresql://pgs1.default.svc.cluster.local:5432/dev8"
# The password for the 'siatel' user; 'secretName' is favored over 'clearText'.
password:
secretName: "" # MUST have the 'siatel-password' field; will be looked up in the namespace where the release is installed
clearText: "siatel123" # highly discouraged in production.
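    # A sketch of providing the password via a secret (the name 'gargantua-db'
    # is illustrative; only the 'siatel-password' key is required):
    #   kubectl create secret generic gargantua-db \
    #     --from-literal=siatel-password='<password>' --namespace <release-namespace>
    # then set secretName: "gargantua-db" and leave clearText empty.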
# Parameters for including and initializing a database with the deployment of Gargantua
type: "postgresql" # postgresql [| postgresql-ha | sqlserver | oracle] default is "postgresql"
# The included PostgreSQL deployment configuration.
postgresql:
image: postgres:latest
persistence:
# The PostgreSQL data volume: [storageClass/size | existingClaim | hostPath]
hostPath: ""
existingClaim: ""
storageClass: ""
size: "8Gi"
# Pod resources
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2
memory: 2Gi
# TODO:
# postgresql-ha: {}
# sqlserver: {}
# oracle: {}
##############################################
# Gargantua deployment parameters start here #
##############################################
# The deployment container image
image:
repository: harbor.kubix.siatel.ro/builds/gargantua
# repository: harbor.node0.siatel.com/releases/gargantua
# repository: busybox
# This sets the pull policy for images.
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
# This is for the secrets used to pull an image from a private repository; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: [ ]
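# An illustrative pull-secret setup for the default Harbor registry above
# (the secret name 'harbor-pull' is a placeholder):
#   kubectl create secret docker-registry harbor-pull \
#     --docker-server=harbor.kubix.siatel.ro \
#     --docker-username=<user> --docker-password=<password>
# then: imagePullSecrets: [ { name: harbor-pull } ]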
#start:
# command: [ 'tail' ]
# args: [ '-f', '/dev/null' ]
# Persistent data stores.
persistence:
# The 'home' datastore (i.e. configuration files)
home:
# The 'home' data volume: [storageClass/size | existingClaim | hostPath]
hostPath: ""
existingClaim: ""
storageClass: ""
size: "4Gi"
# The 'storage' datastore (i.e. documents' contents)
storage:
  # The 'storage' data volume: [storageClass/size | existingClaim | hostPath]
hostPath: ""
existingClaim: ""
storageClass: ""
size: "64Gi"
license:
# The volume containing the 'server.lic' file [secretName | existingClaim | hostPath]
secretName: "gargantua-license"
existingClaim: ""
hostPath: "" # "/mnt/f/rancher/g8-cloud-license"
# Additional volumes on the output Deployment definition.
volumes: [ ]
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: [ ]
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
# Pod resources
resources:
requests:
cpu: 1
memory: 2Gi
limits:
cpu: 8
memory: 8Gi
# This sets the replica count; more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1
# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""
# This section builds out the service account; more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
# Specifies whether a service account should be created
create: false
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: { }
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
# This is for setting Kubernetes annotations on a Pod.
# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: { }
# This is for setting Kubernetes labels on a Pod.
# For more information, check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: { }
podSecurityContext: { }
# fsGroup: 2000
securityContext: { }
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
# This is for setting up a service; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
  # This sets the service type; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
type: ClusterIP
  # This sets the service port; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
port: 8080
# This block sets up the ingress; more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: false
className: ""
annotations: { }
# cert-manager.io/ClusterIssuer: letsencrypt-staging
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: my-app.mydomain
paths:
- path: /
pathType: ImplementationSpecific
tls: [ ]
# - secretName: my-app-ingress-tls
# hosts:
# - my-app.mydomain
# This sets up the liveness and readiness probes; more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
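# If the application needs a long warm-up, a startupProbe can shield the
# liveness probe; illustrative values, assuming the deployment template
# renders this field:
# startupProbe:
#   httpGet:
#     path: /
#     port: http
#   failureThreshold: 30
#   periodSeconds: 10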
# This section is for setting up autoscaling; more information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 3
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: { }
tolerations: [ ]
affinity: { }
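# Illustrative (commented-out) scheduling examples; all labels, keys, and
# values below are placeholders, not cluster facts:
# nodeSelector:
#   kubernetes.io/os: linux
# tolerations:
#   - key: "dedicated"
#     operator: "Equal"
#     value: "gargantua"
#     effect: "NoSchedule"
# affinity:
#   podAntiAffinity:
#     preferredDuringSchedulingIgnoredDuringExecution:
#       - weight: 100
#         podAffinityTerm:
#           topologyKey: kubernetes.io/hostname
#           labelSelector:
#             matchLabels:
#               app.kubernetes.io/name: gargantua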