removed
commit ce50ecc752 (parent 94654ed47e)
@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -1,24 +0,0 @@
apiVersion: v2
name: gargantua
description: Gargantua Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 8.3.10625

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "8.3.10625"
@@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "gargantua.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "gargantua.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "gargantua.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "gargantua.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "gargantua.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "gargantua.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
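{{/* For example: release "prod" yields fullname "prod-gargantua", while release "gargantua-prod"
     already contains the chart name and is used as-is. */}}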

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "gargantua.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "gargantua.labels" -}}
helm.sh/chart: {{ include "gargantua.chart" . }}
{{ include "gargantua.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "gargantua.selectorLabels" -}}
app.kubernetes.io/name: {{ include "gargantua.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "gargantua.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "gargantua.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
@@ -1,145 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "gargantua.fullname" . }}
  labels:
    {{- include "gargantua.labels" . | nindent 4 }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "gargantua.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "gargantua.labels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "gargantua.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      {{/* TODO: initContainer to wait for the database to be available */}}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            - name: SIATEL_JDBC_URL
              {{- if .Values.database.jdbcUrl }}
              value: "{{ .Values.database.jdbcUrl }}"
              {{- else if eq .Values.database.type "postgresql" }}
              value: "jdbc:postgresql://{{ .Release.Name }}-database.{{ .Release.Namespace }}.svc.cluster.local:5432/siatel"
              {{- end }}
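              {{/* For example, with database.jdbcUrl unset, release "g8" in namespace "dev" resolves to
                   "jdbc:postgresql://g8-database.dev.svc.cluster.local:5432/siatel", which matches the
                   Service created by the bundled database template. */}}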
            - name: SIATEL_JDBC_PASS
              value: "{{ .Values.database.password.clearText }}" {{/* TODO manage secret */}}
            - name: SIATEL_PLUGINS
              value: "{{ join "," .Values.plugins }}"
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          livenessProbe:
            {{- toYaml .Values.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml .Values.readinessProbe | nindent 12 }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          volumeMounts:
            - mountPath: /srv/siatel/home
              name: {{ .Release.Name }}-home-pv
            - mountPath: /srv/siatel/storage
              name: {{ .Release.Name }}-storage-pv
            - mountPath: /license
              name: licence
            {{- with .Values.volumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          {{- if .Values.start }}
          {{- toYaml .Values.start | nindent 10 }}
          {{- end }}
      volumes:
        - name: {{ .Release.Name }}-home-pv
          {{- if and .Values.persistence.home.storageClass .Values.persistence.home.size }}
          persistentVolumeClaim:
            claimName: {{ .Release.Name }}-home-pvc
          {{- else if .Values.persistence.home.existingClaim }}
          persistentVolumeClaim:
            claimName: {{ .Values.persistence.home.existingClaim }}
          {{- else if .Values.persistence.home.hostPath }}
          hostPath:
            path: {{ .Values.persistence.home.hostPath }}
            type: 'DirectoryOrCreate'
          {{- else if .Values.profile }}
          hostPath:
            type: 'DirectoryOrCreate'
            {{- if eq .Values.profile "localDev" }}
            path: {{ .Values.localDev.volumesRoot }}/{{ .Release.Namespace }}/{{ .Release.Name }}/home
            {{- else if eq .Values.profile "siatelRo" }}
            path: {{ .Values.siatelRo.volumesRoot }}/{{ .Release.Namespace }}/{{ .Release.Name }}/home
            {{- else if eq .Values.profile "siatelCom" }}
            path: {{ .Values.siatelCom.volumesRoot }}/{{ .Release.Namespace }}/{{ .Release.Name }}/home
            {{- end }}
          {{- end }}
        - name: {{ .Release.Name }}-storage-pv
          {{- if and .Values.persistence.storage.storageClass .Values.persistence.storage.size }}
          persistentVolumeClaim:
            claimName: {{ .Release.Name }}-storage-pvc
          {{- else if .Values.persistence.storage.existingClaim }}
          persistentVolumeClaim:
            claimName: {{ .Values.persistence.storage.existingClaim }}
          {{- else if .Values.persistence.storage.hostPath }}
          hostPath:
            path: {{ .Values.persistence.storage.hostPath }}
            type: 'DirectoryOrCreate'
          {{- else if .Values.profile }}
          hostPath:
            type: 'DirectoryOrCreate'
            {{- if eq .Values.profile "localDev" }}
            path: {{ .Values.localDev.volumesRoot }}/{{ .Release.Namespace }}/{{ .Release.Name }}/storage
            {{- else if eq .Values.profile "siatelRo" }}
            path: {{ .Values.siatelRo.volumesRoot }}/{{ .Release.Namespace }}/{{ .Release.Name }}/storage
            {{- else if eq .Values.profile "siatelCom" }}
            path: {{ .Values.siatelCom.volumesRoot }}/{{ .Release.Namespace }}/{{ .Release.Name }}/storage
            {{- end }}
          {{- end }}
        - name: licence
          {{- if .Values.license.secretName }}
          secret:
            secretName: {{ .Values.license.secretName }}
          {{- else if .Values.license.existingClaim }}
          persistentVolumeClaim:
            claimName: {{ .Values.license.existingClaim }}
          {{- else if .Values.license.hostPath }}
          hostPath:
            path: {{ .Values.license.hostPath }}
            type: 'Directory'
          {{- end }}
        {{- with .Values.volumes }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -1,32 +0,0 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: {{ include "gargantua.fullname" . }}
  labels:
    {{- include "gargantua.labels" . | nindent 4 }}
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ include "gargantua.fullname" . }}
  minReplicas: {{ .Values.autoscaling.minReplicas }}
  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
  metrics:
    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
    {{- end }}
    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
    {{- end }}
{{- end }}
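# The HPA above renders only when autoscaling is enabled, e.g.:
#   helm upgrade <release-name> . --set autoscaling.enabled=true --set autoscaling.maxReplicas=5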
@@ -1,108 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{/* The 'values' customized ingress */}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "gargantua.fullname" . }}
  labels:
    {{- include "gargantua.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- with .Values.ingress.className }}
  ingressClassName: {{ . }}
  {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            {{- with .pathType }}
            pathType: {{ . }}
            {{- end }}
            backend:
              service:
                name: {{ include "gargantua.fullname" $ }}
                port:
                  number: {{ $.Values.service.port }}
          {{- end }}
    {{- end }}

{{- else if and (eq .Values.profile "siatelRo") .Values.siatelRo.ingress.enabled -}}
{{/* The siatelRo standard ingress */}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "gargantua.fullname" . }}
  labels:
    {{- include "gargantua.labels" . | nindent 4 }}
  annotations:
    cert-manager.io/cluster-issuer: {{ .Values.siatelRo.ingress.clusterIssuer }}
    kubernetes.io/ingress.class: nginx
    {{- with .Values.ingress.annotations }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - "{{ .Release.Name }}.{{ .Values.siatelRo.cloud }}"
      secretName: "{{ .Release.Name }}-ingress-tls"
  rules:
    - host: "{{ .Release.Name }}.{{ .Values.siatelRo.cloud }}"
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: {{ include "gargantua.fullname" $ }}
                port:
                  number: {{ $.Values.service.port }}

{{- else if and (eq .Values.profile "siatelCom") .Values.siatelCom.ingress.enabled -}}
{{/* The siatelCom standard ingress */}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "gargantua.fullname" . }}
  labels:
    {{- include "gargantua.labels" . | nindent 4 }}
  annotations:
    cert-manager.io/cluster-issuer: {{ .Values.siatelCom.ingress.clusterIssuer }}
    kubernetes.io/ingress.class: nginx
    {{- with .Values.ingress.annotations }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - "{{ .Release.Name }}.{{ .Values.siatelCom.cloud }}"
      secretName: "{{ .Release.Name }}-ingress-tls"
  rules:
    - host: "{{ .Release.Name }}.{{ .Values.siatelCom.cloud }}"
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: {{ include "gargantua.fullname" $ }}
                port:
                  number: {{ $.Values.service.port }}
{{- end }}
@@ -1,101 +0,0 @@
{{- if and (not .Values.database.jdbcUrl) (eq .Values.database.type "postgresql") }}
{{- if and .Values.database.postgresql.persistence.storageClass .Values.database.postgresql.persistence.size }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ .Release.Name }}-database-pvc
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ .Release.Name }}
    type: database
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: {{ .Values.database.postgresql.persistence.size }}
  storageClassName: {{ .Values.database.postgresql.persistence.storageClass }}
---
{{- end }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}-database
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ .Release.Name }}
    type: database
spec:
  replicas: 1
  selector:
    matchLabels:
      app: {{ .Release.Name }}
      type: database
  template:
    metadata:
      labels:
        app: {{ .Release.Name }}
        type: database
    spec:
      containers:
        - name: postgresql
          image: {{ .Values.database.postgresql.image }}
          env:
            - name: POSTGRES_USER
              value: "siatel"
            - name: POSTGRES_PASSWORD
              value: "{{ .Values.database.password.clearText }}"
            - name: POSTGRES_DB
              value: "siatel"
          ports:
            - name: pgs
              containerPort: 5432
          volumeMounts:
            - mountPath: /var/lib/postgresql/data
              name: {{ .Release.Name }}-database-pv
              subPath: data
          resources:
            {{- toYaml .Values.database.postgresql.resources | nindent 12 }}
      volumes:
        - name: {{ .Release.Name }}-database-pv
          {{- if and .Values.database.postgresql.persistence.storageClass .Values.database.postgresql.persistence.size }}
          persistentVolumeClaim:
            claimName: {{ .Release.Name }}-database-pvc
          {{- else if .Values.database.postgresql.persistence.existingClaim }}
          persistentVolumeClaim:
            claimName: {{ .Values.database.postgresql.persistence.existingClaim }}
          {{- else if .Values.database.postgresql.persistence.hostPath }}
          hostPath:
            type: 'DirectoryOrCreate'
            path: {{ .Values.database.postgresql.persistence.hostPath }}
          {{- else if .Values.profile }}
          hostPath:
            type: 'DirectoryOrCreate'
            {{- if eq .Values.profile "localDev" }}
            path: {{ .Values.localDev.volumesRoot }}/{{ .Release.Namespace }}/{{ .Release.Name }}/database
            {{- else if eq .Values.profile "siatelRo" }}
            path: {{ .Values.siatelRo.volumesRoot }}/{{ .Release.Namespace }}/{{ .Release.Name }}/database
            {{- else if eq .Values.profile "siatelCom" }}
            path: {{ .Values.siatelCom.volumesRoot }}/{{ .Release.Namespace }}/{{ .Release.Name }}/database
            {{- end }}
          {{- end }}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}-database
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ .Release.Name }}
    type: database
spec:
  selector:
    app: {{ .Release.Name }}
    type: database
  type: ClusterIP
  ports:
    - protocol: TCP
      name: pgs
      port: 5432
      targetPort: 5432
{{- end }}
@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "gargantua.fullname" . }}
  labels:
    {{- include "gargantua.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "gargantua.selectorLabels" . | nindent 4 }}
@@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "gargantua.serviceAccountName" . }}
  labels:
    {{- include "gargantua.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}
@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "gargantua.fullname" . }}-test-connection"
  labels:
    {{- include "gargantua.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "gargantua.fullname" . }}:{{ .Values.service.port }}']
  restartPolicy: Never
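# This hook pod runs on demand via Helm's test action, e.g.: helm test <release-name>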
@@ -1,223 +0,0 @@
# Default values for gargantua.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# A profile to be used for quicker deployments on SIATEL-managed or local development clusters.
# Known profiles: "siatelRo", "siatelCom", "localDev". These profiles include:
# - a PostgreSQL database (effective options: "database.password", "database.postgresql.resources")
# - 'hostPath' volumes under <profile>.volumesRoot: <volumesRoot>/<namespace>/<name>/{database,home,storage}
# - an ingress with TLS (siatelRo/siatelCom profiles only)
profile: "" # Custom profile by default; carefully analyze the parameters in this file.
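# For example (hypothetical release name):
#   helm install g8 . --set profile=localDev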

plugins: [ "admininstaller", "browserplugins", "emailplugins", "officeplugins", "cvtoivt" ]

localDev: # applies only to the 'localDev' deployment profile
  # for Rancher Desktop clusters this would look like "/mnt/<driveLetter>/path/to/volumes"
  # for Docker Desktop clusters it should be something similar, but I still have to find out what it is
  volumesRoot: "/mnt/f/rancher/volumes"

siatelRo: # applies only to the 'siatelRo' deployment profile
  cloud: "kubix.siatel.ro"
  volumesRoot: "/srv/pv"
  ingress:
    enabled: true
    clusterIssuer: letsencrypt-staging

siatelCom: # applies only to the 'siatelCom' deployment profile
  cloud: "node0.siatel.com"
  volumesRoot: "/srv/pv"
  ingress:
    enabled: true
    clusterIssuer: letsencrypt-staging

# Database parameters
database:
  # The JDBC URL of the database; e.g.: "jdbc:postgresql://someservice.somenamespace.svc.cluster.local:5432/somedbname"
  # The database MUST have a user named 'siatel' configured with ownership-equivalent access to the configured database.
  # If left empty, a database with matching requirements will be deployed according to the supported and specified engine.
  jdbcUrl: "" #"jdbc:postgresql://pgs1.default.svc.cluster.local:5432/dev8"

  # The password for the 'siatel' user; 'secretName' is favored over 'clearText'.
  password:
    secretName: "" # MUST have the 'siatel-password' field; will be looked up in the namespace where the release is installed
    clearText: "siatel123" # highly discouraged in production.
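    # For example, a matching secret (hypothetical name "gargantua-db") could be created with:
    #   kubectl -n <namespace> create secret generic gargantua-db --from-literal=siatel-password='<password>'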

  # Parameters for including and initializing a database with the deployment of Gargantua
  type: "postgresql" # postgresql [ | postgresql-ha | sqlserver | oracle ]; default is "postgresql"

  # The included PostgreSQL deployment configuration.
  postgresql:
    image: postgres:latest
    persistence:
      # The PostgreSQL data volume: [storageClass/size | existingClaim | hostPath]
      hostPath: ""
      existingClaim: ""
      storageClass: ""
      size: "8Gi"
    # Pod resources
    resources:
      requests:
        cpu: 100m
        memory: 128Mi
      limits:
        cpu: 2
        memory: 2Gi

  # TODO:
  # postgresql-ha: {}
  # sqlserver: {}
  # oracle: {}

##############################################
# Gargantua deployment parameters start here #
##############################################

# The deployment container image
image:
  repository: harbor.kubix.siatel.ro/builds/gargantua
  # repository: harbor.node0.siatel.com/releases/gargantua
  # repository: busybox
  # This sets the pull policy for images.
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""
# This is for the secrets used when pulling an image from a private repository. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: [ ]

#start:
#  command: [ 'tail' ]
#  args: [ '-f', '/dev/null' ]

# Persistent data stores.
persistence:

  # The 'home' datastore (i.e. configuration files)
  home:
    # The 'home' data volume: [storageClass/size | existingClaim | hostPath]
    hostPath: ""
    existingClaim: ""
    storageClass: ""
    size: "4Gi"

  # The 'storage' datastore (i.e. documents' contents)
  storage:
    # The 'storage' data volume: [hostPath | existingClaim | storageClass/size]
    hostPath: ""
    existingClaim: ""
    storageClass: ""
    size: "64Gi"

license:
  # The volume containing the 'server.lic' file [secretName | existingClaim | hostPath]
  secretName: "gargantua-license"
  existingClaim: ""
  hostPath: "" # "/mnt/f/rancher/g8-cloud-license"
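  # For example, the "gargantua-license" secret could be created from a local file with:
  #   kubectl -n <namespace> create secret generic gargantua-license --from-file=server.lic=/path/to/server.lic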

# Additional volumes on the output Deployment definition.
volumes: [ ]
# - name: foo
#   secret:
#     secretName: mysecret
#     optional: false

# Additional volumeMounts on the output Deployment definition.
volumeMounts: [ ]
# - name: foo
#   mountPath: "/etc/foo"
#   readOnly: true

# Pod resources
resources:
  requests:
    cpu: 1
    memory: 2Gi
  limits:
    cpu: 8
    memory: 8Gi

# This sets the replica count. More information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
replicaCount: 1

# This is to override the chart name.
nameOverride: ""
fullnameOverride: ""

# This section builds out the service account. More information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
serviceAccount:
  # Specifies whether a service account should be created
  create: false
  # Automatically mount a ServiceAccount's API credentials?
  automount: true
  # Annotations to add to the service account
  annotations: { }
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

# This is for setting Kubernetes annotations on a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
podAnnotations: { }
# This is for setting Kubernetes labels on a Pod.
# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
podLabels: { }

podSecurityContext: { }
  # fsGroup: 2000

securityContext: { }
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

# This is for setting up a service. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
service:
  # This sets the service type. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
  type: ClusterIP
  # This sets the ports. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
  port: 8080

# This block is for setting up the ingress. More information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
  enabled: false
  className: ""
  annotations: { }
    # cert-manager.io/cluster-issuer: letsencrypt-staging
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: my-app.mydomain
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: [ ]
  # - secretName: my-app-ingress-tls
  #   hosts:
  #     - my-app.mydomain

# This is to set up the liveness and readiness probes. More information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
  httpGet:
    path: /
    port: http
readinessProbe:
  httpGet:
    path: /
    port: http

# This section is for setting up autoscaling. More information can be found here: https://kubernetes.io/docs/concepts/workloads/autoscaling/
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 3
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector: { }

tolerations: [ ]

affinity: { }