Merge pull request 'helm chart improvements' (#425) from patrickjahns/garage:helm-improvements into main
continuous-integration/drone/push Build is failing Details

Reviewed-on: #425
This commit is contained in:
maximilien 2023-01-27 10:51:04 +00:00
commit df30f3df4b
5 changed files with 141 additions and 34 deletions

View File

@ -4,28 +4,4 @@ metadata:
name: {{ include "garage.fullname" . }}-config
data:
garage.toml: |-
metadata_dir = "{{ .Values.garage.metadataDir }}"
data_dir = "{{ .Values.garage.dataDir }}"
replication_mode = "{{ .Values.garage.replicationMode }}"
rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
# rpc_secret will be populated by the init container from a k8s secret object
rpc_secret = "__RPC_SECRET_REPLACE__"
bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
[kubernetes_discovery]
namespace = "{{ .Release.Namespace }}"
service_name = "{{ include "garage.fullname" . }}"
skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
[s3_api]
s3_region = "{{ .Values.garage.s3.api.region }}"
api_bind_addr = "[::]:3900"
root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
[s3_web]
bind_addr = "[::]:3902"
root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
index = "{{ .Values.garage.s3.web.index }}"
{{- tpl (index (index .Values.garage) "garage.toml") $ | nindent 4 }}

View File

@ -17,3 +17,24 @@ spec:
name: s3-web
selector:
{{- include "garage.selectorLabels" . | nindent 4 }}
{{- if .Values.monitoring.metrics.enabled }}
---
# Headless Service exposing the metrics endpoint on port 3903.
# Annotated for annotation-based Prometheus discovery; the ServiceMonitor
# (if enabled) selects this Service's pods via the same selector labels.
apiVersion: v1
kind: Service
metadata:
  name: {{ include "garage.fullname" . }}-metrics
  labels:
    {{- include "garage.labels" . | nindent 4 }}
  annotations:
    prometheus.io/scrape: "true"
spec:
  # Headless: no virtual IP, so every pod is scraped individually.
  type: ClusterIP
  clusterIP: None
  ports:
  - port: 3903
    targetPort: 3903
    protocol: TCP
    name: metrics
  selector:
    {{- include "garage.selectorLabels" . | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,44 @@
{{- if .Values.monitoring.metrics.serviceMonitor.enabled }}
---
# ServiceMonitor CRD for the Prometheus Operator: scrapes the "metrics"
# port of the garage metrics Service. Only rendered when
# monitoring.metrics.serviceMonitor.enabled is true.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ include "garage.fullname" . }}
  {{- if .Values.monitoring.metrics.serviceMonitor.namespace }}
  # Optional override so the ServiceMonitor can live in the namespace the
  # Prometheus Operator watches; the value itself may be templated (tpl).
  namespace: {{ tpl .Values.monitoring.metrics.serviceMonitor.namespace . }}
  {{- else }}
  namespace: {{ .Release.Namespace }}
  {{- end }}
  labels:
    {{- include "garage.labels" . | nindent 4 }}
    {{- with .Values.monitoring.metrics.serviceMonitor.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  endpoints:
  - port: metrics
    {{- with .Values.monitoring.metrics.serviceMonitor.interval }}
    interval: {{ . }}
    {{- end }}
    {{- with .Values.monitoring.metrics.serviceMonitor.scrapeTimeout }}
    scrapeTimeout: {{ . }}
    {{- end }}
    honorLabels: true
    path: {{ .Values.monitoring.metrics.serviceMonitor.path }}
    scheme: {{ .Values.monitoring.metrics.serviceMonitor.scheme }}
    {{- with .Values.monitoring.metrics.serviceMonitor.tlsConfig }}
    tlsConfig:
      {{- toYaml . | nindent 6 }}
    {{- end }}
    {{- with .Values.monitoring.metrics.serviceMonitor.relabelings }}
    relabelings:
      {{- toYaml . | nindent 6 }}
    {{- end }}
  jobLabel: "{{ .Release.Name }}"
  # Select the metrics Service via the chart's shared selector labels.
  selector:
    matchLabels:
      {{- include "garage.selectorLabels" . | nindent 6 }}
  # Restrict discovery to the release namespace.
  namespaceSelector:
    matchNames:
    - {{ .Release.Namespace }}
{{- end }}

View File

@ -14,8 +14,10 @@ spec:
{{- end }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
@ -31,7 +33,8 @@ spec:
initContainers:
# Copies garage.toml from configmap to temporary etc volume and replaces RPC secret placeholder
- name: {{ .Chart.Name }}-init
image: busybox:1.28
image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}"
imagePullPolicy: {{ .Values.initImage.pullPolicy }}
command: ["sh", "-c", "sed \"s/__RPC_SECRET_REPLACE__/$RPC_SECRET/\" /mnt/garage.toml > /mnt/etc/garage.toml"]
env:
- name: RPC_SECRET
@ -39,6 +42,8 @@ spec:
secretKeyRef:
name: {{ include "garage.rpcSecretName" . }}
key: rpcSecret
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
volumeMounts:
- name: configmap
mountPath: /mnt/garage.toml
@ -56,6 +61,8 @@ spec:
name: s3-api
- containerPort: 3902
name: web-api
- containerPort: 3903
name: admin
volumeMounts:
- name: meta
mountPath: /mnt/meta

View File

@ -4,8 +4,6 @@
# Garage configuration. These values go to garage.toml
garage:
metadataDir: "/mnt/meta"
dataDir: "/mnt/data"
# Default to 3 replicas, see the replication_mode section at
# https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
replicationMode: "3"
@ -22,6 +20,41 @@ garage:
web:
rootDomain: ".web.garage.tld"
index: "index.html"
# Template for the garage configuration
# Values can be templated
# ref: https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
garage.toml: |-
metadata_dir = "/mnt/meta"
data_dir = "/mnt/data"
replication_mode = "{{ .Values.garage.replicationMode }}"
rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
# rpc_secret will be populated by the init container from a k8s secret object
rpc_secret = "__RPC_SECRET_REPLACE__"
bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
[kubernetes_discovery]
namespace = "{{ .Release.Namespace }}"
service_name = "{{ include "garage.fullname" . }}"
skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
[s3_api]
s3_region = "{{ .Values.garage.s3.api.region }}"
api_bind_addr = "[::]:3900"
root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
[s3_web]
bind_addr = "[::]:3902"
root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
index = "{{ .Values.garage.s3.web.index }}"
[admin]
api_bind_addr = "[::]:3903"
{{- if .Values.monitoring.tracing.sink }}
trace_sink = "{{ .Values.monitoring.tracing.sink }}"
{{- end }}
# Data persistence
persistence:
@ -50,6 +83,11 @@ image:
tag: ""
pullPolicy: IfNotPresent
initImage:
repository: busybox
tag: stable
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
@ -65,18 +103,19 @@ serviceAccount:
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
podSecurityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
runAsNonRoot: true
securityContext:
# The default security context is heavily restricted
# feel free to tune it to your requirements
capabilities:
drop:
- ALL
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
service:
# You can rely on any service to expose your cluster
@ -90,6 +129,7 @@ service:
web:
port: 3902
# NOTE: the admin API is excluded for now as it is not consistent across nodes
ingress:
s3:
api:
@ -153,3 +193,22 @@ nodeSelector: {}
tolerations: []
affinity: {}
monitoring:
metrics:
# If true, a service for monitoring is created with a prometheus.io/scrape annotation
enabled: false
serviceMonitor:
# If true, a ServiceMonitor CRD is created for a prometheus operator
# https://github.com/prometheus-operator/prometheus-operator
enabled: false
path: /metrics
# namespace: monitoring (defaults to use the namespace this chart is deployed to)
labels: {}
interval: 15s
scheme: http
tlsConfig: {}
scrapeTimeout: 10s
relabelings: []
tracing:
sink: ""