helm chart improvements #425

Merged
maximilien merged 8 commits from patrickjahns/garage:helm-improvements into main 2023-01-27 10:51:05 +00:00
5 changed files with 141 additions and 34 deletions

View file

@ -4,28 +4,4 @@ metadata:
   name: {{ include "garage.fullname" . }}-config
 data:
   garage.toml: |-
-    metadata_dir = "{{ .Values.garage.metadataDir }}"
-    data_dir = "{{ .Values.garage.dataDir }}"
-    replication_mode = "{{ .Values.garage.replicationMode }}"
-    rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
-    # rpc_secret will be populated by the init container from a k8s secret object
-    rpc_secret = "__RPC_SECRET_REPLACE__"
-    bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
-    [kubernetes_discovery]
-    namespace = "{{ .Release.Namespace }}"
-    service_name = "{{ include "garage.fullname" . }}"
-    skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
-    [s3_api]
-    s3_region = "{{ .Values.garage.s3.api.region }}"
-    api_bind_addr = "[::]:3900"
-    root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
-    [s3_web]
-    bind_addr = "[::]:3902"
-    root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
-    index = "{{ .Values.garage.s3.web.index }}"
+    {{- tpl (index (index .Values.garage) "garage.toml") $ | nindent 4 }}

View file

@ -17,3 +17,24 @@ spec:
       name: s3-web
   selector:
     {{- include "garage.selectorLabels" . | nindent 4 }}
+{{- if .Values.monitoring.metrics.enabled }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "garage.fullname" . }}-metrics

Actually this is the admin API endpoint, so maybe we should simply call that `-admin`?

The initial thought here was to keep the service definitions for admin and metrics separate: two services that can be configured individually if need be. If the user exposes the admin API via a LoadBalancer service, for example, the metrics service is not impacted. Likewise, annotations added to the "admin service" (e.g. for Istio) would not affect Prometheus, and vice versa.

Let me know if this changes your thoughts - or if you would like to proceed with renaming the ports.
+  labels:
+    {{- include "garage.labels" . | nindent 4 }}
+  annotations:
+    prometheus.io/scrape: "true"
+spec:
+  type: ClusterIP
+  clusterIP: None
+  ports:
+    - port: 3903
+      targetPort: 3903
+      protocol: TCP
+      name: metrics

Same as above
+  selector:
+    {{- include "garage.selectorLabels" . | nindent 4 }}
+{{- end }}
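A quick way to check the conditional rendering locally is helm template (the chart path script/helm/garage is an assumption about the repository layout):

    helm template garage script/helm/garage \
      --set monitoring.metrics.enabled=true \
      | grep -B2 -A8 'kind: Service'

With monitoring.metrics.enabled left at its default of false, the -metrics service should disappear from the output entirely, since the whole manifest sits inside the {{- if }} guard.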

View file

@ -0,0 +1,44 @@
+{{- if .Values.monitoring.metrics.serviceMonitor.enabled }}
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  name: {{ include "garage.fullname" . }}
+  {{- if .Values.monitoring.metrics.serviceMonitor.namespace }}
+  namespace: {{ tpl .Values.monitoring.metrics.serviceMonitor.namespace . }}
+  {{- else }}
+  namespace: {{ .Release.Namespace }}
+  {{- end }}
+  labels:
+    {{- include "garage.labels" . | nindent 4 }}
+    {{- with .Values.monitoring.metrics.serviceMonitor.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+  endpoints:
+  - port: metrics
+    {{- with .Values.monitoring.metrics.serviceMonitor.interval }}
+    interval: {{ . }}
+    {{- end }}
+    {{- with .Values.monitoring.metrics.serviceMonitor.scrapeTimeout }}
+    scrapeTimeout: {{ . }}
+    {{- end }}
+    honorLabels: true
+    path: {{ .Values.monitoring.metrics.serviceMonitor.path }}
+    scheme: {{ .Values.monitoring.metrics.serviceMonitor.scheme }}
+    {{- with .Values.monitoring.metrics.serviceMonitor.tlsConfig }}
+    tlsConfig:
+      {{- toYaml . | nindent 6 }}
+    {{- end }}
+    {{- with .Values.monitoring.metrics.serviceMonitor.relabelings }}
+    relabelings:
+      {{- toYaml . | nindent 6 }}
+    {{- end }}
+  jobLabel: "{{ .Release.Name }}"
+  selector:
+    matchLabels:
+      {{- include "garage.selectorLabels" . | nindent 6 }}
+  namespaceSelector:
+    matchNames:
+      - {{ .Release.Namespace }}
+{{- end }}
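For users running the prometheus-operator, a values sketch along these lines should make the operator pick up the new ServiceMonitor; the release: prometheus label is an assumption that depends on how the operator's serviceMonitorSelector is configured in a given cluster:

    monitoring:
      metrics:
        enabled: true
        serviceMonitor:
          enabled: true
          labels:
            release: prometheus  # assumed selector label, adjust to your operator install
          interval: 30s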

View file

@ -14,8 +14,10 @@ spec:
   {{- end }}
   template:
     metadata:
-      {{- with .Values.podAnnotations }}
       annotations:
+        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+        {{- with .Values.podAnnotations }}
         {{- toYaml . | nindent 8 }}
       {{- end }}
       labels:
@ -31,7 +33,8 @@ spec:
       initContainers:
        # Copies garage.toml from configmap to temporary etc volume and replaces RPC secret placeholder
        - name: {{ .Chart.Name }}-init
-         image: busybox:1.28
+         image: "{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}"
+         imagePullPolicy: {{ .Values.initImage.pullPolicy }}
          command: ["sh", "-c", "sed \"s/__RPC_SECRET_REPLACE__/$RPC_SECRET/\" /mnt/garage.toml > /mnt/etc/garage.toml"]
          env:
            - name: RPC_SECRET
@ -39,6 +42,8 @@ spec:
              secretKeyRef:
                name: {{ include "garage.rpcSecretName" . }}
                key: rpcSecret
+         securityContext:
+           {{- toYaml .Values.securityContext | nindent 12 }}
          volumeMounts:
            - name: configmap
              mountPath: /mnt/garage.toml
@ -56,6 +61,8 @@ spec:
              name: s3-api
            - containerPort: 3902
              name: web-api
+           - containerPort: 3903
+             name: admin
          volumeMounts:
            - name: meta
              mountPath: /mnt/meta
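The new checksum/config annotation is the usual Helm trick for config-driven rollouts: the rendered configmap is hashed into the pod template, so any change to garage.toml changes the hash, the pod spec, and therefore triggers a rolling restart. Rendered output looks roughly like this (hash value is illustrative):

    template:
      metadata:
        annotations:
          checksum/config: 52d1e04d...  # changes whenever configmap.yaml renders differently

Without it, Kubernetes would consider the pod spec unchanged after a config edit and keep running with the stale mounted configuration until the pods restart for some other reason.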

View file

@ -4,8 +4,6 @@
 # Garage configuration. These values go to garage.toml
 garage:
-  metadataDir: "/mnt/meta"
-  dataDir: "/mnt/data"
 
   # Default to 3 replicas, see the replication_mode section at
   # https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
   replicationMode: "3"
@ -22,6 +20,41 @@ garage:
     web:
       rootDomain: ".web.garage.tld"
       index: "index.html"
+  # Template for the garage configuration
+  # Values can be templated
+  # ref: https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/
+  garage.toml: |-
+    metadata_dir = "/mnt/meta"
+    data_dir = "/mnt/data"
+
+    replication_mode = "{{ .Values.garage.replicationMode }}"
+
+    rpc_bind_addr = "{{ .Values.garage.rpcBindAddr }}"
+    # rpc_secret will be populated by the init container from a k8s secret object
+    rpc_secret = "__RPC_SECRET_REPLACE__"
+
+    bootstrap_peers = {{ .Values.garage.bootstrapPeers }}
+
+    [kubernetes_discovery]
+    namespace = "{{ .Release.Namespace }}"
+    service_name = "{{ include "garage.fullname" . }}"
+    skip_crd = {{ .Values.garage.kubernetesSkipCrd }}
+
+    [s3_api]
+    s3_region = "{{ .Values.garage.s3.api.region }}"
+    api_bind_addr = "[::]:3900"
+    root_domain = "{{ .Values.garage.s3.api.rootDomain }}"
+
+    [s3_web]
+    bind_addr = "[::]:3902"
+    root_domain = "{{ .Values.garage.s3.web.rootDomain }}"
+    index = "{{ .Values.garage.s3.web.index }}"
+
+    [admin]
+    api_bind_addr = "[::]:3903"
+    {{- if .Values.monitoring.tracing.sink }}
+    trace_sink = "{{ .Values.monitoring.tracing.sink }}"
+    {{- end }}
 
 # Data persistence
 persistence:
@ -50,6 +83,11 @@ image:
   tag: ""
   pullPolicy: IfNotPresent
 
+initImage:
+  repository: busybox
+  tag: stable
+  pullPolicy: IfNotPresent
+
 imagePullSecrets: []
 nameOverride: ""
 fullnameOverride: ""
@ -65,18 +103,19 @@ serviceAccount:
 podAnnotations: {}
 
-podSecurityContext: {}
-  # fsGroup: 2000
+podSecurityContext:
+  runAsUser: 1000
+  runAsGroup: 1000
+  fsGroup: 1000
+  runAsNonRoot: true
 
 securityContext:
+  # The default security context is heavily restricted
+  # feel free to tune it to your requirements
   capabilities:
     drop:
       - ALL
   readOnlyRootFilesystem: true
-  runAsNonRoot: true
-  runAsUser: 1000
 
 service:
   # You can rely on any service to expose your cluster
@ -90,6 +129,7 @@ service:
     web:
       port: 3902
+    # NOTE: the admin API is excluded for now as it is not consistent across nodes
 ingress:
   s3:
     api:
@ -153,3 +193,22 @@ nodeSelector: {}
 tolerations: []
 affinity: {}
Remove newline?

patrickjahns marked this conversation as resolved
+monitoring:
+  metrics:
+    # If true, a service for monitoring is created with a prometheus.io/scrape annotation
+    enabled: false
+    serviceMonitor:
+      # If true, a ServiceMonitor CRD is created for a prometheus operator
+      # https://github.com/coreos/prometheus-operator
+      enabled: false
+      path: /metrics
+      # namespace: monitoring  (defaults to use the namespace this chart is deployed to)
+      labels: {}
+      interval: 15s
+      scheme: http
+      tlsConfig: {}
+      scrapeTimeout: 10s
+      relabelings: []
+  tracing:
+    sink: ""
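Putting the pieces together, enabling the whole monitoring surface added by this PR could look like the following (the release name, chart path, and sink URL are placeholders, not values defined by the chart):

    helm upgrade --install garage script/helm/garage \
      --set monitoring.metrics.enabled=true \
      --set monitoring.metrics.serviceMonitor.enabled=true \
      --set monitoring.tracing.sink=http://jaeger:4317

The tracing sink is assumed to point at an OTLP-compatible collector endpoint, matching the trace_sink setting in the garage configuration reference.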