Compare commits

..

16 Commits

33 changed files with 1915 additions and 206 deletions

View File

@ -1,3 +1,4 @@
[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/rtomik-helm-charts)](https://artifacthub.io/packages/search?repo=rtomik-helm-charts)
# helm-charts
Repo for helm charts
Donetick Helm chart

View File

@ -9,11 +9,7 @@
# when the hash of the last commit in the branch you set up changes. This does
# NOT apply to ownership claim operations, which are processed immediately.
#
repositoryID: 8ec75275-172f-45e9-b188-3b0dd609bee9
repositoryID: 11743389-27d2-4d03-a271-1dd96844082f
owners: # (optional, used to claim repository ownership)
- name: user1
email: tomikr7@gmail.com
ignore: # (optional, packages that should not be indexed by Artifact Hub)
- name: donetick # Exact match
version: 0.1.0 # Regular expression (when omitted, all versions are ignored)
- name: rtomik
email: n@gmail.com

View File

@ -1,9 +1,9 @@
apiVersion: v2
name: donetick
description: A Helm chart for Donetick application
description: Donetick helm chart for Kubernetes
type: application
version: 1.0.1
appVersion: "latest"
appVersion: "v0.1.38"
maintainers:
- name: Richard Tomik
email: no@m.com

View File

@ -6,6 +6,10 @@ A Helm chart for deploying the Donetick task management application on Kubernete
This chart deploys [Donetick](https://github.com/donetick/donetick) on a Kubernetes cluster using the Helm package manager.
Source code can be found here:
- https://github.com/rtomik/helm-charts/tree/main/charts/donetick
## Prerequisites
- Kubernetes 1.19+

View File

@ -5,7 +5,7 @@ fullnameOverride: ""
## Image settings
image:
repository: donetick/donetick
tag: latest
tag: "v0.1.38"
pullPolicy: IfNotPresent
## Deployment settings
@ -41,8 +41,8 @@ service:
## Ingress settings
ingress:
enabled: true
className: "traefik"
enabled: false
className: ""
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
hosts:
@ -58,8 +58,8 @@ ingress:
## Persistence settings
persistence:
enabled: true
storageClass: "longhorn"
enabled: false
storageClass: ""
accessMode: ReadWriteOnce
size: 1Gi
annotations: {}
@ -85,13 +85,13 @@ extraVolumeMounts: []
extraVolumes: []
## Resource limits and requests
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
# resources:
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Application health checks
probes:

View File

@ -0,0 +1,16 @@
apiVersion: v2
name: jellyseerr
description: Jellyseerr helm chart for Kubernetes
type: application
version: 0.0.1
# Quoted so the value is always parsed as a string, matching the donetick
# chart's quoted appVersion style ("v0.1.38").
appVersion: "2.5.2"
maintainers:
  - name: Richard Tomik
    email: no@m.com
keywords:
  - jellyseerr
  - jellyfin
  - media-requests
home: https://github.com/rtomik/helm-charts
sources:
  - https://github.com/fallenbagel/jellyseerr

View File

@ -0,0 +1,32 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "jellyseerr.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "jellyseerr.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "jellyseerr.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "jellyseerr.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
2. Jellyseerr will be available at port {{ .Values.service.port }}
{{- if .Values.persistence.enabled }}
3. Data is persisted using PVC: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{ else }}{{ include "jellyseerr.fullname" . }}-config{{ end }}
{{- else }}
3. WARNING: No persistence enabled. Data will be lost when pods are restarted.
{{- end }}
For more information about using this Helm chart, please refer to the README.md file.

145
charts/jellyseerr/readme.md Normal file
View File

@ -0,0 +1,145 @@
# Jellyseerr Helm Chart
A Helm chart for deploying [Jellyseerr](https://github.com/fallenbagel/jellyseerr) on Kubernetes.
## Introduction
This chart deploys Jellyseerr on a Kubernetes cluster using the Helm package manager. Jellyseerr is a fork of Overseerr for Jellyfin support.
Source code can be found here:
- https://github.com/rtomik/helm-charts/tree/main/charts/jellyseerr
## Prerequisites
- Kubernetes 1.19+
- Helm 3.0+
- PV provisioner support in the underlying infrastructure (if persistence is needed)
## Installing the Chart
To install the chart with the release name `jellyseerr`:
```bash
helm repo add rtomik-charts https://rtomik.github.io/helm-charts
helm install jellyseerr rtomik-charts/jellyseerr
```
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `jellyseerr` deployment:
```bash
helm uninstall jellyseerr
```
## Parameters
### Global parameters
| Name | Description | Value |
|------------------------|---------------------------------------------------------------|--------|
| `nameOverride` | String to partially override the release name | `""` |
| `fullnameOverride` | String to fully override the release name | `""` |
### Image parameters
| Name | Description | Value |
|-------------------------|--------------------------------------------------------------|--------------------------------|
| `image.repository` | Jellyseerr image repository | `ghcr.io/fallenbagel/jellyseerr` |
| `image.tag`              | Jellyseerr image tag                                          | `2.5.2`                        |
| `image.pullPolicy` | Jellyseerr image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
### Deployment parameters
| Name | Description | Value |
|--------------------------------------|--------------------------------------------------|-----------|
| `replicaCount` | Number of Jellyseerr replicas | `1` |
| `revisionHistoryLimit` | Number of revisions to retain for rollback | `3` |
| `podSecurityContext.runAsNonRoot` | Run containers as non-root user | `true` |
| `podSecurityContext.runAsUser` | User ID for the container | `1000` |
| `podSecurityContext.fsGroup` | Group ID for the container filesystem | `1000` |
| `containerSecurityContext` | Security context for the container | See values.yaml |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Tolerations for pod assignment | `[]` |
| `affinity` | Affinity for pod assignment | `{}` |
### Service parameters
| Name | Description | Value |
|----------------------------|----------------------------------------------|-------------|
| `service.type` | Kubernetes Service type | `ClusterIP` |
| `service.port` | Service HTTP port | `5055` |
### Ingress parameters
| Name | Description | Value |
|----------------------------|----------------------------------------------|------------------------|
| `ingress.enabled` | Enable ingress record generation | `false` |
| `ingress.className` | IngressClass name | `""` |
| `ingress.annotations` | Additional annotations for the Ingress resource | `{}` |
| `ingress.hosts` | Array of host and path objects | See values.yaml |
| `ingress.tls` | TLS configuration | `[]` |
### Persistence parameters
| Name | Description | Value |
|-------------------------------|----------------------------------------------|-----------------|
| `persistence.enabled` | Enable persistence using PVC | `true` |
| `persistence.existingClaim` | Use an existing PVC | `""` |
| `persistence.storageClass` | PVC Storage Class | `""` |
| `persistence.accessMode` | PVC Access Mode | `ReadWriteOnce` |
| `persistence.size` | PVC Storage Size | `1Gi` |
| `persistence.annotations` | Additional custom annotations for the PVC | `{}` |
### Environment variables
| Name | Description | Value |
|--------------------------|----------------------------------------------|-----------------|
| `env` | Environment variables for Jellyseerr | See values.yaml |
| `extraEnv` | Additional environment variables | `[]` |
### Resources parameters
| Name | Description | Value |
|--------------------------|----------------------------------------------|-----------------|
| `resources.limits` | The resources limits for containers | See values.yaml |
| `resources.requests` | The resources requests for containers | See values.yaml |
## Configuration
The following table lists the configurable parameters of the Jellyseerr chart and their default values.
### Environment Variables
You can configure Jellyseerr by setting environment variables:
```yaml
env:
- name: TZ
value: "America/New_York"
- name: LOG_LEVEL
value: "info"
- name: PORT
value: "5055"
```
### Using Persistence
By default, persistence is enabled with a 1Gi volume:
```yaml
persistence:
enabled: true
size: 1Gi
```
You can also use an existing PVC:
```yaml
persistence:
enabled: true
existingClaim: my-jellyseerr-pvc
```

View File

@ -0,0 +1,45 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "jellyseerr.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
*/}}
{{- define "jellyseerr.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- printf "%s" $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "jellyseerr.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "jellyseerr.labels" -}}
helm.sh/chart: {{ include "jellyseerr.chart" . }}
{{ include "jellyseerr.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "jellyseerr.selectorLabels" -}}
app.kubernetes.io/name: {{ include "jellyseerr.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@ -0,0 +1,107 @@
### templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "jellyseerr.fullname" . }}
  labels:
    {{- include "jellyseerr.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
  selector:
    matchLabels:
      {{- include "jellyseerr.selectorLabels" . | nindent 6 }}
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 1
  template:
    metadata:
      labels:
        {{- include "jellyseerr.selectorLabels" . | nindent 8 }}
      # Only emit "annotations:" when podAnnotations is non-empty; a bare
      # "annotations:" key renders as null and trips strict YAML linters.
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.containerSecurityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          {{- if .Values.startupArgs }}
          args:
            {{- range .Values.startupArgs }}
            - {{ . | quote }}
            {{- end }}
          {{- end }}
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          {{- if .Values.probes.liveness.enabled }}
          livenessProbe:
            httpGet:
              path: {{ .Values.probes.liveness.path }}
              port: http
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            periodSeconds: {{ .Values.probes.liveness.periodSeconds }}
            timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
            failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
            successThreshold: {{ .Values.probes.liveness.successThreshold }}
          {{- end }}
          {{- if .Values.probes.readiness.enabled }}
          readinessProbe:
            httpGet:
              path: {{ .Values.probes.readiness.path }}
              port: http
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            periodSeconds: {{ .Values.probes.readiness.periodSeconds }}
            timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
            failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
            successThreshold: {{ .Values.probes.readiness.successThreshold }}
          {{- end }}
          env:
            {{- range .Values.env }}
            - name: {{ .name }}
              value: {{ .value | quote }}
            {{- end }}
            {{- with .Values.extraEnv }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          volumeMounts:
            - name: config
              mountPath: /app/config
            {{- with .Values.extraVolumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          # Only render resources when set; values.yaml ships with it commented
          # out, and an unconditional toYaml would emit "resources: null".
          {{- with .Values.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
      volumes:
        - name: config
          # The PVC template is only rendered when persistence.enabled is true,
          # so fall back to an ephemeral emptyDir when persistence is disabled;
          # otherwise the pod would reference a claim that never exists and
          # stay Pending. NOTES.txt already documents the no-persistence mode.
          {{- if .Values.persistence.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{ else }}{{ include "jellyseerr.fullname" . }}-config{{ end }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        {{- with .Values.extraVolumes }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

View File

@ -0,0 +1,43 @@
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "jellyseerr.fullname" . }}
labels:
{{- include "jellyseerr.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.className }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
{{- if .secretName }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
pathType: {{ .pathType }}
backend:
service:
name: {{ include "jellyseerr.fullname" $ }}
port:
number: {{ $.Values.service.port }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,21 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ include "jellyseerr.fullname" . }}-config
labels:
{{- include "jellyseerr.labels" . | nindent 4 }}
{{- with .Values.persistence.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
accessModes:
- {{ .Values.persistence.accessMode | quote }}
{{- if .Values.persistence.storageClass }}
storageClassName: {{ .Values.persistence.storageClass | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- end }}

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "jellyseerr.fullname" . }}
labels:
{{- include "jellyseerr.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "jellyseerr.selectorLabels" . | nindent 4 }}

View File

@ -0,0 +1,117 @@
## Global settings
nameOverride: ""
fullnameOverride: ""

## Image settings
image:
  repository: ghcr.io/fallenbagel/jellyseerr
  # Quoted for consistency with the donetick chart (tag: "v0.1.38") and to
  # guarantee the tag is always parsed as a string.
  tag: "2.5.2"
  pullPolicy: IfNotPresent

## Deployment settings
replicaCount: 1
revisionHistoryLimit: 3

# Optional startup arguments
startupArgs: []

# Pod security settings
podSecurityContext:
  runAsNonRoot: true
  runAsUser: 1000
  fsGroup: 1000

containerSecurityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: false
  capabilities:
    drop:
      - ALL

## Pod scheduling
nodeSelector: {}
tolerations: []
affinity: {}

## Pod annotations
podAnnotations: {}

## Service settings
service:
  type: ClusterIP
  port: 5055

## Ingress settings
ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
    # cert-manager.io/cluster-issuer: letsencrypt-prod
  hosts:
    - host: jellyseerr.domain.com
      paths:
        - path: /
          pathType: Prefix
  tls: []
  # - hosts:
  #     - jellyseerr.domain.com
  #   secretName: jellyseerr-tls

## Persistence settings
persistence:
  enabled: true
  existingClaim: ""
  storageClass: ""
  accessMode: ReadWriteOnce
  size: 1Gi
  annotations: {}

## Environment variables
env:
  - name: TZ
    value: "UTC"
  - name: LOG_LEVEL
    value: "info"
  - name: PORT
    value: "5055"

# Extra environment variables (for advanced use cases)
extraEnv: []
# - name: NODE_ENV
#   value: "production"

# Extra volume mounts
extraVolumeMounts: []

# Extra volumes
extraVolumes: []

## Resource limits and requests
# resources:
#   limits:
#     cpu: 500m
#     memory: 512Mi
#   requests:
#     cpu: 100m
#     memory: 128Mi

## Application health checks
probes:
  liveness:
    enabled: true
    initialDelaySeconds: 30
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1
    path: /api/v1/status
  readiness:
    enabled: true
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 3
    failureThreshold: 3
    successThreshold: 1
    path: /api/v1/status

View File

@ -0,0 +1,18 @@
apiVersion: v2
name: qbittorrent-vpn
description: qBittorrent with Gluetun VPN sidecar for Kubernetes
type: application
version: 0.0.1
# Quoted so the value is always parsed as a string, matching the donetick
# chart's quoted appVersion style ("v0.1.38").
appVersion: "5.1.0"
maintainers:
  - name: Richard Tomik
    email: richard.tomik@proton.me
keywords:
  - qbittorrent
  - vpn
  - gluetun
  - torrent
home: https://github.com/rtomik/helm-charts
sources:
  - https://github.com/linuxserver/docker-qbittorrent
  - https://github.com/qdm12/gluetun

View File

@ -0,0 +1,38 @@
Thank you for installing {{ .Chart.Name }}.
Your qBittorrent with VPN has been deployed successfully!
1. Get the application URL:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "qbittorrent-vpn.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "qbittorrent-vpn.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "qbittorrent-vpn.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "qbittorrent-vpn.fullname" . }} {{ .Values.service.port }}:{{ .Values.service.port }}
Visit http://127.0.0.1:{{ .Values.service.port }} to access qBittorrent
{{- end }}
2. VPN Status:
To check the VPN connection status:
kubectl exec -it -n {{ .Release.Namespace }} deployment/{{ include "qbittorrent-vpn.fullname" . }} -c gluetun -- curl -s http://localhost:8000/v1/vpn/status
3. Public IP:
To check your current public IP through the VPN:
kubectl exec -it -n {{ .Release.Namespace }} deployment/{{ include "qbittorrent-vpn.fullname" . }} -c gluetun -- curl -s http://localhost:8000/v1/publicip/ip
4. Verify qBittorrent:
Make sure qBittorrent is functioning by accessing the Web UI at the URL in step 1.
For more information about this chart:
https://github.com/rtomik/helm-charts/tree/main/charts/qbittorrent-vpn

View File

@ -0,0 +1,273 @@
# qBittorrent with Gluetun VPN
A Helm chart for deploying qBittorrent with a Gluetun VPN sidecar container on Kubernetes.
## Introduction
This chart deploys [qBittorrent](https://www.qbittorrent.org/) alongside [Gluetun](https://github.com/qdm12/gluetun), a VPN client/tunnel in a container, to ensure all BitTorrent traffic is routed through the VPN. The chart supports all major VPN providers and protocols through Gluetun's comprehensive compatibility.
Source code can be found here:
- https://github.com/rtomik/helm-charts/tree/main/charts/qbittorrent-vpn
Note: Currently only tested with NordVPN using an OpenVPN configuration.
## Features
- **Multiple VPN Providers**: Support for 30+ VPN providers including NordVPN, ProtonVPN, Private Internet Access, ExpressVPN, Surfshark, Mullvad, and more
- **Protocol Support**: Use OpenVPN or WireGuard based on your provider's capabilities
- **Server Selection**: Choose servers by country, city, or specific hostnames with optional randomization
- **Security**: Proper container security settings to ensure traffic only flows through the VPN
- **Health Monitoring**: Integrated health checks for both qBittorrent and the VPN connection
- **Persistence**: Separate volume storage for configuration and downloads
- **Web UI**: Access qBittorrent via web interface with optional ingress support
- **Proxy Services**: HTTP and Shadowsocks proxies for additional devices to use the VPN tunnel
## Prerequisites
- Kubernetes 1.19+
- Helm 3.2.0+
- PV provisioner support in the cluster
- A valid subscription to a VPN service
## Installation
### Add the Repository
```bash
helm repo add rtomik-charts https://rtomik.github.io/helm-charts
helm repo update
```
### Create a Secret for VPN Credentials
For better security, store your VPN credentials in a Kubernetes secret:
```bash
# For OpenVPN authentication
kubectl create secret generic vpn-credentials \
--namespace default \
--from-literal=username='your-vpn-username' \
--from-literal=password='your-vpn-password'
# For WireGuard authentication (if using WireGuard)
kubectl create secret generic wireguard-keys \
--namespace default \
--from-literal=private_key='your-wireguard-private-key'
```
Then reference this secret in your values:
```yaml
gluetun:
credentials:
create: false
existingSecret: "vpn-credentials"
usernameKey: "username"
passwordKey: "password"
```
### Install the Chart
```bash
# Option 1: Installation with custom values file (recommended)
helm install qbittorrent-vpn rtomik-charts/qbittorrent-vpn -f values.yaml -n media
# Option 2: Installation with inline parameter overrides
helm install qbittorrent-vpn rtomik-charts/qbittorrent-vpn -n media \
--set gluetun.vpn.provider=nordvpn \
--set gluetun.vpn.serverCountries=Germany \
--set-string gluetun.credentials.existingSecret=vpn-credentials
```
## Uninstallation
```bash
helm uninstall qbittorrent-vpn -n media
```
Note: This will not delete Persistent Volume Claims. To delete them:
```bash
kubectl delete pvc -l app.kubernetes.io/instance=qbittorrent-vpn
```
## Configuration
### Key Parameters
| Parameter | Description | Default |
|---------------------------------------|-------------------------------------------------------|----------------------------|
| `qbittorrent.image.repository` | qBittorrent image repository | `linuxserver/qbittorrent` |
| `qbittorrent.image.tag` | qBittorrent image tag | `latest` |
| `gluetun.image.repository` | Gluetun image repository | `qmcgaw/gluetun` |
| `gluetun.image.tag` | Gluetun image tag | `v3.40.0` |
| `gluetun.vpn.provider` | VPN provider name | `nordvpn` |
| `gluetun.vpn.type` | VPN protocol (`openvpn` or `wireguard`) | `openvpn` |
| `gluetun.vpn.serverCountries` | Countries to connect to (comma-separated) | `Germany` |
| `persistence.config.size` | Size of PVC for qBittorrent config | `2Gi` |
| `persistence.downloads.size` | Size of PVC for downloads | `100Gi` |
| `ingress.enabled` | Enable ingress controller resource | `true` |
| `ingress.hosts[0].host` | Hostname for the ingress | `qbittorrent.domain.com` |
For a complete list of parameters, see the [values.yaml](values.yaml) file.
### Example: Using with NordVPN
```yaml
gluetun:
vpn:
provider: "nordvpn"
type: "openvpn"
serverCountries: "United States"
openvpn:
NORDVPN_CATEGORY: "P2P" # For torrent-optimized servers
credentials:
create: true
username: "your-nordvpn-username"
password: "your-nordvpn-password"
```
### Example: Using with ProtonVPN
```yaml
gluetun:
vpn:
provider: "protonvpn"
type: "openvpn"
serverCountries: "Switzerland"
openvpn:
PROTONVPN_TIER: "2" # 0 is free, 2 is paid (Plus/Visionary)
SERVER_FEATURES: "p2p" # For torrent support
credentials:
create: true
username: "protonvpn-username"
password: "protonvpn-password"
```
### Example: Using with Private Internet Access
```yaml
gluetun:
vpn:
provider: "private internet access"
type: "openvpn"
serverCountries: "US"
credentials:
create: true
username: "pia-username"
password: "pia-password"
settings:
VPN_PORT_FORWARDING: "on" # PIA supports port forwarding
```
## VPN Provider Support
This chart supports all VPN providers compatible with Gluetun, including:
- AirVPN
- Cyberghost
- ExpressVPN
- FastestVPN
- HideMyAss
- IPVanish
- IVPN
- Mullvad
- NordVPN
- Perfect Privacy
- Private Internet Access (PIA)
- PrivateVPN
- ProtonVPN
- PureVPN
- Surfshark
- TorGuard
- VyprVPN
- WeVPN
- Windscribe
For the complete list and provider-specific options, see the [Gluetun Providers Documentation](https://github.com/qdm12/gluetun-wiki/tree/main/setup/providers).
## Additional Features
### Accessing the HTTP Proxy
Gluetun provides an HTTP proxy on port 8888 that can be used by other applications to route traffic through the VPN. To expose this proxy:
```yaml
service:
proxies:
enabled: true
httpPort: 8888
socksPort: 8388
```
### Firewall Settings
By default, the chart enables the Gluetun firewall to prevent leaks if the VPN connection drops. You can customize this:
```yaml
gluetun:
settings:
FIREWALL: "on"
FIREWALL_OUTBOUND_SUBNETS: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
```
### Port Forwarding
For VPN providers that support port forwarding (like PIA):
```yaml
gluetun:
settings:
VPN_PORT_FORWARDING: "on"
STATUS_FILE: "/tmp/gluetun-status.json"
```
## Troubleshooting
### VPN Connection Issues
If the VPN isn't connecting properly:
1. Check the Gluetun logs:
```bash
kubectl logs deployment/qbittorrent-vpn -c gluetun
```
2. Verify your credentials are correct:
```bash
kubectl describe secret vpn-credentials
```
3. Try setting the log level to debug for more detailed information:
```yaml
gluetun:
extraEnv:
- name: LOG_LEVEL
value: "debug"
```
### qBittorrent Can't Create Directories
If you see errors like "Could not create required directory":
1. Make sure the init container is enabled and properly configured
2. Ensure proper `fsGroup` is set in the `podSecurityContext`
3. Check that the persistence volume allows the correct permissions
### Firewall/Security Issues
If you encounter iptables or network issues:
1. Ensure the Gluetun container has `privileged: true`
2. Verify the `NET_ADMIN` capability is added
3. Check that the `/dev/net/tun` device is correctly mounted
## License
This chart is licensed under the MIT License.
## Acknowledgements
- [Gluetun](https://github.com/qdm12/gluetun) by [qdm12](https://github.com/qdm12)
- [LinuxServer.io](https://linuxserver.io/) for the qBittorrent container
- The qBittorrent team for the excellent torrent client

View File

@ -0,0 +1,45 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "qbittorrent-vpn.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
*/}}
{{- define "qbittorrent-vpn.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- printf "%s" $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "qbittorrent-vpn.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "qbittorrent-vpn.labels" -}}
helm.sh/chart: {{ include "qbittorrent-vpn.chart" . }}
{{ include "qbittorrent-vpn.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "qbittorrent-vpn.selectorLabels" -}}
app.kubernetes.io/name: {{ include "qbittorrent-vpn.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@ -0,0 +1,306 @@
# Deployment running qBittorrent with an optional Gluetun VPN sidecar.
# Fix: the source had lost all YAML indentation (whitespace-mangled paste);
# structure restored to conventional Helm manifest layout.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "qbittorrent-vpn.fullname" . }}
  labels:
    {{- include "qbittorrent-vpn.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
  selector:
    matchLabels:
      {{- include "qbittorrent-vpn.selectorLabels" . | nindent 6 }}
  strategy:
    # Recreate instead of RollingUpdate: the pod mounts ReadWriteOnce PVCs,
    # which cannot be attached to the old and new pod at the same time.
    type: Recreate
  template:
    metadata:
      labels:
        {{- include "qbittorrent-vpn.selectorLabels" . | nindent 8 }}
      annotations:
        # Roll the pod whenever the rendered VPN-credentials secret changes.
        checksum/config: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
        {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      # Optional host networking (development/debugging only)
      {{- if .Values.hostNetwork }}
      hostNetwork: {{ .Values.hostNetwork }}
      {{- end }}
      # Optional init containers, e.g. for directory setup
      {{- if .Values.initContainers }}
      initContainers:
        {{- toYaml .Values.initContainers | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        {{- if .Values.gluetun.enabled }}
        # Gluetun VPN sidecar
        - name: gluetun
          image: "{{ .Values.gluetun.image.repository }}:{{ .Values.gluetun.image.tag }}"
          imagePullPolicy: {{ .Values.gluetun.image.pullPolicy }}
          securityContext:
            {{- toYaml .Values.gluetun.securityContext | nindent 12 }}
          env:
            # VPN provider selection - common settings for all VPN types
            - name: VPN_SERVICE_PROVIDER
              value: {{ .Values.gluetun.vpn.provider | quote }}
            - name: VPN_TYPE
              value: {{ .Values.gluetun.vpn.type | quote }}
            - name: SERVER_COUNTRIES
              value: {{ .Values.gluetun.vpn.serverCountries | quote }}
            {{- if .Values.gluetun.vpn.serverNames }}
            - name: SERVER_HOSTNAMES
              value: {{ .Values.gluetun.vpn.serverNames | quote }}
            {{- end }}
            {{- if .Values.gluetun.vpn.serverCities }}
            - name: SERVER_CITIES
              value: {{ .Values.gluetun.vpn.serverCities | quote }}
            {{- end }}
            {{- if .Values.gluetun.vpn.randomize }}
            - name: SERVER_HOSTNAMES_RANDOMIZED
              value: {{ .Values.gluetun.vpn.randomize | quote }}
            {{- end }}
            # OpenVPN specific configuration: credentials come either from the
            # chart-managed secret (credentials.create) or an existing secret.
            {{- if eq .Values.gluetun.vpn.type "openvpn" }}
            {{- if .Values.gluetun.credentials.create }}
            - name: OPENVPN_USER
              valueFrom:
                secretKeyRef:
                  name: {{ include "qbittorrent-vpn.fullname" . }}-vpn-credentials
                  key: {{ .Values.gluetun.credentials.usernameKey }}
            - name: OPENVPN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "qbittorrent-vpn.fullname" . }}-vpn-credentials
                  key: {{ .Values.gluetun.credentials.passwordKey }}
            {{- else if .Values.gluetun.credentials.existingSecret }}
            - name: OPENVPN_USER
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.gluetun.credentials.existingSecret }}
                  key: {{ .Values.gluetun.credentials.usernameKey }}
            - name: OPENVPN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.gluetun.credentials.existingSecret }}
                  key: {{ .Values.gluetun.credentials.passwordKey }}
            {{- end }}
            # Additional OpenVPN settings, rendered as UPPERCASE env vars
            {{- with .Values.gluetun.vpn.openvpn }}
            {{- range $key, $value := . }}
            - name: {{ $key | upper }}
              value: {{ $value | quote }}
            {{- end }}
            {{- end }}
            {{- end }}
            # WireGuard specific configuration
            {{- if eq .Values.gluetun.vpn.type "wireguard" }}
            {{- if and .Values.gluetun.vpn.wireguard.privateKey .Values.gluetun.credentials.create }}
            - name: WIREGUARD_PRIVATE_KEY
              valueFrom:
                secretKeyRef:
                  name: {{ include "qbittorrent-vpn.fullname" . }}-vpn-credentials
                  key: wireguard_private_key
            {{- else if and .Values.gluetun.vpn.wireguard.privateKeyExistingSecret .Values.gluetun.vpn.wireguard.privateKeyExistingSecretKey }}
            - name: WIREGUARD_PRIVATE_KEY
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.gluetun.vpn.wireguard.privateKeyExistingSecret }}
                  key: {{ .Values.gluetun.vpn.wireguard.privateKeyExistingSecretKey }}
            {{- end }}
            # Additional WireGuard settings
            {{- with .Values.gluetun.vpn.wireguard }}
            {{- if .addresses }}
            - name: WIREGUARD_ADDRESSES
              value: {{ .addresses | quote }}
            {{- end }}
            {{- if .endpointIP }}
            - name: WIREGUARD_ENDPOINT_IP
              value: {{ .endpointIP | quote }}
            {{- end }}
            {{- if .endpointPort }}
            - name: WIREGUARD_ENDPOINT_PORT
              value: {{ .endpointPort | quote }}
            {{- end }}
            {{- if .publicKey }}
            - name: WIREGUARD_PUBLIC_KEY
              value: {{ .publicKey | quote }}
            {{- end }}
            {{- end }}
            {{- end }}
            # Gluetun general settings, rendered as UPPERCASE env vars
            {{- with .Values.gluetun.settings }}
            {{- range $key, $value := . }}
            - name: {{ $key | upper }}
              value: {{ $value | quote }}
            {{- end }}
            {{- end }}
            # Extra environment variables
            {{- with .Values.gluetun.extraEnv }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          ports:
            - name: control
              containerPort: 8000
              protocol: TCP
            - name: http-proxy
              containerPort: 8888
              protocol: TCP
            - name: shadowsocks-tcp
              containerPort: 8388
              protocol: TCP
            - name: shadowsocks-udp
              containerPort: 8388
              protocol: UDP
            {{- with .Values.gluetun.extraPorts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          volumeMounts:
            # /dev/net/tun character device is required to create the tunnel
            - name: tun
              mountPath: /dev/net/tun
            {{- if .Values.gluetun.persistence.enabled }}
            - name: gluetun-config
              mountPath: /gluetun
            {{- end }}
            {{- with .Values.gluetun.extraVolumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          resources:
            {{- toYaml .Values.gluetun.resources | nindent 12 }}
        {{- end }}
        # qBittorrent container
        - name: qbittorrent
          image: "{{ .Values.qbittorrent.image.repository }}:{{ .Values.qbittorrent.image.tag }}"
          imagePullPolicy: {{ .Values.qbittorrent.image.pullPolicy }}
          {{- if .Values.qbittorrent.securityContext }}
          securityContext:
            {{- toYaml .Values.qbittorrent.securityContext | nindent 12 }}
          {{- end }}
          ports:
            - name: http
              containerPort: {{ .Values.qbittorrent.service.port }}
              protocol: TCP
            {{- if .Values.qbittorrent.bittorrentPort }}
            - name: bittorrent-tcp
              containerPort: {{ .Values.qbittorrent.bittorrentPort }}
              protocol: TCP
            - name: bittorrent-udp
              containerPort: {{ .Values.qbittorrent.bittorrentPort }}
              protocol: UDP
            {{- end }}
          {{- if .Values.probes.liveness.enabled }}
          livenessProbe:
            httpGet:
              path: {{ .Values.probes.liveness.path }}
              port: http
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            periodSeconds: {{ .Values.probes.liveness.periodSeconds }}
            timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
            failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
            successThreshold: {{ .Values.probes.liveness.successThreshold }}
          {{- end }}
          {{- if .Values.probes.readiness.enabled }}
          readinessProbe:
            httpGet:
              path: {{ .Values.probes.readiness.path }}
              port: http
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            periodSeconds: {{ .Values.probes.readiness.periodSeconds }}
            timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
            failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
            successThreshold: {{ .Values.probes.readiness.successThreshold }}
          {{- end }}
          env:
            {{- range .Values.qbittorrent.env }}
            - name: {{ .name }}
              value: {{ .value | quote }}
            {{- end }}
            {{- with .Values.qbittorrent.extraEnv }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          volumeMounts:
            {{- if .Values.qbittorrent.persistence.config.enabled }}
            - name: config
              mountPath: {{ .Values.qbittorrent.persistence.config.mountPath }}
            {{- end }}
            {{- if .Values.qbittorrent.persistence.downloads.enabled }}
            - name: downloads
              mountPath: {{ .Values.qbittorrent.persistence.downloads.mountPath }}
            {{- end }}
            {{- with .Values.qbittorrent.extraVolumeMounts }}
            {{- toYaml . | nindent 12 }}
            {{- end }}
          resources:
            {{- toYaml .Values.qbittorrent.resources | nindent 12 }}
      volumes:
        # Expose the node's /dev/net/tun character device to the pod
        - name: tun
          hostPath:
            path: /dev/net/tun
            type: CharDevice
        {{- if .Values.qbittorrent.persistence.config.enabled }}
        - name: config
          persistentVolumeClaim:
            claimName: {{ if .Values.qbittorrent.persistence.config.existingClaim }}{{ .Values.qbittorrent.persistence.config.existingClaim }}{{ else }}{{ include "qbittorrent-vpn.fullname" . }}-config{{ end }}
        {{- end }}
        {{- if .Values.qbittorrent.persistence.downloads.enabled }}
        - name: downloads
          persistentVolumeClaim:
            claimName: {{ if .Values.qbittorrent.persistence.downloads.existingClaim }}{{ .Values.qbittorrent.persistence.downloads.existingClaim }}{{ else }}{{ include "qbittorrent-vpn.fullname" . }}-downloads{{ end }}
        {{- end }}
        {{- if and .Values.gluetun.enabled .Values.gluetun.persistence.enabled }}
        {{- if .Values.gluetun.persistence.useEmptyDir }}
        - name: gluetun-config
          emptyDir: {}
        {{- else }}
        - name: gluetun-config
          persistentVolumeClaim:
            claimName: {{ if .Values.gluetun.persistence.existingClaim }}{{ .Values.gluetun.persistence.existingClaim }}{{ else }}{{ include "qbittorrent-vpn.fullname" . }}-gluetun{{ end }}
        {{- end }}
        {{- end }}
        {{- with .Values.extraVolumes }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

View File

@ -0,0 +1,43 @@
{{/*
Ingress for the qBittorrent web UI. Rendered only when ingress.enabled.
Fix: the source had lost all YAML indentation; structure restored.
Note: routes to the chart's Service on .Values.service.port (top-level),
not qbittorrent.service.port — the Service maps one to the other.
*/}}
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "qbittorrent-vpn.fullname" . }}
  labels:
    {{- include "qbittorrent-vpn.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.className }}
  ingressClassName: {{ .Values.ingress.className }}
  {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      {{- if .secretName }}
      secretName: {{ .secretName }}
      {{- end }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                name: {{ include "qbittorrent-vpn.fullname" $ }}
                port:
                  number: {{ $.Values.service.port }}
          {{- end }}
    {{- end }}
{{- end }}

View File

@ -0,0 +1,55 @@
{{/*
PersistentVolumeClaims for qBittorrent config, downloads, and Gluetun state.
Each PVC is created only when its persistence is enabled AND no existingClaim
is supplied. Fix: the source had lost all YAML indentation; restored.
*/}}
{{- if and .Values.qbittorrent.persistence.config.enabled (not .Values.qbittorrent.persistence.config.existingClaim) }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "qbittorrent-vpn.fullname" . }}-config
  labels:
    {{- include "qbittorrent-vpn.labels" . | nindent 4 }}
spec:
  accessModes:
    - {{ .Values.qbittorrent.persistence.config.accessMode | quote }}
  {{- if .Values.qbittorrent.persistence.config.storageClass }}
  storageClassName: {{ .Values.qbittorrent.persistence.config.storageClass | quote }}
  {{- end }}
  resources:
    requests:
      storage: {{ .Values.qbittorrent.persistence.config.size | quote }}
{{- end }}
{{- if and .Values.qbittorrent.persistence.downloads.enabled (not .Values.qbittorrent.persistence.downloads.existingClaim) }}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "qbittorrent-vpn.fullname" . }}-downloads
  labels:
    {{- include "qbittorrent-vpn.labels" . | nindent 4 }}
spec:
  accessModes:
    - {{ .Values.qbittorrent.persistence.downloads.accessMode | quote }}
  {{- if .Values.qbittorrent.persistence.downloads.storageClass }}
  storageClassName: {{ .Values.qbittorrent.persistence.downloads.storageClass | quote }}
  {{- end }}
  resources:
    requests:
      storage: {{ .Values.qbittorrent.persistence.downloads.size | quote }}
{{- end }}
{{- if and .Values.gluetun.enabled .Values.gluetun.persistence.enabled (not .Values.gluetun.persistence.useEmptyDir) (not .Values.gluetun.persistence.existingClaim) }}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "qbittorrent-vpn.fullname" . }}-gluetun
  labels:
    {{- include "qbittorrent-vpn.labels" . | nindent 4 }}
spec:
  accessModes:
    - {{ .Values.gluetun.persistence.accessMode | quote }}
  {{- if .Values.gluetun.persistence.storageClass }}
  storageClassName: {{ .Values.gluetun.persistence.storageClass | quote }}
  {{- end }}
  resources:
    requests:
      storage: {{ .Values.gluetun.persistence.size | quote }}
{{- end }}

View File

@ -0,0 +1,18 @@
{{/*
Chart-managed Secret holding VPN credentials, created only when Gluetun is
enabled and credentials.create is true. For OpenVPN it stores username and
password under configurable keys; for WireGuard it stores the private key.
Fix: the source had lost all YAML indentation; restored.
*/}}
{{- if and .Values.gluetun.enabled .Values.gluetun.credentials.create }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "qbittorrent-vpn.fullname" . }}-vpn-credentials
  labels:
    {{- include "qbittorrent-vpn.labels" . | nindent 4 }}
type: Opaque
data:
  {{- if eq .Values.gluetun.vpn.type "openvpn" }}
  {{ .Values.gluetun.credentials.usernameKey }}: {{ .Values.gluetun.credentials.username | b64enc | quote }}
  {{ .Values.gluetun.credentials.passwordKey }}: {{ .Values.gluetun.credentials.password | b64enc | quote }}
  {{- end }}
  {{- if and (eq .Values.gluetun.vpn.type "wireguard") .Values.gluetun.vpn.wireguard.privateKey }}
  wireguard_private_key: {{ .Values.gluetun.vpn.wireguard.privateKey | b64enc | quote }}
  {{- end }}
{{- end }}

View File

@ -0,0 +1,15 @@
# Service exposing the qBittorrent web UI ("http" named container port).
# Fix: the source had lost all YAML indentation; restored.
apiVersion: v1
kind: Service
metadata:
  name: {{ include "qbittorrent-vpn.fullname" . }}
  labels:
    {{- include "qbittorrent-vpn.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "qbittorrent-vpn.selectorLabels" . | nindent 4 }}

View File

@ -0,0 +1,228 @@
## Global settings
nameOverride: ""
fullnameOverride: ""

## Deployment settings
replicaCount: 1
revisionHistoryLimit: 3

## Pod security settings
podSecurityContext:
  runAsNonRoot: false
  runAsUser: 0  # Run all containers as root
  fsGroup: 0  # Use root group for volumes

## qBittorrent settings
qbittorrent:
  image:
    repository: linuxserver/qbittorrent
    tag: "5.1.0"  # quoted so tooling never retypes the version string
    pullPolicy: IfNotPresent
  securityContext: {}
  # Open port for BitTorrent traffic
  bittorrentPort: 6881
  env:
    - name: PUID
      value: "0"  # Run as root
    - name: PGID
      value: "0"  # Root group
    - name: TZ
      value: "UTC"
    - name: WEBUI_PORT
      value: "8080"
  extraEnv: []
  service:
    port: 8080
  #resources:
  #  limits:
  #    cpu: 1000m
  #    memory: 2Gi
  #  requests:
  #    cpu: 200m
  #    memory: 512Mi
  persistence:
    config:
      enabled: true
      existingClaim: ""
      storageClass: ""
      accessMode: ReadWriteOnce
      size: 2Gi
      mountPath: /config
    downloads:
      enabled: true
      existingClaim: ""
      storageClass: ""
      accessMode: ReadWriteOnce
      size: 2Gi
      mountPath: /downloads
  # Volume mounts specific to qBittorrent
  extraVolumeMounts: []
  # Volumes specific to qBittorrent
  extraVolumes: []

# Probes for qBittorrent
probes:
  liveness:
    enabled: true
    path: /
    initialDelaySeconds: 0
    periodSeconds: 30
    timeoutSeconds: 5
    failureThreshold: 3
    successThreshold: 1
  readiness:
    enabled: true
    path: /
    initialDelaySeconds: 0
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 3
    successThreshold: 1

## Gluetun VPN settings
gluetun:
  enabled: true
  image:
    repository: qmcgaw/gluetun
    tag: "v3.40.0"
    pullPolicy: IfNotPresent
  securityContext:
    privileged: true
    capabilities:
      add:
        - NET_ADMIN
  # VPN provider configuration
  vpn:
    # Choose from: nordvpn, protonvpn, expressvpn, surfshark, mullvad, ivpn, etc.
    provider: "nordvpn"
    # Choose from: openvpn or wireguard
    type: "openvpn"
    # Server selection (comma-separated lists)
    serverCountries: "Netherlands"  # e.g. "Netherlands,Germany,Sweden"
    serverCities: ""  # e.g. "Amsterdam,Frankfurt" (optional)
    serverNames: ""  # e.g. "nl1,nl2" (optional)
    randomize: "true"  # Randomize server selection
    # OpenVPN specific settings (when type is "openvpn");
    # each key becomes an UPPERCASE environment variable
    openvpn:
      OPENVPN_PROTOCOL: "udp"
    # WireGuard specific settings (when type is "wireguard")
    wireguard:
      privateKey: ""  # Stored in the chart-managed Secret if provided
      privateKeyExistingSecret: ""
      privateKeyExistingSecretKey: ""
      addresses: ""  # e.g. "10.64.222.21/32"
      endpointIP: ""  # Optional: specify endpoint IP
      endpointPort: ""  # Optional: specify endpoint port
      publicKey: ""  # Optional: server public key
  # VPN credentials (choose one method)
  credentials:
    create: true  # set to false if using an existing secret
    # For OpenVPN (normal credentials)
    username: ""
    password: ""
    # For WireGuard, the key is set in vpn.wireguard.privateKey
    # Alternatively, reference an existing secret
    existingSecret: ""
    usernameKey: "username"
    passwordKey: "password"
  # General Gluetun settings, rendered as environment variables
  settings:
    FIREWALL: "on"
    FIREWALL_OUTBOUND_SUBNETS: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
    DNS_ADDRESS: "1.1.1.1"
    HEALTH_SERVER_PORT: "8000"
    SERVER_ALLOWLIST: "qbittorrent:8080"  # Allow access to the qBittorrent container
    FIREWALL_INPUT_PORTS: "8080"  # Allow ingress traffic to port 8080
    FIREWALL_DEBUG: "on"  # Enable firewall debugging (temporary)
    JOURNALD: "off"
    # Optional port forwarding
    VPN_PORT_FORWARDING: "off"
  # Extra environment variables
  extraEnv:
    - name: LOG_LEVEL
      value: "info"
  # Extra ports to expose
  extraPorts: []
  # - name: custom-port
  #   containerPort: 9999
  #   protocol: TCP
  resources:
    limits:
      cpu: 300m
      memory: 256Mi
    requests:
      cpu: 100m
      memory: 128Mi
  # Persistence for Gluetun state
  persistence:
    enabled: true
    # Use an ephemeral emptyDir instead of a PVC (referenced by templates)
    useEmptyDir: false
    # Fixed: was the boolean `false`; templates treat this as a claim NAME,
    # so the "unset" value is the empty string, as for the other claims.
    existingClaim: ""
    storageClass: ""
    accessMode: ReadWriteOnce
    size: 100Mi
  # Volume mounts specific to Gluetun
  extraVolumeMounts: []
  # Volumes specific to Gluetun
  extraVolumes: []

## Service settings
service:
  type: ClusterIP
  port: 8080

## Ingress settings
ingress:
  enabled: false
  className: ""
  # Fixed: was `[]`; annotations are a MAP (rendered via toYaml), not a list
  annotations: {}
  hosts:
    - host: qbittorrent.example.com
      paths:
        - path: /
          pathType: Prefix
  tls:
    - hosts:
        - qbittorrent.example.com

# Additional specifications
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
extraVolumes: []

# Temporary options for development/debugging
hostNetwork: false
initContainers: []

View File

@ -1,8 +1,8 @@
apiVersion: v2
name: recipya
description: A Helm chart for Recipya recipe manager application
description: Recipya helm chart for Kubernetes
type: application
version: 0.0.1
version: 0.0.2
appVersion: "v1.2.2"
maintainers:
- name: Richard Tomik

View File

@ -2,10 +2,15 @@
A Helm chart for deploying [Recipya](https://github.com/reaper47/recipya) on Kubernetes.
[Source Code](https://github.com/rtomik/helm-charts/tree/main/charts/recipya)
## Introduction
This chart deploys Recipya recipe manager on a Kubernetes cluster using the Helm package manager.
Source code can be found here:
- https://github.com/rtomik/helm-charts/tree/main/charts/recipya
## Prerequisites
- Kubernetes 1.19+
@ -25,12 +30,28 @@ The command deploys Recipya on the Kubernetes cluster in the default configurati
## Uninstalling the Chart
To uninstall/delete the `my-recipya` deployment:
To uninstall/delete the `recipya` deployment:
```bash
helm uninstall recipya -n recipya
```
## Important Configuration Notes
### Server URL
When deploying with an ingress, it's **critical** to set `config.server.url` to match your ingress URL (including https if you're using TLS). This ensures that redirects after login work correctly:
```yaml
config:
server:
url: "https://your-recipya-domain.com"
```
### Ingress Configuration
This chart includes optimized ingress configurations for Traefik, with support for WebSockets and proper security headers. If you're using a different ingress controller, you may need to adjust annotations accordingly.
## Parameters
### Global parameters
@ -48,24 +69,22 @@ helm uninstall recipya -n recipya
| Name | Description | Value |
|-----------------------------------------|--------------------------------------------------|-----------|
| `podSecurityContext.fsGroup` | Group ID for the Recipya container | `1000` |
| `containerSecurityContext.runAsUser` | User ID for the Recipya container | `1000` |
| `containerSecurityContext.runAsGroup` | Group ID for the Recipya container | `1000` |
| `containerSecurityContext.runAsNonRoot` | Run containers as non-root | `true` |
| `containerSecurityContext` | Security context for the container | `{}` |
### Recipya configuration parameters
| Name | Description | Value |
|-----------------------------------------|-------------------------------------------------------|----------------|
| `config.server.port` | Server port | `8078` |
| `config.server.autologin` | Whether to login automatically | `false` |
| `config.server.is_demo` | Whether the app is a demo version | `false` |
| `config.server.is_prod` | Whether the app is in production | `false` |
| `config.server.no_signups` | Whether to disable user account registrations | `false` |
| `config.server.url` | Base URL for the application | `http://0.0.0.0` |
| `config.email.address` | The email address for SendGrid | `""` |
| `config.email.sendgrid` | SendGrid API key | `""` |
| `config.documentIntelligence.endpoint` | Azure Document Intelligence endpoint | `""` |
| `config.documentIntelligence.key` | Azure Document Intelligence key | `""` |
| Name | Description | Value |
|-----------------------------------------|-------------------------------------------------------|---------------------|
| `config.server.port` | Server port | `8078` |
| `config.server.autologin` | Whether to login automatically | `false` |
| `config.server.is_demo` | Whether the app is a demo version | `false` |
| `config.server.is_prod` | Whether the app is in production | `false` |
| `config.server.no_signups` | Whether to disable user account registrations | `false` |
| `config.server.url` | Base URL for the application | `http://0.0.0.0` |
| `config.email.address` | The email address for SendGrid | `""` |
| `config.email.sendgrid` | SendGrid API key | `""` |
| `config.documentIntelligence.endpoint` | Azure Document Intelligence endpoint | `""` |
| `config.documentIntelligence.key` | Azure Document Intelligence key | `""` |
### Service parameters
@ -76,30 +95,88 @@ helm uninstall recipya -n recipya
### Ingress parameters
| Name | Description | Value |
|--------------------------|--------------------------------------------------|-------------|
| `ingress.enabled` | Enable ingress controller resource | `false` |
| `ingress.className` | IngressClass that will be used | `""` |
| `ingress.hosts[0].host` | Default host for the ingress resource | `chart-example.local` |
| `ingress.tls` | Create TLS Secret | `[]` |
| Name | Description | Value |
|-------------------------------|--------------------------------------------------|------------------------|
| `ingress.enabled` | Enable ingress controller resource | `false` |
| `ingress.className` | IngressClass that will be used | `"traefik"` |
| `ingress.annotations` | Additional ingress annotations | See values.yaml |
| `ingress.hosts[0].host` | Default host for the ingress resource | `chart-example.local` |
| `ingress.tls` | TLS configuration | `[]` |
### Persistence parameters
| Name | Description | Value |
|--------------------------------------|------------------------------------------|-----------|
| `persistence.enabled` | Enable persistence using PVC | `true` |
| `persistence.accessMode` | PVC Access Mode | `ReadWriteOnce` |
| `persistence.size` | PVC Storage Request | `1Gi` |
| `persistence.storageClass` | Storage class of backing PVC | `""` |
| Name | Description | Value |
|--------------------------------------|------------------------------------------|------------------|
| `persistence.enabled` | Enable persistence using PVC | `true` |
| `persistence.accessMode` | PVC Access Mode | `ReadWriteOnce` |
| `persistence.size` | PVC Storage Request | `1Gi` |
| `persistence.storageClass` | Storage class of backing PVC | `""` |
### Resource parameters
| Name | Description | Value |
|--------------------------|------------------------------------------|-----------|
| `resources.limits.cpu` | CPU limit | `500m` |
| `resources.limits.memory`| Memory limit | `512Mi` |
| `resources.requests.cpu` | CPU request | `100m` |
| `resources.requests.memory` | Memory request | `128Mi` |
| Name | Description | Value |
|-------------------------------|------------------------------------------|-----------|
| `resources.limits.cpu` | CPU limit | `500m` |
| `resources.limits.memory` | Memory limit | `512Mi` |
| `resources.requests.cpu` | CPU request | `100m` |
| `resources.requests.memory` | Memory request | `128Mi` |
### Probe parameters
| Name | Description | Value |
|--------------------------------------|--------------------------------------------|-----------|
| `probes.liveness.enabled` | Enable liveness probe | `true` |
| `probes.liveness.path` | Path for liveness probe | `/` |
| `probes.readiness.enabled` | Enable readiness probe | `true` |
| `probes.readiness.path` | Path for readiness probe | `/` |
## Traefik Ingress Configuration
The chart includes specially configured middlewares for Traefik to ensure proper functioning of Recipya:
```yaml
ingress:
enabled: true
className: "traefik"
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: recipya-recipya-headers@kubernetescrd
traefik.ingress.kubernetes.io/service.sticky: "true"
traefik.ingress.kubernetes.io/session-cookie-name: "recipya_session"
hosts:
- host: recipya.example.com
paths:
- path: /
pathType: ImplementationSpecific
tls:
- hosts:
- recipya.example.com
```
This configuration includes:
1. Custom Content Security Policy allowing essential scripts from unpkg.com
2. Sticky sessions for maintaining authentication
3. Proper headers for proxy operation
## Content Security Policy Configuration
The chart includes a custom middleware that configures the proper Content Security Policy for Recipya. This is particularly important as the application requires access to external scripts from unpkg.com:
```yaml
contentSecurityPolicy: >-
default-src 'self';
script-src 'self' 'unsafe-inline' 'unsafe-eval' blob: data: https://unpkg.com;
style-src 'self' 'unsafe-inline';
img-src 'self' data: blob:;
font-src 'self' data:;
connect-src 'self' ws: wss: *;
worker-src 'self' blob:;
frame-src 'self';
media-src 'self' blob:;
object-src 'none';
form-action 'self';
```
## Using Existing Secrets

View File

@ -0,0 +1,73 @@
# ConfigMap carrying the init-container bootstrap script for Recipya.
# The script prepares the data directories and creates (or patches the port
# in) config.json before the main container starts.
# Fixes: restored YAML/shell indentation lost in the source; quoted shell
# path expansions; replaced the useless `cat | jq` pipeline with a direct
# jq invocation.
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "recipya.fullname" . }}-init-script
  labels:
    {{- include "recipya.labels" . | nindent 4 }}
data:
  init.sh: |
    #!/bin/sh
    set -e

    CONFIG_DIR="/home/recipya/.config/Recipya"
    CONFIG_FILE="$CONFIG_DIR/config.json"
    TARGET_PORT={{ .Values.config.server.port }}

    echo "Starting initialization with port $TARGET_PORT..."

    # Create directories if they don't exist
    mkdir -p "$CONFIG_DIR/Backup"
    mkdir -p "$CONFIG_DIR/Database"
    mkdir -p "$CONFIG_DIR/Images"
    mkdir -p "$CONFIG_DIR/Logs"
    mkdir -p "$CONFIG_DIR/Videos"
    echo "Directories created."

    # Create config.json if it doesn't exist, or update the existing one
    if [ -f "$CONFIG_FILE" ]; then
      echo "Found existing config.json, updating port to $TARGET_PORT"
      # Rewrite the port in place via a temp file (jq cannot edit in place)
      TMP_FILE=$(mktemp)
      jq ".server.port = $TARGET_PORT" "$CONFIG_FILE" > "$TMP_FILE"
      mv "$TMP_FILE" "$CONFIG_FILE"
    else
      echo "Creating new config.json with port $TARGET_PORT"
      # Create a new config.json with chart-supplied values and the port
      cat > "$CONFIG_FILE" << EOF
    {
      "email": {
        "from": "{{ .Values.config.email.address | default "" }}",
        "sendGridAPIKey": "{{ .Values.config.email.sendgrid | default "" }}"
      },
      "integrations": {
        "azureDocumentIntelligence": {
          "endpoint": "{{ .Values.config.documentIntelligence.endpoint | default "" }}",
          "key": "{{ .Values.config.documentIntelligence.key | default "" }}"
        }
      },
      "server": {
        "autologin": {{ .Values.config.server.autologin }},
        "bypassGuide": false,
        "isDemo": {{ .Values.config.server.is_demo }},
        "noSignups": {{ .Values.config.server.no_signups }},
        "isProduction": {{ .Values.config.server.is_prod }},
        "port": $TARGET_PORT,
        "url": "{{ .Values.config.server.url }}"
      }
    }
    EOF
    fi

    # Normalize permissions using numeric IDs
    echo "Setting permissions..."
    chmod -R 755 "$CONFIG_DIR"
    find "$CONFIG_DIR" -type f -exec chmod 644 {} \;
    find "$CONFIG_DIR" -type d -exec chmod 755 {} \;

    # Hand ownership to the app user/group (runs as 1000:1000)
    echo "Changing ownership to 1000:1000..."
    chown -R 1000:1000 "$CONFIG_DIR"

    echo "Configuration completed successfully."
    ls -la "$CONFIG_DIR"

View File

@ -1,77 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "donetick.fullname" . }}-configmap
labels:
{{- include "donetick.labels" . | nindent 4 }}
data:
selfhosted.yaml: |
name: {{ .Values.config.name | quote }}
is_done_tick_dot_com: {{ .Values.config.is_done_tick_dot_com }}
is_user_creation_disabled: {{ .Values.config.is_user_creation_disabled }}
telegram:
token: {{ .Values.config.telegram.token | default "" | quote }}
pushover:
token: {{ .Values.config.pushover.token | default "" | quote }}
database:
type: {{ .Values.config.database.type | default "sqlite" | quote }}
migration: {{ .Values.config.database.migration }}
{{- if .Values.config.database.migration_skip }}
migration_skip: {{ .Values.config.database.migration_skip }}
{{- end }}
{{- if .Values.config.database.migration_retry }}
migration_retry: {{ .Values.config.database.migration_retry }}
{{- end }}
{{- if eq .Values.config.database.type "postgres" }}
{{- if not .Values.config.database.existingSecret }}
host: {{ .Values.config.database.host | quote }}
port: {{ .Values.config.database.port }}
user: {{ .Values.config.database.user | quote }}
password: {{ .Values.config.database.password | quote }}
name: {{ .Values.config.database.name | quote }}
{{- else }}
# Database credentials will be injected via environment variables from Secret
{{- end }}
{{- end }}
jwt:
{{- if .Values.config.jwt.existingSecret }}
# Secret will be injected from Secret
{{- else }}
secret: {{ .Values.config.jwt.secret | quote }}
{{- end }}
session_time: {{ .Values.config.jwt.session_time | quote }}
max_refresh: {{ .Values.config.jwt.max_refresh | quote }}
server:
port: {{ .Values.config.server.port }}
read_timeout: {{ .Values.config.server.read_timeout | quote }}
write_timeout: {{ .Values.config.server.write_timeout | quote }}
rate_period: {{ .Values.config.server.rate_period | quote }}
rate_limit: {{ .Values.config.server.rate_limit }}
cors_allow_origins:
{{- range .Values.config.server.cors_allow_origins }}
- {{ . | quote }}
{{- end }}
serve_frontend: {{ .Values.config.server.serve_frontend }}
scheduler_jobs:
due_job: {{ .Values.config.scheduler_jobs.due_job | quote }}
overdue_job: {{ .Values.config.scheduler_jobs.overdue_job | quote }}
pre_due_job: {{ .Values.config.scheduler_jobs.pre_due_job | quote }}
email:
host: {{ .Values.config.email.host | default "" | quote }}
port: {{ .Values.config.email.port | default "" | quote }}
key: {{ .Values.config.email.key | default "" | quote }}
email: {{ .Values.config.email.email | default "" | quote }}
appHost: {{ .Values.config.email.appHost | default "" | quote }}
oauth2:
{{- if .Values.config.oauth2.existingSecret }}
client_id: $DT_OAUTH2_CLIENT_ID
client_secret: $DT_OAUTH2_CLIENT_SECRET
{{- else }}
client_id: {{ .Values.config.oauth2.client_id | default "" | quote }}
client_secret: {{ .Values.config.oauth2.client_secret | default "" | quote }}
{{- end }}
auth_url: {{ .Values.config.oauth2.auth_url | default "" | quote }}
token_url: {{ .Values.config.oauth2.token_url | default "" | quote }}
user_info_url: {{ .Values.config.oauth2.user_info_url | default "" | quote }}
redirect_url: {{ .Values.config.oauth2.redirect_url | default "" | quote }}
name: {{ .Values.config.oauth2.name | default "" | quote }}

View File

@ -5,8 +5,8 @@ metadata:
labels:
{{- include "recipya.labels" . | nindent 4 }}
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
checksum/init-script: {{ include (print $.Template.BasePath "/configmap-init-script.yaml") . | sha256sum }}
spec:
replicas: {{ .Values.replicaCount }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
@ -31,12 +31,48 @@ spec:
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
# Set security context for the pod
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
fsGroup: 1000
# Init container to configure the application
initContainers:
- name: init-config
image: alpine:3.18
command: ["/bin/sh", "-c"]
args:
- |
echo "Installing jq..."
apk add --no-cache jq
echo "Running initialization script..."
/scripts/init.sh
securityContext:
runAsUser: 0 # Run as root to modify config files
runAsGroup: 0
volumeMounts:
- name: data
mountPath: /home/recipya/.config/Recipya
- name: init-script
mountPath: /scripts
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 100m
memory: 128Mi
# Main application container
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.containerSecurityContext | nindent 12 }}
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
readOnlyRootFilesystem: false
capabilities:
drop:
- ALL
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.startupArgs }}
@ -47,7 +83,7 @@ spec:
{{- end }}
ports:
- name: http
containerPort: {{ .Values.config.server.port }}
containerPort: {{ .Values.service.port }}
protocol: TCP
{{- if .Values.probes.liveness.enabled }}
livenessProbe:
@ -72,8 +108,9 @@ spec:
successThreshold: {{ .Values.probes.readiness.successThreshold }}
{{- end }}
env:
- name: RECIPYA_SERVER_PORT
value: {{ .Values.config.server.port | quote }}
# Critical environment variables for proper directory structure
- name: HOME
value: "/home/recipya"
- name: RECIPYA_SERVER_URL
value: {{ .Values.config.server.url | quote }}
- name: RECIPYA_SERVER_AUTOLOGIN
@ -97,16 +134,22 @@ spec:
name: {{ .Values.config.email.existingSecret }}
key: {{ .Values.config.email.sendgridKey }}
{{- else }}
{{- if .Values.config.email.address }}
- name: RECIPYA_EMAIL
valueFrom:
secretKeyRef:
name: {{ include "recipya.fullname" . }}-secrets
key: {{ .Values.config.email.addressKey }}
optional: true
{{- end }}
{{- if .Values.config.email.sendgrid }}
- name: RECIPYA_EMAIL_SENDGRID
valueFrom:
secretKeyRef:
name: {{ include "recipya.fullname" . }}-secrets
key: {{ .Values.config.email.sendgridKey }}
optional: true
{{- end }}
{{- end }}
{{- if .Values.config.documentIntelligence.existingSecret }}
@ -121,16 +164,22 @@ spec:
name: {{ .Values.config.documentIntelligence.existingSecret }}
key: {{ .Values.config.documentIntelligence.keyKey }}
{{- else }}
{{- if .Values.config.documentIntelligence.endpoint }}
- name: RECIPYA_DI_ENDPOINT
valueFrom:
secretKeyRef:
name: {{ include "recipya.fullname" . }}-secrets
key: {{ .Values.config.documentIntelligence.endpointKey }}
optional: true
{{- end }}
{{- if .Values.config.documentIntelligence.key }}
- name: RECIPYA_DI_KEY
valueFrom:
secretKeyRef:
name: {{ include "recipya.fullname" . }}-secrets
key: {{ .Values.config.documentIntelligence.keyKey }}
optional: true
{{- end }}
{{- end }}
{{- range .Values.env }}
@ -145,26 +194,24 @@ spec:
volumeMounts:
- name: data
mountPath: /home/recipya/.config/Recipya
{{- if not .Values.containerSecurityContext.readOnlyRootFilesystem }}
- name: tmp
mountPath: /tmp
{{- end }}
{{- with .Values.extraVolumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumes:
- name: data
persistentVolumeClaim:
claimName: {{ include "recipya.fullname" . }}-data
{{- if not .Values.containerSecurityContext.readOnlyRootFilesystem }}
- name: tmp
emptyDir: {}
{{- end }}
- name: init-script
configMap:
name: {{ include "recipya.fullname" . }}-init-script
defaultMode: 0755
{{- with .Values.extraVolumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}

View File

@ -2,9 +2,9 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "donetick.fullname" . }}
name: {{ include "recipya.fullname" . }}
labels:
{{- include "donetick.labels" . | nindent 4 }}
{{- include "recipya.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
@ -35,7 +35,7 @@ spec:
pathType: {{ .pathType }}
backend:
service:
name: {{ include "donetick.fullname" $ }}
name: {{ include "recipya.fullname" $ }}
port:
number: {{ $.Values.service.port }}
{{- end }}

View File

@ -5,10 +5,13 @@ metadata:
name: {{ include "recipya.fullname" . }}-data
labels:
{{- include "recipya.labels" . | nindent 4 }}
{{- with .Values.persistence.annotations }}
annotations:
{{- if .Values.persistence.retain }}
"helm.sh/resource-policy": keep
{{- end }}
{{- with .Values.persistence.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
spec:
accessModes:
- {{ .Values.persistence.accessMode | quote }}

View File

@ -1,3 +1,5 @@
{{- $createSecret := or (and (not .Values.config.email.existingSecret) (or .Values.config.email.address .Values.config.email.sendgrid)) (and (not .Values.config.documentIntelligence.existingSecret) (or .Values.config.documentIntelligence.endpoint .Values.config.documentIntelligence.key)) -}}
{{- if $createSecret }}
apiVersion: v1
kind: Secret
metadata:
@ -7,11 +9,20 @@ metadata:
type: Opaque
data:
{{- if not .Values.config.email.existingSecret }}
{{- if .Values.config.email.address }}
{{ .Values.config.email.addressKey }}: {{ .Values.config.email.address | b64enc }}
{{- end }}
{{- if .Values.config.email.sendgrid }}
{{ .Values.config.email.sendgridKey }}: {{ .Values.config.email.sendgrid | b64enc }}
{{- end }}
{{- end }}
{{- if not .Values.config.documentIntelligence.existingSecret }}
{{- if .Values.config.documentIntelligence.endpoint }}
{{ .Values.config.documentIntelligence.endpointKey }}: {{ .Values.config.documentIntelligence.endpoint | b64enc }}
{{- end }}
{{- if .Values.config.documentIntelligence.key }}
{{ .Values.config.documentIntelligence.keyKey }}: {{ .Values.config.documentIntelligence.key | b64enc }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -19,54 +19,72 @@ fullnameOverride: ""
podSecurityContext:
fsGroup: 1000
# Security context for the container
containerSecurityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
runAsNonRoot: true
runAsUser: 1000
runAsGroup: 1000
containerSecurityContext: {}
# Service configuration
service:
type: ClusterIP
port: 8078
# Recipya configuration
config:
email:
address: ""
sendgrid: ""
existingSecret: ""
addressKey: "email"
sendgridKey: "sendgrid"
documentIntelligence:
endpoint: ""
key: ""
existingSecret: ""
endpointKey: "di_endpoint"
keyKey: "di_key"
server:
port: 8078
autologin: false
is_demo: false
is_prod: true
no_signups: false
url: "http://0.0.0.0"
# Ingress configuration
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
  annotations: {}
# traefik.ingress.kubernetes.io/router.entrypoints: websecure
# traefik.ingress.kubernetes.io/router.middlewares: default-recipya-headers@kubernetescrd
hosts:
- host: chart-example.local
- host: recipya.<domain>
paths:
- path: /
pathType: ImplementationSpecific
pathType: Prefix
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# Persistent volume claim
persistence:
enabled: true
enabled: false
accessMode: ReadWriteOnce
size: 1Gi
# storageClass: ""
size: 5Gi
storageClass: ""
annotations: {}
retain: true
# Resource limits and requests
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
# resources:
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 100m
# memory: 128Mi
# Node selector
nodeSelector: {}
@ -99,41 +117,17 @@ extraVolumes: []
probes:
liveness:
enabled: true
path: /health
initialDelaySeconds: 10
path: /
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
readiness:
enabled: true
path: /health
initialDelaySeconds: 10
path: /
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
# Recipya configuration
config:
email:
address: ""
sendgrid: ""
existingSecret: ""
addressKey: "email"
sendgridKey: "sendgrid"
documentIntelligence:
endpoint: ""
key: ""
existingSecret: ""
endpointKey: "di_endpoint"
keyKey: "di_key"
server:
port: 8078
autologin: false
is_demo: false
is_prod: false
no_signups: false
url: "http://0.0.0.0"