Compare commits

..

6 Commits

Author SHA1 Message Date
Bart Geesink
197ca0d3ca Add esphome config files 2023-09-18 19:23:17 +02:00
Bart Geesink
3332c3ff9c add portainer 2023-09-17 11:10:32 +02:00
Bart Geesink
86b55d67d0 add jellyfin 2023-09-17 11:10:18 +02:00
Bart Geesink
fa5e2869ec Add postgres backup 2023-09-17 11:10:08 +02:00
Bart Geesink
eed4145553 Add postgresconf 2023-09-17 11:09:37 +02:00
Bart Geesink
4f6ee18495 Hass: add config 2023-07-29 16:13:47 +02:00
42 changed files with 1754 additions and 0 deletions

View File

@ -0,0 +1,214 @@
---
## Helm values for the PostgreSQL chart backing DSMR-reader.
## NOTE(review): indentation was reconstructed from the cetic/FADI postgresql
## chart layout — verify against the chart's upstream values.yaml.

## Set default image, imageTag, and imagePullPolicy.
## ref: https://hub.docker.com/_/postgres
##
image:
  repository: postgres
  tag: "11.5"
  pullPolicy: IfNotPresent
  ## Optionally specify an imagePullSecret.
  ## Secret must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  # pullSecret: myRegistrKeySecretName

## Expose the postgresql service to be accessed from outside the cluster (LoadBalancer service),
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
  type: ClusterIP
  annotations: {}
  ## clusterIP:
  ## Set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  # loadBalancerIP:
  ## Load Balancer sources
  ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
  ##
  # loadBalancerSourceRanges:
  #   - 10.10.10.0/24

## Postgresql values
postgresql:
  username: dsmr
  # NOTE(review): plaintext credential committed to VCS — consider a Secret.
  password: dsmr
  database: dsmr
  port: 5432
  # initdbArgs
  # initdbWalDir
  dataDir: /var/lib/postgresql/data
  # extraEnv
  ## PostgreSQL configuration
  ## Specify runtime configuration parameters as a dict, using camelCase, e.g.
  ## {"sharedBuffers": "500MB"}
  ## Alternatively, you can put your postgresql.conf under the configs/ directory
  ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html
  ##
  # config: {"sharedBuffers": "500MB"}
  ## PostgreSQL client authentication configuration
  ## Specify content for pg_hba.conf
  ## Default: do not create pg_hba.conf
  ## Alternatively, you can put your pg_hba.conf under the files/ directory
  # pghba: |-
  #   local all all trust
  #   host all all localhost trust
  #   host mydatabase mysuser 192.168.0.0/24 md5
  # initdbscripts: |-
  #   #!/bin/sh
  #   echo "helloworld"
  ## ConfigMap with PostgreSQL configuration
  ## NOTE: This will override postgresql.config and postgresql.pghba
  # configMap:

##
## Init containers parameters:
## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
##
volumePermissions:
  enabled: true
  image:
    registry: docker.io
    repository: debian
    tag: buster-slim
    ## Specify a imagePullPolicy
    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
    ##
    pullPolicy: Always
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ##
    # pullSecrets:
    #   - myRegistryKeySecretName
  ## Init container Security Context
  securityContext:
    runAsUser: 0

## Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
  enabled: true
  fsGroup: 1001
  runAsUser: 1001

## LDAP -> PostgreSQL role synchronisation (pg-ldap-sync), disabled here.
ldap:
  enabled: false
  pgldapconfig: |-
    # Reference: https://github.com/larskanis/pg-ldap-sync/blob/master/config/sample-config.yaml
    # Connection parameters to LDAP server
    ldap_connection:
      host: example.com
      port: 389
      auth:
        method: :simple
        username: cn=admin,dc=example,dc=com
        password: -password-goes-here-
    # Search parameters for LDAP users which should be synchronized
    ldap_users:
      base: OU=People,dc=example,dc=com
      # LDAP filter (according to RFC 2254)
      # defines to users in LDAP to be synchronized
      filter: (&(objectClass=person)(objectClass=organizationalPerson)(givenName=*)(sn=*))
      # this attribute is used as PG role name
      name_attribute: sAMAccountName
      # lowercase name for use as PG role name
      lowercase_name: true
    ldap_groups:
      base: OU=people,dc=example,dc=com
      filter: (|(cn=group1)(cn=group2)(cn=group3))
      # this attribute is used as PG role name
      name_attribute: cn
      # this attribute must reference to all member DN's of the given group
      member_attribute: member
    # Connection parameters to PostgreSQL server
    # see also: http://rubydoc.info/gems/pg/PG/Connection#initialize-instance_method
    pg_connection:
      host:
      dbname: postgres # the db name is usually "postgres"
      user: postgres # the user name is usually "postgres"
      password: postgres # kubectl get secret --namespace fadi <pod_name> -o jsonpath="{.data.postgresql-password}" | base64 --decode
    pg_users:
      # Filter for identifying LDAP generated users in the database.
      # It's the WHERE-condition to "SELECT rolname, oid FROM pg_roles"
      filter: rolcanlogin AND NOT rolsuper
      # Options for CREATE RULE statements
      create_options: LOGIN
    pg_groups:
      # Filter for identifying LDAP generated groups in the database.
      # It's the WHERE-condition to "SELECT rolname, oid FROM pg_roles"
      filter: NOT rolcanlogin AND NOT rolsuper
      # Options for CREATE RULE statements
      create_options: NOLOGIN
      grant_options:
  cron:
    schedule: "*/1 * * * *"
    repo: ceticasbl/pg-ldap-sync
    tag: latest
    restartPolicy: Never
  mountPath: /workspace
  subPath: ""

## Enable persistence using Persistent Volume Claims
## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
##
persistence:
  enabled: true
  mountPath: /var/lib/postgresql
  subPath: ""
  accessModes: [ReadWriteOnce]
  ## Storage Capacity for persistent volume
  size: 10Gi
  annotations: {}
  existingClaim: nfs-postgres-claim

## Configure liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
##
# readinessProbe:
#   httpGet:
#     path: /
#     port: http
#   initialDelaySeconds: 60
#   periodSeconds: 15
#   timeoutSeconds: 10
# livenessProbe:
#   httpGet:
#     path: /
#     port: http
#   initialDelaySeconds: 60
#   periodSeconds: 30
#   timeoutSeconds: 10

## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
## initdb scripts
## Specify dictionary of scripts to be run at first boot
## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory
##
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
#   cpu: 100m
#   memory: 128Mi
# requests:
#   cpu: 100m
#   memory: 128Mi

nodeSelector: {}

tolerations: []

32
esphome/deurbel.yml Normal file
View File

@ -0,0 +1,32 @@
# ESPHome config for the doorbell sensor (Wemos D1 mini, ESP8266).
# NOTE(review): Wi-Fi/API/OTA passwords are committed in plaintext — consider
# ESPHome `!secret` references.
esphome:
  name: deurbel

esp8266:
  board: d1_mini

# Enable logging
logger:

# Enable Home Assistant API
api:
  password: "ncc1701D"

ota:
  password: "ncc1701D"

wifi:
  ssid: "BARTANIET"
  password: "ncc1701D"

  # Enable fallback hotspot (captive portal) in case wifi connection fails
  ap:
    ssid: "Deurbel Fallback Hotspot"
    password: "jeXv60iAylaV"

captive_portal:

# Doorbell signal: GPIO goes active while the bell circuit is closed.
binary_sensor:
  - platform: gpio
    pin: D3
    name: "deurbelsignaal"
    device_class: opening

88
esphome/nibemodbus.yaml Normal file
View File

@ -0,0 +1,88 @@
# ESPHome config: NibeGW gateway on a LilyGo T-CAN485 (ESP32) board.
# Bridges the Nibe heat pump's RS-485 MODBUS40 interface to UDP for
# Home Assistant's nibe integration.
esphome:
  name: nibemodbus

esp32:
  board: esp32dev
  framework:
    type: arduino

# Enable logging
logger:

# Enable Home Assistant API
api:
  password: ""

ota:
  password: ""

wifi:
  power_save_mode: none
  ssid: "BARTANIET"
  password: "ncc1701D"

  # Enable fallback hotspot (captive portal) in case wifi connection fails
  ap:
    ssid: "Nibemodbus Fallback Hotspot"
    password: "UgU5PVw37Yec"

captive_portal:

# Load nibe component
external_components:
  - source: github://elupus/esphome-nibe

# Set pins required for LilyGo T-CAN485 board
output:
  - platform: gpio
    id: ENABLE_PIN # Enable the chip
    pin:
      number: GPIO19
      inverted: true
  - platform: gpio
    id: SE_PIN # Enable autodirection
    pin:
      number: GPIO17
      inverted: true
  - platform: gpio
    id: ENABLE_5V_PIN # Enable 5V pin for RS485 chip
    pin:
      number: GPIO16
      inverted: true

# Configure uart that will be used
uart:
  rx_pin: GPIO21
  tx_pin: GPIO22
  baud_rate: 9600

# Configure NibeGW
nibegw:
  udp:
    # The target address(s) to send data to. May be a multicast address.
    target:
      - ip: 10.86.0.108
        port: 10090
    # List of source address to accept data from, may be empty for no filter
    source:
  acknowledge:
    - MODBUS40
  # Constant replies to certain requests can be made
  constants:
    - address: MODBUS40
      token: ACCESSORY
      data: [
        0x0A, # MODBUS version low
        0x00, # MODBUS version high
        0x01, # MODBUS address?
      ]

# Some helper functions to restart ESPHome from HA
button:
  - platform: restart
    name: Nibegw Restart
  - platform: safe_mode
    name: Nibegw Safe Mode Boot

View File

@ -0,0 +1,154 @@
# Configure a default setup of Home Assistant (frontend, api, etc)
default_config:

config:

# RFLink gateway (serial-over-TCP bridge).
rflink:
  host: 192.168.86.129
  port: 20000

light:
  - platform: rflink
    automatic_add: true
    device_defaults:
      fire_event: true
      signal_repetitions: 5
    devices:
      newkaku_01a3770e_1:
        name: schemerlamp
        type: switchable
      ansluta_6cf2_0:
        name: boekenkast
        type: hybrid

sensor:
  - platform: rflink
    automatic_add: true
  - platform: saj
    name: inverter
    host: 10.111.222.145
  - platform: greenchoice
    name: meterstanden
    # NOTE(review): plaintext credentials committed to VCS — move to secrets.yaml.
    password: ncc1701D
    username: anitajurgens@hotmail.com

# Waste collection calendar (Afvalbeheer custom component).
afvalbeheer:
  wastecollector: ACV
  resources:
    - restafval
    - gft
    - papier
    - pmd
  postcode: 6707JB
  streetnumber: 43
  suffix: D # (optional)
  upcomingsensor: 1 # (optional)
  dateformat: '%d-%m-%Y' # (optional)
  dateonly: 0 # (optional)
  name: "" # (optional)
  nameprefix: 1 # (optional)
  builtiniconsnew: 1 # (optional)
  dutch: 1

# Uncomment this if you are using SSL/TLS, running in Docker container, etc.
http:
  trusted_proxies:
    - 10.0.0.0/8
    - 192.168.86.0/24
  base_url: https://home.geesink.org
  use_x_forwarded_for: true

# Text to speech
tts:
  - platform: google_translate

group: !include groups.yaml
automation: !include automations.yaml
script: !include scripts.yaml
scene: !include scenes.yaml

logger:
  default: error
  logs:
    rflink: debug
    homeassistant.components.rflink: debug

mqtt:
  light:
    - schema: json
      name: Keuken spotjes
      command_topic: "zigbee2mqtt/keuken_leds/set"
      state_topic: "zigbee2mqtt/keuken_leds"
      color_temp: true
      brightness: true
    - schema: json
      name: Woonkamer spotjes
      command_topic: "zigbee2mqtt/woonkamer_plafond/set"
      state_topic: "zigbee2mqtt/woonkamer_plafond"
      color_temp: true
      brightness: true
  sensor:
    - name: "Shelly sensor state"
      state_topic: "shellies/shellydw-FABA15/sensor/state"
    - name: "Lux"
      state_topic: "shellies/shellydw-FABA15/sensor/lux"
    - name: "Shelly sensor battery"
      state_topic: "shellies/shellydw-FABA15/sensor/battery"
      unit_of_measurement: "%"

recorder:
  purge_keep_days: 5

zone:
  - name: Home
    latitude: 51.972899
    longitude: 5.671515
    radius: 150

vacuum:
  - platform: roomba
    host: 192.168.86.104
    username: 69F7410851041790
    password: ':1:1578481444:ZUR9Ill9x9bqsY5e'

google_assistant:
  project_id: thuis-assistant

shopping_list:

#binary_sensor:
#  - platform: rflink
#    devices:
#      eurodomest_0556f1_06:
#        name: Afstandsbediening A
#        off_delay: 5
#        force_update: true
#      ev1527_0aa90e_08:
#        name: Afstandsbediening B
#        off_delay: 5
#        force_update: true
#
#media_player:
#  - platform: samsungtv_tizen
#    host: 192.168.86.116
#    port: 8002
#    mac: 44:5C:E9:7B:F5:4A

# Nibe heat pump (fed via the nibemodbus ESPHome gateway).
nibe:
  systems:
    - system: "87950"
      units:
        - unit: 0
          categories: true
        - unit: 1
          categories: true
      sensors:
        - indoor_temperature
        - hot_water_temperature

webhook:

my:

70
hass/configuration.yml Normal file
View File

@ -0,0 +1,70 @@
# Home Assistant configuration (Kubernetes deployment variant).
default_config:

http:
  trusted_proxies:
    - "10.233.0.0/16"
  use_x_forwarded_for: true

frontend:
  themes: !include_dir_merge_named themes

# Text to speech
tts:
  - platform: google_translate

recorder:
  # NOTE(review): DB credentials embedded in the URL and committed to VCS —
  # consider secrets.yaml.
  db_url: mysql://hass:Ier9Xi6toaquah1ohto3@galera-mariadb-galera/hass?charset=utf8mb4

group: !include groups.yaml
automation: !include automations.yaml
script: !include scripts.yaml
scene: !include scenes.yaml

# RFLink gateway (serial-over-TCP bridge).
rflink:
  host: 192.168.86.128
  port: 20000

light:
  - platform: rflink
    automatic_add: true
    device_defaults:
      fire_event: true
      signal_repetitions: 5
    devices:
      newkaku_01a3770e_1:
        name: schemerlamp
        type: switchable
      ansluta_6cf2_0:
        name: boekenkast
        type: hybrid

sensor:
  - platform: rflink
    automatic_add: true
  - platform: saj
    name: inverter
    host: 10.111.222.145

mqtt:
  light:
    - schema: json
      name: Keuken spotjes
      command_topic: "zigbee2mqtt/keuken_leds/set"
      state_topic: "zigbee2mqtt/keuken_leds"
      color_temp: true
      brightness: true
    - schema: json
      name: Woonkamer spotjes
      command_topic: "zigbee2mqtt/woonkamer_plafond/set"
      state_topic: "zigbee2mqtt/woonkamer_plafond"
      color_temp: true
      brightness: true
  sensor:
    - name: "Shelly sensor state"
      state_topic: "shellies/shellydw-FABA15/sensor/state"
    - name: "Lux"
      state_topic: "shellies/shellydw-FABA15/sensor/lux"
    - name: "Shelly sensor battery"
      state_topic: "shellies/shellydw-FABA15/sensor/battery"
      unit_of_measurement: "%"

25
hass/hassingress.yaml Normal file
View File

@ -0,0 +1,25 @@
# Ingress: exposes Home Assistant (hasssvc:8123) at home.geesink.org via haproxy,
# TLS via cert-manager (letsencrypt-prod), access restricted by source-IP whitelist.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: hassingress
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
    haproxy.org/whitelist: "192.168.86.0/24,10.233.71.0/24"
spec:
  ingressClassName: haproxy
  rules:
    - host: "home.geesink.org"
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: hasssvc
                port:
                  number: 8123
  tls:
    - secretName: hass-cert-prod
      hosts:
        - home.geesink.org

20
hass/hassingress2.yaml Normal file
View File

@ -0,0 +1,20 @@
# Test ingress: exposes hasssvc:8123 at test.geesink.org (no TLS section).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: test
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
    haproxy.org/whitelist: "192.168.86.0/24,10.233.71.0/24"
spec:
  ingressClassName: haproxy
  rules:
    - host: "test.geesink.org"
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: hasssvc
                port:
                  number: 8123

28
hass/homeass.yml Normal file
View File

@ -0,0 +1,28 @@
# Deployment: single-replica Home Assistant with /config on the "poddata" PVC.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hass
  labels:
    app: hass
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hass
  template:
    metadata:
      labels:
        app: hass
    spec:
      containers:
        - name: hass
          image: homeassistant/home-assistant:stable
          ports:
            - containerPort: 8123
          volumeMounts:
            - name: datadiskhass
              mountPath: "/config"
      volumes:
        - name: datadiskhass
          persistentVolumeClaim:
            claimName: poddata

13
hass/old/hassingress.yaml Normal file
View File

@ -0,0 +1,13 @@
# Legacy ingress (kept under old/).
# NOTE(review): extensions/v1beta1 Ingress was removed in Kubernetes 1.22;
# this manifest will not apply on current clusters.
kind: Ingress
apiVersion: extensions/v1beta1
metadata:
  name: ingress-hass80
spec:
  rules:
    - host: home.geesink.org
      http:
        paths:
          - path: /
            backend:
              serviceName: hass
              servicePort: 8123

View File

@ -0,0 +1,14 @@
---
# Traefik IngressRoute (legacy, kept under old/): plain-HTTP route to hass:8123.
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: hassingress80
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`home.geesink.org`)
      kind: Rule
      services:
        - name: hass
          port: 8123

View File

@ -0,0 +1,15 @@
# Traefik IngressRoute (legacy, kept under old/): HTTPS route to hass:8123.
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: hassingress443
spec:
  entryPoints:
    - websecure
  routes:
    - match: Host(`home.geesink.org`)
      kind: Rule
      services:
        - name: hass
          port: 8123
  tls: # This route uses TLS
    certResolver: myresolver

12
hass/old/pv.yaml Normal file
View File

@ -0,0 +1,12 @@
# PersistentVolume (legacy, kept under old/): NFS export on the Synology NAS.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs
spec:
  capacity:
    storage: 1Mi
  accessModes:
    - ReadWriteMany
  nfs:
    server: 192.168.86.86
    path: "/volume1/nfs_k3s/hass/"

11
hass/old/pvc.yaml Normal file
View File

@ -0,0 +1,11 @@
# PersistentVolumeClaim (legacy, kept under old/) matching the "nfs" PV.
# Empty storageClassName disables dynamic provisioning so the static PV binds.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: ""
  resources:
    requests:
      storage: 1Mi

13
hass/old/servicehass.yml Normal file
View File

@ -0,0 +1,13 @@
# Service (legacy, kept under old/): HA web UI plus the NibeGW UDP port.
apiVersion: v1
kind: Service
metadata:
  name: hass
spec:
  selector:
    app: hass
  ports:
    - name: hass
      port: 8123
    - name: udpnibe
      port: 10090
      protocol: UDP

10
hass/servicehass.yml Normal file
View File

@ -0,0 +1,10 @@
# ClusterIP service for the Home Assistant web UI (targeted by the ingresses).
apiVersion: v1
kind: Service
metadata:
  name: hasssvc
spec:
  selector:
    app: hass
  ports:
    - name: hass
      port: 8123

View File

@ -0,0 +1,13 @@
# LoadBalancer service exposing the NibeGW UDP port on a fixed LAN IP
# (matches the `target` address in esphome/nibemodbus.yaml).
apiVersion: v1
kind: Service
metadata:
  name: hasssvcudptraffic
spec:
  type: LoadBalancer
  loadBalancerIP: 10.86.0.108
  selector:
    app: hass
  ports:
    - name: udpnibe
      port: 10090
      protocol: UDP

25
jellyfin/jellyingress.yml Normal file
View File

@ -0,0 +1,25 @@
# Ingress: exposes Jellyfin (jellyfin:8096) at jellyfin.geesink.org via haproxy.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: jellyfin
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
    haproxy.org/whitelist: "192.168.86.0/24,10.233.71.0/24"
spec:
  ingressClassName: haproxy
  rules:
    - host: "jellyfin.geesink.org"
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: jellyfin
                port:
                  number: 8096
  tls:
    - hosts:
        # Fixed: previously listed home.geesink.org, which does not match the
        # rule host above — cert-manager would issue a cert for the wrong name.
        - jellyfin.geesink.org
      secretName: jelly-cert-prod

11
jellyfin/pvc.yaml Normal file
View File

@ -0,0 +1,11 @@
# PVC for Jellyfin configuration data (longhorn-backed).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jellyfin-config
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 1Gi

View File

@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: hassingress
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
haproxy.org/whitelist: "192.168.86.0/24,10.233.71.0/24"
spec:
ingressClassName: haproxy
rules:
- host: "home.geesink.org"
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: hasssvc
port:
number: 8123
tls:
-
secretName: hass-cert-prod
hosts:
- home.geesink.org

View File

@ -0,0 +1,45 @@
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
# NOTE(review): this is a live-object dump (`kubectl edit` buffer) — it includes
# server-managed fields (creationTimestamp, resourceVersion, uid, status) that
# should be stripped before reapplying.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
    kubernetes.io/ingress.class: haproxy
    meta.helm.sh/release-name: portainer
    meta.helm.sh/release-namespace: portainer
  creationTimestamp: "2023-05-31T18:17:22Z"
  generation: 7
  labels:
    app.kubernetes.io/instance: portainer
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: portainer
    app.kubernetes.io/version: ce-latest-ee-2.18.3
    helm.sh/chart: portainer-1.0.43
  name: portainer
  namespace: portainer
  resourceVersion: "17547843"
  uid: 2471a902-9fad-49af-a56b-9d4173b562c9
spec:
  ingressClassName: haproxy
  rules:
    - host: portainer.geesink.org
      http:
        paths:
          - backend:
              service:
                name: portainer
                port:
                  number: 9443
            path: /
            pathType: Prefix
  tls:
    - hosts:
        - portainer.geesink.org
      secretName: portainer-cert-prod
status:
  loadBalancer:
    ingress:
      - ip: 10.86.0.100

6
portainer/install Normal file
View File

@ -0,0 +1,6 @@
# Install or upgrade Portainer into the "portainer" namespace (created if
# absent) from the upstream chart. Service stays cluster-internal (ClusterIP),
# HTTP is disabled in favour of TLS (tls.force), and an haproxy-class ingress
# is created to expose the UI.
helm upgrade --install --create-namespace -n portainer portainer portainer/portainer \
--set service.type=ClusterIP \
--set tls.force=true \
--set ingress.enabled=true \
--set ingress.ingressClassName=haproxy

Binary file not shown.

View File

@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
# (Consumed by `helm package`; syntax is .gitignore-like.)
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,14 @@
# Helm chart metadata for the vendored Portainer chart.
apiVersion: v2
appVersion: ce-latest-ee-2.18.3
description: Helm chart used to deploy the Portainer for Kubernetes
home: https://www.portainer.io
icon: https://github.com/portainer/portainer/raw/develop/app/assets/ico/apple-touch-icon.png
maintainers:
  - email: platform-team@portainer.io
    name: Portainer
    url: https://www.portainer.io
name: portainer
sources:
  - https://github.com/portainer/k8s
type: application
version: 1.0.43

View File

@ -0,0 +1,92 @@
# Deploy Portainer using Helm Chart
Before proceeding, ensure to create a namespace in advance.
For instance:
```bash
kubectl create namespace portainer
```
# Install the chart repository
```bash
helm repo add portainer https://portainer.github.io/k8s/
helm repo update
```
# Testing the Chart
Execute the following for testing the chart:
```bash
helm install --dry-run --debug portainer -n portainer deploy/helm/portainer
```
# Installing the Chart
Execute the following for installing the chart:
```bash
helm upgrade -i -n portainer portainer portainer/portainer
## Refer to the output NOTES on how-to access Portainer web
## An example is attached below
NOTES:
1. Get the application URL by running these commands:
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace portainer svc -w portainer'
export SERVICE_IP=$(kubectl get svc --namespace portainer portainer --template "{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}")
echo http://$SERVICE_IP:9000
http://20.40.176.8:9000
```
# Deleting the Chart
Execute the following for deleting the chart:
```bash
## Delete the Helm Chart
helm delete -n portainer portainer
## Delete the Namespace
kubectl delete namespace portainer
```
# Chart Configuration
The following table lists the configurable parameters of the Portainer chart and their default values. The values file can be found under `deploy/helm/portainer/values.yaml`.
*The list of parameters is kept up to date.*
| Parameter | Description | Default |
| - | - | - |
| `replicaCount` | Number of Portainer service replicas (ALWAYS set to 1) | `1` |
| `image.repository` | Portainer Docker Hub repository | `portainer/portainer-ce` |
| `image.tag` | Tag for the Portainer image | `latest` |
| `image.pullPolicy` | Portainer image pulling policy | `IfNotPresent` |
| `imagePullSecrets` | If Portainer image requires to be in a private repository | `nil` |
| `nodeSelector` | Used to apply a nodeSelector to the deployment | `{}` |
| `serviceAccount.annotations` | Annotations to add to the service account | `null` |
| `serviceAccount.name` | The name of the service account to use | `portainer-sa-clusteradmin` |
| `service.type` | Service Type for the main Portainer Service; ClusterIP, NodePort and LoadBalancer | `LoadBalancer` |
| `service.httpPort` | HTTP port for accessing Portainer Web | `9000` |
| `service.httpNodePort` | Static NodePort for accessing Portainer Web. Specify only if the type is NodePort | `30777` |
| `service.edgePort` | TCP port for accessing Portainer Edge | `8000` |
| `service.edgeNodePort` | Static NodePort for accessing Portainer Edge. Specify only if the type is NodePort | `30776` |
| `service.annotations` | Annotations to add to the service | `{}` |
| `feature.flags` | Enable one or more features separated by spaces. For instance, `--feat=open-amt` | `nil` |
| `ingress.enabled` | Create an ingress for Portainer | `false` |
| `ingress.ingressClassName` | For Kubernetes >= 1.18 you should specify the ingress-controller via the field `ingressClassName`. For instance, `nginx` | `nil` |
| `ingress.annotations` | Annotations to add to the ingress. For instance, `kubernetes.io/ingress.class: nginx` | `{}` |
| `ingress.hosts.host` | URL for Portainer Web. For instance, `portainer.example.io` | `nil` |
| `ingress.hosts.paths.path` | Path for the Portainer Web. | `/` |
| `ingress.hosts.paths.port` | Port for the Portainer Web. | `9000` |
| `ingress.tls` | TLS support on ingress. Must create a secret with TLS certificates in advance | `[]` |
| `resources` | Portainer resource requests and limits | `{}` |
| `tls.force` | Force Portainer to be configured to use TLS only | `false` |
| `tls.existingSecret` | Mount the existing TLS secret into the pod | `""` |
| `mtls.enable` | Option to specify mTLS certs to be used by Portainer | `false` |
| `mtls.existingSecret` | Mount the existing mtls secret into the pod | `""` |
| `persistence.enabled` | Whether to enable data persistence | `true` |
| `persistence.existingClaim` | Name of an existing PVC to use for data persistence | `nil` |
| `persistence.size` | Size of the PVC used for persistence | `10Gi` |
| `persistence.annotations` | Annotations to apply to PVC used for persistence | `{}` |
| `persistence.storageClass` | StorageClass to apply to PVC used for persistence | `default` |
| `persistence.accessMode` | AccessMode for persistence | `ReadWriteOnce` |
| `persistence.selector` | Selector for persistence | `nil` |

View File

@ -0,0 +1,27 @@
{{/*
Post-install notes: print the access URL for whichever exposure mode is
configured (ingress, NodePort, LoadBalancer, or ClusterIP port-forward).
*/}}
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
Use the URL below to access the application
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ if .port }}:{{ .port }}{{ else }}{{ end }}{{.path}}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
Get the application URL by running these commands:
{{- if .Values.tls.force }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "portainer.fullname" . }})
{{- else }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ include "portainer.fullname" . }})
{{- end}}
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo https://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
Get the application URL by running these commands:
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "portainer.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "portainer.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo https://$SERVICE_IP:{{ .Values.service.httpsPort }}
{{- else if contains "ClusterIP" .Values.service.type }}
Get the application URL by running these commands:
  {{/* Fixed: the jsonpath expression and the echo line had been fused/truncated. */}}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "portainer.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:9443 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9443:9443
{{- end }}

View File

@ -0,0 +1,87 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "portainer.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "portainer.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "portainer.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "portainer.labels" -}}
helm.sh/chart: {{ include "portainer.chart" . }}
{{ include "portainer.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

{{/*
Selector labels
*/}}
{{- define "portainer.selectorLabels" -}}
app.kubernetes.io/name: {{ include "portainer.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "portainer.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "portainer.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

{{/*
Provide a pre-defined claim or a claim based on the Release
*/}}
{{- define "portainer.pvcName" -}}
{{- if .Values.persistence.existingClaim }}
{{- .Values.persistence.existingClaim }}
{{- else -}}
{{- template "portainer.fullname" . }}
{{- end -}}
{{- end -}}

{{/*
Generate a right Ingress apiVersion
*/}}
{{- define "ingress.apiVersion" -}}
{{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.GitVersion -}}
networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
networking.k8s.io/v1beta1
{{- else -}}
{{/* Fixed: "extensions/v1" is not a valid Ingress API group/version; pre-1.14 clusters use extensions/v1beta1. */}}
extensions/v1beta1
{{- end }}
{{- end -}}

View File

@ -0,0 +1,193 @@
{{/*
Portainer Deployment. Recreate strategy (single replica, one data volume);
probe port/scheme depend on tls.force, edition, and image tag (HTTPS-only
builds: CE >= 2.6.0, EE >= 2.7.0, or tag "latest").
NOTE(review): indentation reconstructed to match the upstream portainer chart.
*/}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "portainer.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    io.portainer.kubernetes.application.stack: portainer
    {{- include "portainer.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  strategy:
    type: "Recreate"
  selector:
    matchLabels:
      {{- include "portainer.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "portainer.selectorLabels" . | nindent 8 }}
    spec:
      nodeSelector: {{- toYaml .Values.nodeSelector | nindent 8 -}}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "portainer.serviceAccountName" . }}
      volumes:
        {{- if .Values.persistence.enabled }}
        - name: "data"
          persistentVolumeClaim:
            claimName: {{ template "portainer.pvcName" . }}
        {{- end }}
        {{- if .Values.tls.existingSecret }}
        - name: certs
          secret:
            secretName: {{ .Values.tls.existingSecret }}
        {{- end }}
        {{- if .Values.mtls.existingSecret }}
        - name: mtlscerts
          secret:
            secretName: {{ .Values.mtls.existingSecret }}
        {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          {{- if .Values.enterpriseEdition.enabled }}
          image: "{{ .Values.enterpriseEdition.image.repository }}:{{ .Values.enterpriseEdition.image.tag }}"
          imagePullPolicy: {{ .Values.enterpriseEdition.image.pullPolicy }}
          {{- else }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          {{- end }}
          args:
            {{- if .Values.tls.force }}
            - --http-disabled
            {{- end }}
            {{- if .Values.tls.existingSecret }}
            - --sslcert=/certs/tls.crt
            - --sslkey=/certs/tls.key
            {{- end }}
            {{- if .Values.mtls.existingSecret }}
            - --mtlscacert=/certs/mtls/mtlsca.crt
            - --mtlscert=/certs/mtls/mtlscert.crt
            - --mtlskey=/certs/mtls/mtlskey.key
            {{- end }}
            {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.edgeNodePort))) }}
            - '--tunnel-port={{ .Values.service.edgeNodePort }}'
            {{- end }}
            {{- if (not (empty .Values.feature.flags)) }}
            - '{{ .Values.feature.flags }}'
            {{- end }}
          volumeMounts:
            {{- if .Values.persistence.enabled }}
            - name: data
              mountPath: /data
            {{- end }}
            {{- if .Values.tls.existingSecret }}
            - name: certs
              mountPath: /certs
              readOnly: true
            {{- end }}
            {{- if .Values.mtls.existingSecret }}
            - name: mtlscerts
              mountPath: /certs/mtls
              readOnly: true
            {{- end }}
          ports:
            {{- if not .Values.tls.force }}
            - name: http
              containerPort: 9000
              protocol: TCP
            {{- end }}
            - name: https
              containerPort: 9443
              protocol: TCP
            - name: tcp-edge
              containerPort: 8000
              protocol: TCP
          livenessProbe:
            failureThreshold: 3
            initialDelaySeconds: 45
            httpGet:
              path: /
              {{- if .Values.tls.force }}
              port: 9443
              scheme: HTTPS
              {{- else }}
              {{- if .Values.enterpriseEdition.enabled }}
              {{- if regexMatch "^[0-9]+\\.[0-9]+\\.[0-9]+$" .Values.enterpriseEdition.image.tag }}
              {{- if eq (semver .Values.enterpriseEdition.image.tag | (semver "2.7.0").Compare) -1 }}
              port: 9443
              scheme: HTTPS
              {{- else }}
              port: 9000
              scheme: HTTP
              {{- end }}
              {{- else }}
              {{- if eq .Values.enterpriseEdition.image.tag "latest" }}
              port: 9443
              scheme: HTTPS
              {{- else }}
              port: 9000
              scheme: HTTP
              {{- end }}
              {{- end}}
              {{- else }}
              {{- if regexMatch "^[0-9]+\\.[0-9]+\\.[0-9]+$" .Values.image.tag }}
              {{- if eq (semver .Values.image.tag | (semver "2.6.0").Compare) -1 }}
              port: 9443
              scheme: HTTPS
              {{- else }}
              port: 9000
              scheme: HTTP
              {{- end}}
              {{- else }}
              {{- if eq .Values.image.tag "latest" }}
              port: 9443
              scheme: HTTPS
              {{- else }}
              port: 9000
              scheme: HTTP
              {{- end }}
              {{- end }}
              {{- end }}
              {{- end }}
          readinessProbe:
            failureThreshold: 3
            initialDelaySeconds: 45
            httpGet:
              path: /
              {{- if .Values.tls.force }}
              port: 9443
              scheme: HTTPS
              {{- else }}
              {{- if .Values.enterpriseEdition.enabled }}
              {{- if regexMatch "^[0-9]+\\.[0-9]+\\.[0-9]+$" .Values.enterpriseEdition.image.tag }}
              {{- if eq (semver .Values.enterpriseEdition.image.tag | (semver "2.7.0").Compare) -1 }}
              port: 9443
              scheme: HTTPS
              {{- else }}
              port: 9000
              scheme: HTTP
              {{- end }}
              {{- else }}
              {{- if eq .Values.enterpriseEdition.image.tag "latest" }}
              port: 9443
              scheme: HTTPS
              {{- else }}
              port: 9000
              scheme: HTTP
              {{- end }}
              {{- end}}
              {{- else }}
              {{- if regexMatch "^[0-9]+\\.[0-9]+\\.[0-9]+$" .Values.image.tag }}
              {{- if eq (semver .Values.image.tag | (semver "2.6.0").Compare) -1 }}
              port: 9443
              scheme: HTTPS
              {{- else }}
              port: 9000
              scheme: HTTP
              {{- end}}
              {{- else }}
              {{- if eq .Values.image.tag "latest" }}
              port: 9443
              scheme: HTTPS
              {{- else }}
              port: 9000
              scheme: HTTP
              {{- end }}
              {{- end }}
              {{- end }}
              {{- end }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}

View File

@ -0,0 +1,60 @@
{{/*
Ingress for the Portainer UI. Rendered only when .Values.ingress.enabled is true.
The "ingress.apiVersion" helper selects networking.k8s.io/v1 vs a legacy API,
which drives the backend syntax below. When .Values.tls.force is set the backend
defaults to the HTTPS service port (9443), otherwise the HTTP port (9000); a
per-host .port value overrides either default.
*/}}
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "portainer.fullname" . -}}
{{- $tlsforced := .Values.tls.force -}}
{{- $apiVersion := include "ingress.apiVersion" . -}}
apiVersion: {{ $apiVersion }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "portainer.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- with .Values.ingress.ingressClassName }}
  ingressClassName: {{ . }}
  {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path | default "/" }}
            {{- if eq $apiVersion "networking.k8s.io/v1" }}
            {{/* pathType is mandatory in networking.k8s.io/v1 */}}
            pathType: Prefix
            {{- end }}
            backend:
              {{- if eq $apiVersion "networking.k8s.io/v1" }}
              service:
                name: {{ $fullName }}
                port:
                  {{- if $tlsforced }}
                  number: {{ .port | default 9443 }}
                  {{- else }}
                  number: {{ .port | default 9000 }}
                  {{- end }}
              {{- else }}
              {{/* Legacy (extensions/v1beta1 or networking.k8s.io/v1beta1) backend shape. */}}
              serviceName: {{ $fullName }}
              {{- if $tlsforced }}
              servicePort: {{ .port | default 9443 }}
              {{- else }}
              servicePort: {{ .port | default 9000 }}
              {{- end }}
              {{- end }}
          {{- end }}
    {{- end }}
{{- end }}

View File

@ -0,0 +1,6 @@
{{/*
Optionally create the target namespace when .Values.createNamespace is true.
Trimmed delimiters ({{- ... }}) prevent the template actions from emitting
stray blank lines into the rendered manifest.
*/}}
{{- if .Values.createNamespace }}
apiVersion: v1
kind: Namespace
metadata:
  # NOTE(review): the namespace name is hard-coded to "portainer" rather than
  # derived from .Release.Namespace - confirm the release is installed there.
  name: portainer
{{- end }}

View File

@ -0,0 +1,32 @@
{{/*
PersistentVolumeClaim for Portainer's data directory. Rendered only when
persistence is enabled AND no existing claim name was supplied (otherwise the
deployment mounts .Values.persistence.existingClaim instead).
NOTE(review): the volume.beta/volume.alpha storage-class annotations are the
legacy pre-1.6 mechanism; modern charts set spec.storageClassName - confirm
the target cluster still honours the annotation form before changing it.
*/}}
{{- if .Values.persistence.enabled -}}
{{- if not .Values.persistence.existingClaim -}}
---
kind: "PersistentVolumeClaim"
apiVersion: "v1"
metadata:
  name: {{ template "portainer.fullname" . }}
  namespace: {{ .Release.Namespace }}
  annotations:
    {{- if .Values.persistence.storageClass }}
    volume.beta.kubernetes.io/storage-class: {{ .Values.persistence.storageClass | quote }}
    {{- else }}
    {{/* No class requested: fall back to the legacy "generic" alpha annotation. */}}
    volume.alpha.kubernetes.io/storage-class: "generic"
    {{- end }}
  {{- if .Values.persistence.annotations }}
  {{/* NOTE(review): indent 2 places extra annotations at 2 spaces - verify the
       rendered output nests them under metadata.annotations as intended. */}}
{{ toYaml .Values.persistence.annotations | indent 2 }}
  {{ end }}
  labels:
    io.portainer.kubernetes.application.stack: portainer
    {{- include "portainer.labels" . | nindent 4 }}
spec:
  accessModes:
    - {{ default "ReadWriteOnce" .Values.persistence.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.size | quote }}
  {{- if .Values.persistence.selector }}
  selector:
{{ toYaml .Values.persistence.selector | indent 4 }}
  {{ end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,14 @@
{{/*
Binds the chart's ServiceAccount to the built-in cluster-admin ClusterRole.
This grants full cluster access to the Portainer pod; scope it down with a
narrower ClusterRole if that is more than this installation needs.
*/}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "portainer.fullname" . }}
  labels:
    {{- include "portainer.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  # Built-in superuser role - not created by this chart.
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    namespace: {{ .Release.Namespace }}
    name: {{ include "portainer.serviceAccountName" . }}

View File

@ -0,0 +1,47 @@
{{/*
Service exposing Portainer's three endpoints:
  - http  (targetPort 9000)  - omitted entirely when .Values.tls.force is set
  - https (targetPort 9443)  - always exposed
  - edge  (tunnel port)      - port number differs by service type (see below)
Optional nodePort values are applied only when the service type is NodePort.
*/}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "portainer.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    io.portainer.kubernetes.application.stack: portainer
    {{- include "portainer.labels" . | nindent 4 }}
  {{- if .Values.service.annotations }}
  annotations:
    {{- range $key, $value := .Values.service.annotations }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
  {{- end }}
spec:
  type: {{ .Values.service.type }}
  ports:
    {{- if not .Values.tls.force }}
    - port: {{ .Values.service.httpPort }}
      targetPort: 9000
      protocol: TCP
      name: http
      {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.httpNodePort))) }}
      nodePort: {{ .Values.service.httpNodePort}}
      {{- end }}
    {{- end }}
    - port: {{ .Values.service.httpsPort }}
      targetPort: 9443
      protocol: TCP
      name: https
      {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.httpsNodePort))) }}
      nodePort: {{ .Values.service.httpsNodePort}}
      {{- end }}
    {{/* Edge port: in NodePort mode the service port mirrors the node port
         value itself; otherwise .Values.service.edgePort is used. */}}
    {{- if (eq .Values.service.type "NodePort") }}
    - port: {{ .Values.service.edgeNodePort }}
      targetPort: {{ .Values.service.edgeNodePort }}
    {{- else }}
    - port: {{ .Values.service.edgePort }}
      targetPort: {{ .Values.service.edgePort }}
    {{- end }}
      protocol: TCP
      name: edge
      {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.edgeNodePort))) }}
      nodePort: {{ .Values.service.edgeNodePort }}
      {{- end }}
  selector:
    {{- include "portainer.selectorLabels" . | nindent 4 }}

View File

@ -0,0 +1,11 @@
{{/*
ServiceAccount used by the Portainer pod; its name comes from the
"portainer.serviceAccountName" helper and is referenced by the
ClusterRoleBinding in this chart.
*/}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "portainer.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "portainer.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}

View File

@ -0,0 +1,18 @@
{{/*
Helm test hook: a busybox pod that wgets the Portainer service on its HTTP port.
Skipped entirely when .Values.disableTest is set.
NOTE(review): the service template omits httpPort when tls.force=true, so this
test targets a port the Service does not expose on TLS-only installs - confirm
before relying on "helm test" there.
*/}}
{{- if not .Values.disableTest -}}
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "portainer.fullname" . }}-test-connection"
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "portainer.labels" . | nindent 4 }}
  annotations:
    # Run only on "helm test", never on install/upgrade.
    "helm.sh/hook": test
spec:
  containers:
    - name: wget
      image: busybox
      command: ['wget']
      args: ['{{ include "portainer.fullname" . }}:{{ .Values.service.httpPort }}']
  restartPolicy: Never
{{ end }}

View File

@ -0,0 +1,77 @@
# Default values for portainer.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

# If enterpriseEdition is enabled, then use the values below _instead_ of those in .image
enterpriseEdition:
  enabled: false
  image:
    repository: portainer/portainer-ee
    # Quoted so the version is always parsed as a string.
    tag: "2.18.3"
    pullPolicy: Always

image:
  repository: portainer/portainer-ce
  tag: "2.18.3"
  pullPolicy: Always

imagePullSecrets: []
nodeSelector: {}

serviceAccount:
  annotations: {}
  name: portainer-sa-clusteradmin

service:
  # Set the httpNodePort and edgeNodePort only if the type is NodePort
  # For Ingress, set the type to be ClusterIP and set ingress.enabled to true
  # For Cloud Providers, set the type to be LoadBalancer
  type: NodePort
  httpPort: 9000
  httpsPort: 9443
  httpNodePort: 30777
  httpsNodePort: 30779
  edgePort: 8000
  edgeNodePort: 30776
  annotations: {}

tls:
  # If set, Portainer will be configured to use TLS only
  force: false
  # If set, will mount the existing secret into the pod
  existingSecret: ""

mtls:
  # If set, Portainer will be configured to use mTLS only
  enable: false
  # If set, will mount the existing secret into the pod
  existingSecret: ""

feature:
  flags: ""

ingress:
  enabled: false
  ingressClassName: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # Only use below if tls.force=true
    # nginx.ingress.kubernetes.io/backend-protocol: HTTPS
  # Note: Hosts and paths are of type array
  hosts:
    # Explicit "" instead of a bare (null) value so the scalar type is unambiguous.
    - host: ""
      paths: []
      # - path: "/"
  tls: []

resources: {}

persistence:
  enabled: true
  size: "10Gi"
  annotations: {}
  # Explicit empty strings instead of bare nulls; both are falsy to the
  # templates' "if" checks, so behavior is unchanged.
  storageClass: ""
  existingClaim: ""

81
portainer/values.yaml Normal file
View File

@ -0,0 +1,81 @@
# Portainer values for this cluster: Enterprise Edition image, ClusterIP
# service behind an haproxy ingress, TLS forced.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

# If enterpriseEdition is enabled, then use the values below _instead_ of those in .image
enterpriseEdition:
  enabled: true
  image:
    repository: portainer/portainer-ee
    # Quoted so the version is always parsed as a string.
    tag: "2.18.3"
    pullPolicy: Always

image:
  repository: portainer/portainer-ce
  tag: "2.18.3"
  pullPolicy: Always

imagePullSecrets: []
nodeSelector: {}

serviceAccount:
  annotations: {}
  name: portainer-sa-clusteradmin

service:
  # Set the httpNodePort and edgeNodePort only if the type is NodePort
  # For Ingress, set the type to be ClusterIP and set ingress.enabled to true
  # For Cloud Providers, set the type to be LoadBalancer
  type: ClusterIP
  httpPort: 9000
  httpsPort: 9443
  annotations:
    # haproxy ingress controller: use TLS towards the backend (tls.force=true below).
    haproxy.org/server-ssl: "true"

tls:
  # If set, Portainer will be configured to use TLS only
  force: true
  # If set, will mount the existing secret into the pod
  existingSecret: ""

mtls:
  # If set, Portainer will be configured to use mTLS only
  enable: false
  # If set, will mount the existing secret into the pod
  existingSecret: ""

feature:
  flags: ""

ingress:
  enabled: true
  ingressClassName: haproxy
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
    # Legacy class annotation kept alongside ingressClassName for older controllers.
    kubernetes.io/ingress.class: haproxy
    # kubernetes.io/ingress.class: nginx
    # Only use below if tls.force=true
    # nginx.ingress.kubernetes.io/backend-protocol: HTTPS
  # Note: Hosts and paths are of type array
  hosts:
    - host: portainer.geesink.org
      paths:
        - path: "/"
  tls:
    - hosts:
        - portainer.geesink.org
      secretName: portainer-cert-prod

resources: {}

persistence:
  enabled: true
  size: "10Gi"
  annotations: {}
  # Explicit empty strings instead of bare nulls; both are falsy to the
  # templates' "if" checks, so behavior is unchanged.
  storageClass: ""
  existingClaim: ""

15
portainer/values.yaml.old Normal file
View File

@ -0,0 +1,15 @@
# Archived `helm get values` dump for the portainer release (superseded by
# portainer/values.yaml).
USER-SUPPLIED VALUES:
enterpriseEdition:
  enabled: true
ingress:
  annotations:
    kubernetes.io/ingress.class: haproxy
  enabled: true
  hosts:
    - host: portainer.geesink.org
      paths:
        # Fix: was '"/"', which produces the five-character string "/" (with
        # literal quote marks) - an invalid ingress path. Plain "/" is intended.
        - path: "/"
service:
  type: ClusterIP
tls:
  force: true

15
portainer/values2.yaml Normal file
View File

@ -0,0 +1,15 @@
# `helm get values` dump for the portainer release. The "USER-SUPPLIED VALUES:"
# header line is part of helm's output; it parses as a harmless null-valued key.
USER-SUPPLIED VALUES:
enterpriseEdition:
  enabled: true
ingress:
  annotations:
    kubernetes.io/ingress.class: haproxy
  enabled: true
  hosts:
    - host: portainer.geesink.org
      paths:
        # Fix: was '"/"', which produces the five-character string "/" (with
        # literal quote marks) - an invalid ingress path. Plain "/" is intended.
        - path: "/"
service:
  type: ClusterIP
tls:
  force: true

41
postgres_backup/job.yml Normal file
View File

@ -0,0 +1,41 @@
# One-shot backup Job for the MariaDB Galera cluster.
# NOTE(review): the container only sleeps for an hour and never invokes the
# mounted /backup-script.sh - presumably the backup is run manually via
# "kubectl exec" during that window; confirm, or wire the script into args.
apiVersion: batch/v1
kind: Job
metadata:
  name: mysql-backup
spec:
  template:
    spec:
      containers:
        - name: mysql-backup
          image: mariadb:latest
          command: [ "/bin/sh" ]
          # Keeps the pod alive rather than performing the backup itself.
          args: [ "-c", "sleep 3600" ]
          env:
            - name: MYSQL_HOST
              value: galera-mariadb-galera
            - name: MYSQL_USER
              value: backupuser
            - name: MYSQL_PASSWORD
              valueFrom:
                secretKeyRef:
                  # NOTE(review): "backupscrets" looks like a typo for
                  # "backupsecrets" - it must match the actual Secret name in
                  # the cluster, so verify before renaming.
                  name: backupscrets
                  key: backupsecret
          volumeMounts:
            # NFS-backed destination; subPath keeps MySQL dumps in their own folder.
            - name: backup-volume
              mountPath: /backup
              subPath: mysql
            # Backup script projected from the ConfigMap as a single file.
            - name: backup-script
              mountPath: /backup-script.sh
              subPath: backup-script.sh
              readOnly: true
      restartPolicy: OnFailure
      volumes:
        - name: backup-volume
          nfs:
            server: 192.168.86.86
            path: /volume1/backupk8s
        - name: backup-script
          configMap:
            name: backup-script
  # Retry a failed backup pod at most once.
  backoffLimit: 1

View File

@ -0,0 +1,13 @@
# Static NFS-backed PersistentVolume for MySQL backups.
# The 1Mi capacity is nominal - NFS does not enforce it; it only needs to
# satisfy whatever PVC binds to this volume.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-backup-mysql
spec:
  capacity:
    storage: 1Mi
  accessModes:
    - ReadWriteMany
  nfs:
    server: 192.168.86.86
    path: "/volume1/backupk8s/mysql/"
  # Matched by PVCs requesting storageClassName "nfs".
  storageClassName: nfs

45
postgres_backup/script.sh Normal file
View File

@ -0,0 +1,45 @@
#!/bin/bash
#
# Dump every database in a PostgreSQL cluster to a timestamped gzip archive
# and rotate older archives into weekly / monthly / yearly slots.
#
# Environment (all optional):
#   PG_USER, PG_PASSWORD, PG_HOST, PG_PORT - connection settings.

set -euo pipefail

# Set default values
PG_USER=${PG_USER:-postgres}
PG_PASSWORD=${PG_PASSWORD:-postgres}
PG_HOST=${PG_HOST:-localhost}
PG_PORT=${PG_PORT:-5432}

# Fix: pg_dumpall reads the password from the PGPASSWORD environment variable;
# PG_PASSWORD was previously assigned but never used, so password-authenticated
# servers rejected the dump.
export PGPASSWORD="${PG_PASSWORD}"

# Backup destination; create it so a fresh volume doesn't fail the redirect.
BACKUP_DIR="/data/backups/postgres"
mkdir -p "${BACKUP_DIR}"

# Backup file name: pg_dump_all_YYYY-MM-DD_HH-MM-SS.sql.gz
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
BACKUP_FILE="${BACKUP_DIR}/pg_dump_all_${TIMESTAMP}.sql.gz"

# Dump all databases (with DROP ... IF EXISTS for clean restores) and compress.
# With pipefail set, a pg_dumpall failure aborts the script instead of leaving
# a silently truncated archive behind.
pg_dumpall --clean --if-exists --dbname=postgres --username="${PG_USER}" --host="${PG_HOST}" --port="${PG_PORT}" | gzip >"${BACKUP_FILE}"

# Promote aging backups into fixed-name slots. Each slot holds a single file,
# so when several candidates match, only the last one moved survives -
# presumably intentional (one weekly/monthly/yearly snapshot each); confirm.
find "${BACKUP_DIR}" -maxdepth 1 -type f -name "*.gz" -not -name "*weekly*" \
  -not -name "*monthly*" -not -name "*yearly*" \
  -mtime +7 -mtime -28 -exec mv -f {} "${BACKUP_DIR}/pg_dump_all_weekly.sql.gz" \; # daily -> weekly after 7 days
find "${BACKUP_DIR}" -maxdepth 1 -type f -name "*.gz" -not -name "*monthly*" \
  -not -name "*yearly*" -mtime +28 -mtime -365 \
  -exec mv -f {} "${BACKUP_DIR}/pg_dump_all_monthly.sql.gz" \; # weekly -> monthly after 28 days
find "${BACKUP_DIR}" -maxdepth 1 -type f -name "*.gz" -not -name "*yearly*" \
  -mtime +365 -exec mv -f {} "${BACKUP_DIR}/pg_dump_all_yearly.sql.gz" \; # monthly -> yearly after 365 days

# Remove anything left in an age window that the promotion above did not claim.
find "${BACKUP_DIR}" -maxdepth 1 -type f -name "*.gz" \
  -mtime +7 -mtime -28 -not -name "*weekly*" -not -name "*monthly*" -exec rm -f {} + # stale dailies
find "${BACKUP_DIR}" -maxdepth 1 -type f -name "*.gz" \
  -mtime +28 -mtime -365 -not -name "*monthly*" -not -name "*yearly*" -exec rm -f {} + # stale weeklies
find "${BACKUP_DIR}" -maxdepth 1 -type f -name "*.gz" \
  -mtime +365 -not -name "*yearly*" -exec rm -f {} + # stale monthlies

# Print confirmation message
echo "Done. Backup file: ${BACKUP_FILE}"