mirror of
https://gitlab.sectorq.eu/home/docker-compose.git
synced 2025-12-14 18:34:53 +01:00
build
This commit is contained in:
@@ -1 +0,0 @@
|
||||
l4c1j4yd33Du5lo
|
||||
@@ -1,230 +0,0 @@
|
||||
version: '3.9'
|
||||
services:
|
||||
homeassistant:
|
||||
network_mode: host
|
||||
image: ${DOCKER_REGISTRY:-}ghcr.io/home-assistant/home-assistant:latest
|
||||
volumes:
|
||||
- /share/docker_data/ha/:/config
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- /run/dbus:/run/dbus:ro
|
||||
privileged: true
|
||||
environment:
|
||||
- DISABLE_JEMALLOC=value
|
||||
- TZ=Europe/Bratislava
|
||||
dns:
|
||||
- 192.168.77.101
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
com.centurylinklabs.watchtower.enable: true
|
||||
homepage.group: Smarthome
|
||||
homepage.name: Home Assistant
|
||||
homepage.weight: 1
|
||||
homepage.icon: home-assistant.png
|
||||
homepage.href: https://ha.sectorq.eu
|
||||
homepage.description: 3D Printing
|
||||
homepage.server: my-docker
|
||||
homepage.container: HomeAssistant
|
||||
homepage.widget.type: homeassistant
|
||||
homepage.widget.url: https://ha.sectorq.eu
|
||||
homepage.widget.key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiIzOTk5NGJjYjIzYjk0YzExYmM5OWZiNTBlNzU0N2M2YyIsImlhdCI6MTc0MDM5OTY4NCwiZXhwIjoyMDU1NzU5Njg0fQ.LDebvPGreyZzlWT1CylHSdSt8i_cWO72HnNCsCAIaG8
|
||||
wud.watch: true
|
||||
wud.watch.digest: true
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
esphome:
|
||||
image: ${DOCKER_REGISTRY:-}esphome/esphome:latest
|
||||
volumes:
|
||||
- /share/docker_data/esphome/config:/config
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
privileged: true
|
||||
network_mode: host
|
||||
environment:
|
||||
- USERNAME=jaydee
|
||||
- PASSWORD=jaydee1
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
com.centurylinklabs.watchtower.enable: true
|
||||
homepage.group: Smarthome
|
||||
homepage.name: ESPHome
|
||||
homepage.weight: 1
|
||||
homepage.icon: esphome.png
|
||||
homepage.href: https://esphome.sectorq.eu
|
||||
homepage.description: 3D Printing
|
||||
homepage.server: my-docker
|
||||
homepage.container: esphome
|
||||
homepage.widget.type: esphome
|
||||
homepage.widget.url: https://esphome.sectorq.eu
|
||||
homepage.widget.username: jaydee
|
||||
homepage.widget.password: jaydee1
|
||||
wud.watch: true
|
||||
wud.watch.digest: true
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
wyoming-piper-en:
|
||||
image: ${DOCKER_REGISTRY:-}rhasspy/wyoming-piper
|
||||
ports:
|
||||
- 10200:10200
|
||||
volumes:
|
||||
- /share/docker_data/piper/english:/data
|
||||
command: --data-dir /data --voice en_US-lessac-medium
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
com.centurylinklabs.watchtower.enable: true
|
||||
wud.watch: true
|
||||
wud.watch.digest: true
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
wyoming-whisper-en:
|
||||
image: ${DOCKER_REGISTRY:-}rhasspy/wyoming-whisper
|
||||
ports:
|
||||
- 10300:10300
|
||||
volumes:
|
||||
- /share/docker_data/whisper/english:/data
|
||||
command: --data-dir /data --model tiny-int8 --language en
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
com.centurylinklabs.watchtower.enable: true
|
||||
wud.watch: true
|
||||
wud.watch.digest: true
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
openwakeword:
|
||||
image: ${DOCKER_REGISTRY:-}rhasspy/wyoming-openwakeword:latest
|
||||
command: --preload-model 'ok_nabu' --custom-model-dir /custom --model 'ok nabu'
|
||||
--model 'ok_nabu' --uri 'tcp://0.0.0.0:10400' --threshold 0.7 --trigger-level
|
||||
2 --debug
|
||||
volumes:
|
||||
- /share/docker_data/openwakeword-data:/data
|
||||
- /share/docker_data/openwakeword-data:/custom
|
||||
environment:
|
||||
- TZ=Europe/Bratislava
|
||||
ports:
|
||||
- 10400:10400
|
||||
- 10400:10400/udp
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
com.centurylinklabs.watchtower.enable: true
|
||||
wud.watch: true
|
||||
wud.watch.digest: true
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
matter-server:
|
||||
image: ${DOCKER_REGISTRY:-}ghcr.io/home-assistant-libs/python-matter-server:stable
|
||||
security_opt:
|
||||
- apparmor=unconfined
|
||||
volumes:
|
||||
- /share/docker_data/matter-server:/data
|
||||
- /run/dbus:/run/dbus:ro
|
||||
network_mode: host
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
com.centurylinklabs.watchtower.enable: true
|
||||
wud.watch: true
|
||||
wud.watch.digest: true
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
music-assistant-server:
|
||||
image: ${DOCKER_REGISTRY:-}ghcr.io/music-assistant/server:latest
|
||||
network_mode: host
|
||||
volumes:
|
||||
- /share/docker_data/music-assistant-server/data:/data/
|
||||
cap_add:
|
||||
- SYS_ADMIN
|
||||
- DAC_READ_SEARCH
|
||||
security_opt:
|
||||
- apparmor:unconfined
|
||||
environment:
|
||||
- LOG_LEVEL=info
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
com.centurylinklabs.watchtower.enable: true
|
||||
wud.watch: true
|
||||
wud.watch.digest: true
|
||||
homepage.group: Smarthome
|
||||
homepage.name: music-assistant
|
||||
homepage.weight: 1
|
||||
homepage.icon: music-assistant.png
|
||||
homepage.href: https://music.sectorq.eu
|
||||
homepage.description: Music
|
||||
homepage.server: my-docker
|
||||
homepage.container: music-assistant-server
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
influxdb:
|
||||
ports:
|
||||
- 8086:8086
|
||||
volumes:
|
||||
- /share/docker_data/influxdb/data:/var/lib/influxdb2
|
||||
- /share/docker_data/influxdb/config:/etc/influxdb2
|
||||
secrets:
|
||||
- influxdb2-admin-username
|
||||
- influxdb2-admin-password
|
||||
- influxdb2-admin-token
|
||||
environment:
|
||||
- DOCKER_INFLUXDB_INIT_MODE=setup
|
||||
- DOCKER_INFLUXDB_INIT_USERNAME=ha
|
||||
- DOCKER_INFLUXDB_INIT_PASSWORD=haHAhaHA
|
||||
- DOCKER_INFLUXDB_INIT_ORG=ha
|
||||
- DOCKER_INFLUXDB_INIT_BUCKET=ha
|
||||
- DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=mytoken123
|
||||
- DOCKER_INFLUXDB_INIT_ADMIN_TOKEN_FILE=/run/secrets/influxdb2-admin-token
|
||||
image: ${DOCKER_REGISTRY:-}influxdb:2
|
||||
healthcheck:
|
||||
test: echo test > /var/lib/influxdb2/hc || exit 1
|
||||
interval: 10s
|
||||
timeout: 3s
|
||||
retries: 2
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
com.centurylinklabs.watchtower.enable: true
|
||||
wud.watch: true
|
||||
wud.watch.digest: true
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
secrets:
|
||||
influxdb2-admin-username:
|
||||
file: .env.influxdb2-admin-username
|
||||
influxdb2-admin-password:
|
||||
file: .env.influxdb2-admin-password
|
||||
influxdb2-admin-token:
|
||||
file: .env.influxdb2-admin-token
|
||||
@@ -1,185 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import docker
|
||||
import os
|
||||
import datetime
|
||||
import argparse
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
BACKUP_DIR = "/backup/docker_volumes"
|
||||
|
||||
import requests
|
||||
|
||||
PORTAINER_URL = "https://port.sectorq.eu/api"
|
||||
API_KEY = "ptr_/5RkMCT/j3BTaL32vMSDtXFi76yOXRKVFOrUtzMsl5Y="
|
||||
HEADERS = {"X-API-Key": f"{API_KEY}"}
|
||||
ENDPOINT_ID = 4
|
||||
def get_stack_by_name(stack_name):
    """Return the Portainer stack dict whose Name matches, or None."""
    response = requests.get(f"{PORTAINER_URL}/stacks", headers=HEADERS)
    response.raise_for_status()
    # First stack with a matching name wins; None when absent.
    return next((s for s in response.json() if s['Name'] == stack_name), None)
|
||||
|
||||
def stop_stack(stack_name):
    """Stop a stack through the Portainer API; no-op when it does not exist."""
    stack = get_stack_by_name(stack_name)
    if stack is None:
        print(f"[INFO] Stack {stack_name} not found.")
        return

    print(f"Stopping stack {stack_name} via Portainer API...")
    url = f"{PORTAINER_URL}/stacks/{stack['Id']}/stop?endpointId={ENDPOINT_ID}"
    print("URL:", url)
    response = requests.post(url, headers=HEADERS)
    if response.status_code != 200:
        print(f"[ERROR] Failed to stop stack: {response.status_code} {response.text}")
    else:
        print(f"[OK] Stack {stack_name} stopped.")
|
||||
|
||||
def start_stack(stack_name):
    """Start a stack through the Portainer API; no-op when it does not exist."""
    stack = get_stack_by_name(stack_name)
    if stack is None:
        print(f"[INFO] Stack {stack_name} not found.")
        return

    print(f"Starting stack {stack_name} via Portainer API...")
    url = f"{PORTAINER_URL}/stacks/{stack['Id']}/start?endpointId={ENDPOINT_ID}"
    response = requests.post(url, headers=HEADERS)
    if response.status_code != 200:
        print(f"[ERROR] Failed to start stack: {response.status_code} {response.text}")
    else:
        print(f"[OK] Stack {stack_name} started.")
|
||||
|
||||
def to_timestamp():
    """Current local time formatted as YYYYMMDD-HHMMSS for backup filenames."""
    now = datetime.datetime.now()
    return now.strftime("%Y%m%d-%H%M%S")
|
||||
|
||||
def get_containers_using_volume(client, vol_name):
    """List running containers that have the named volume mounted.

    A container appears once per matching mount entry, mirroring the
    inspection data returned by the Docker API.
    """
    matches = []
    for container in client.containers.list():
        for mount in container.attrs['Mounts']:
            if mount.get('Name') == vol_name:
                matches.append(container)
    return matches
|
||||
|
||||
def backup_volumes(client):
    """Archive every Docker volume into BACKUP_DIR.

    For each volume: stop the stack (or standalone container) that mounts it,
    tar the volume contents through a throwaway busybox container, then bring
    the stopped workloads back up.  Failures are reported per volume and do
    not abort the run.
    """
    os.makedirs(BACKUP_DIR, exist_ok=True)
    timestamp = to_timestamp()
    volumes = client.volumes.list()
    if not volumes:
        print("No Docker volumes found.")
        return

    for vol in volumes:
        vol_name = vol.name
        backup_file = os.path.join(BACKUP_DIR, f"{vol_name}-{timestamp}.tar.gz")
        containers = get_containers_using_volume(client, vol_name)

        # Stacks we stopped for this volume, to be restarted afterwards.
        stacks_to_restart = {}
        for c in containers:
            stack = c.labels.get('com.docker.stack.namespace')
            if stack:
                # Stop the whole stack once rather than individual members.
                if stack not in stacks_to_restart:
                    stacks_to_restart[stack] = True
                    stop_stack(stack)
            else:
                print(f"Stopping container {c.name} for backup...")
                c.stop()

        # Archive the (now quiescent) volume via a temporary busybox container.
        try:
            print(f"Backing up volume {vol_name} → {backup_file}")
            client.containers.run(
                image="busybox",
                command=f"tar czf /backup/{vol_name}-{timestamp}.tar.gz -C /volume .",
                remove=True,
                volumes={
                    vol_name: {'bind': '/volume', 'mode': 'ro'},
                    BACKUP_DIR: {'bind': '/backup', 'mode': 'rw'},
                },
            )
            print(f"[OK] Backup completed: {backup_file}")
        except Exception as e:
            # Best-effort: report and continue so remaining volumes still back up.
            print(f"[ERROR] Failed to backup {vol_name}: {e}")

        # Bring standalone containers back first, then the stacks.
        for c in containers:
            if not c.labels.get('com.docker.stack.namespace'):
                print(f"Starting container {c.name} after backup...")
                c.start()

        for stack in stacks_to_restart:
            start_stack(stack)
|
||||
|
||||
|
||||
def restore_volume(client, vol_name, backup_file):
    """Restore a volume from a tar.gz archive, pausing its consumers.

    Stops any stack or standalone container that mounts ``vol_name``,
    unpacks ``backup_file`` into the volume via a throwaway busybox
    container, then restarts what was stopped.

    :param client: docker SDK client
    :param vol_name: name of the Docker volume to restore into
    :param backup_file: path to the .tar.gz archive on the host
    """
    containers = get_containers_using_volume(client, vol_name)
    stacks_to_restart = {}

    for c in containers:
        stack = c.labels.get('com.docker.stack.namespace')
        if stack:
            # Stop the whole stack once rather than individual members.
            if stack not in stacks_to_restart:
                stacks_to_restart[stack] = True
                stop_stack(stack)
        else:
            print(f"Stopping container {c.name} for restore...")
            c.stop()

    try:
        print(f"Restoring volume {vol_name} from {backup_file}")
        client.containers.run(
            image="busybox",
            command=f"tar xzf /backup/{os.path.basename(backup_file)} -C /volume",
            remove=True,
            volumes={
                vol_name: {'bind': '/volume', 'mode': 'rw'},
                os.path.dirname(backup_file): {'bind': '/backup', 'mode': 'rw'}
            },
        )
        print(f"[OK] Restore completed: {vol_name}")
    except Exception as e:
        print(f"[ERROR] Failed to restore {vol_name}: {e}")

    # Restart standalone containers.
    for c in containers:
        stack = c.labels.get('com.docker.stack.namespace')
        if not stack:
            print(f"Starting container {c.name} after restore...")
            c.start()

    # Restart stacks.  BUG FIX: start_stack() expects a stack *name*; the
    # previous code passed the deployment file path, so stacks were never
    # found and never restarted after a restore.
    for stack in stacks_to_restart.keys():
        stack_file = f"/deployments/{stack}.yml"
        if os.path.exists(stack_file):
            start_stack(stack)
        else:
            print(f"[WARNING] Stack file {stack_file} not found. Start manually.")
|
||||
|
||||
def main():
    """CLI entry point: dispatch to volume backup or restore."""
    parser = argparse.ArgumentParser(description="Backup or restore Docker volumes.")
    parser.add_argument("action", choices=["backup", "restore"], help="Action to perform")
    parser.add_argument("--volume", help="Volume name (required for restore)")
    parser.add_argument("--file", help="Backup file (required for restore)")
    args = parser.parse_args()

    client = docker.from_env()

    if args.action == "backup":
        backup_volumes(client)
    elif args.action == "restore":
        # Restore needs both the target volume and the archive to read.
        if not (args.volume and args.file):
            print("[ERROR] --volume and --file are required for restore")
            sys.exit(1)
        restore_volume(client, args.volume, args.file)


if __name__ == "__main__":
    main()
|
||||
@@ -1,92 +0,0 @@
|
||||
import docker
|
||||
import requests
|
||||
import os
|
||||
import time
|
||||
PORTAINER_URL = "https://port.sectorq.eu/api"
|
||||
API_KEY = "ptr_/5RkMCT/j3BTaL32vMSDtXFi76yOXRKVFOrUtzMsl5Y="
|
||||
HEADERS = {"X-API-Key": f"{API_KEY}"}
|
||||
ENDPOINT_ID = 4
|
||||
client = docker.from_env()
|
||||
|
||||
# Keep track of which stacks we already stopped
|
||||
stopped_stacks = {}
|
||||
|
||||
def get_stack_by_name(stack_name):
    """Look up a Portainer stack by its name; None when no match exists."""
    r = requests.get(f"{PORTAINER_URL}/stacks", headers=HEADERS)
    r.raise_for_status()
    matching = [s for s in r.json() if s['Name'] == stack_name]
    return matching[0] if matching else None
|
||||
|
||||
def stop_stack(stack_name):
    """Stop a stack once via the Portainer API.

    Stops are cached in ``stopped_stacks`` so a stack is not stopped again
    while several of its volumes are processed.

    BUG FIX: the cache is now only updated on a *successful* stop.  The
    previous version marked the stack as stopped even when the API call
    failed, so subsequent volumes of that stack were backed up while the
    stack was still running.
    """
    if stack_name in stopped_stacks:
        return
    stack = get_stack_by_name(stack_name)
    if not stack:
        print(f"[INFO] Stack {stack_name} not found, skipping stop")
        return
    url = f"{PORTAINER_URL}/stacks/{stack['Id']}/stop?endpointId={ENDPOINT_ID}"
    print(f"url: {url}")
    r = requests.post(url, headers=HEADERS)
    if r.status_code == 200:
        print(f"[OK] Stack {stack_name} stopped")
        stopped_stacks[stack_name] = True
    else:
        print(f"[ERROR] Failed to stop stack {stack_name}: {r.text}")
|
||||
|
||||
def start_stack(stack_name):
    """Start a stack via the Portainer API; skips silently unknown stacks."""
    stack = get_stack_by_name(stack_name)
    if not stack:
        print(f"[INFO] Stack {stack_name} not found, skipping start")
        return
    url = f"{PORTAINER_URL}/stacks/{stack['Id']}/start?endpointId={ENDPOINT_ID}"
    r = requests.post(url, headers=HEADERS)
    if r.status_code != 200:
        print(f"[ERROR] Failed to start stack {stack_name}: {r.text}")
    else:
        print(f"[OK] Stack {stack_name} started")
|
||||
|
||||
def backup_volume(vol):
    """Archive one volume into /backup using a throwaway alpine container.

    BUG FIX: the archive is produced with ``tar czf`` (gzip-compressed), so
    it is now named ``.tar.gz`` instead of the previous misleading ``.tar``
    extension, which broke tooling that infers compression from the suffix.

    :param vol: docker SDK Volume object
    """
    vol_name = vol.name
    backup_file = f"/backup/{vol_name}.tar.gz"
    print(f"[INFO] Backing up volume {vol_name} → {backup_file}")
    # Use a temporary container to archive the volume (read-only mount).
    client.containers.run(
        image="alpine",
        command=f"tar czf /backup/{vol_name}.tar.gz -C /data .",
        volumes={vol_name: {'bind': '/data', 'mode': 'ro'},
                 "/backup": {'bind': '/backup', 'mode': 'rw'}},
        remove=True
    )
|
||||
|
||||
def main():
    """Back up every volume, stopping the owning Swarm stack first."""
    stack_volumes = {}
    normal_volumes = []

    # Split volumes into stack-owned and standalone, based on the Swarm label.
    for vol in client.volumes.list():
        labels = vol.attrs.get('Labels', {})
        stack_name = labels.get('com.docker.stack.namespace')
        if stack_name:
            stack_volumes.setdefault(stack_name, []).append(vol)
        else:
            normal_volumes.append(vol)

    # Stack-owned volumes: stop the stack, archive each volume, restart.
    for stack_name, vols in stack_volumes.items():
        stop_stack(stack_name)
        for v in vols:
            backup_volume(v)
        time.sleep(10)  # Small delay to ensure stack is fully stopped
        start_stack(stack_name)

    # Standalone volumes can be archived without stopping anything.
    for v in normal_volumes:
        backup_volume(v)


if __name__ == "__main__":
    main()
|
||||
148
converted.yml
148
converted.yml
@@ -1,148 +0,0 @@
|
||||
version: '3.9'
|
||||
services:
|
||||
authentik_ldap:
|
||||
environment:
|
||||
AUTHENTIK_HOST: https://auth.sectorq.eu
|
||||
AUTHENTIK_INSECURE: 'false'
|
||||
AUTHENTIK_TOKEN: EfLokorVuj1woeO0p1he3mRJvVfGfvdKM8Bdew3DtDZZ3To6bVpFSDI7GOqY
|
||||
TZ: Europe/Bratislava
|
||||
image: ${DOCKER_REGISTRY:-}ghcr.io/goauthentik/ldap:${AUTHENTIK_TAG:-2024.6.1}
|
||||
ports:
|
||||
- 2389:3389
|
||||
- 2636:6636
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
wud.watch: true
|
||||
wud.watch.digest: true
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
postgresql:
|
||||
environment:
|
||||
AUTHENTIK_SECRET_KEY: $AUTHENTIK_SECRET_KEY
|
||||
POSTGRES_DB: ${PG_DB:-authentik}
|
||||
POSTGRES_PASSWORD: ${PG_PASS:?database password required}
|
||||
POSTGRES_USER: ${PG_USER:-authentik}
|
||||
TZ: Europe/Bratislava
|
||||
healthcheck:
|
||||
interval: 30s
|
||||
retries: 5
|
||||
start_period: 20s
|
||||
test:
|
||||
- CMD-SHELL
|
||||
- pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}
|
||||
timeout: 5s
|
||||
image: ${DOCKER_REGISTRY:-docker.io/library/}postgres:16-alpine
|
||||
volumes:
|
||||
- /share/docker_data/authentik/database:/var/lib/postgresql/data
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
wud.watch: false
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
redis:
|
||||
command: --save 60 1 --loglevel warning
|
||||
healthcheck:
|
||||
interval: 30s
|
||||
retries: 5
|
||||
start_period: 20s
|
||||
test:
|
||||
- CMD-SHELL
|
||||
- redis-cli ping | grep PONG
|
||||
timeout: 3s
|
||||
image: ${DOCKER_REGISTRY:-docker.io/library/}redis:alpine
|
||||
volumes:
|
||||
- redis:/data
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
wud.watch: true
|
||||
wud.watch.digest: true
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
server:
|
||||
command: server
|
||||
environment:
|
||||
AUTHENTIK_POSTGRESQL__HOST: postgresql
|
||||
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
|
||||
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
|
||||
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
|
||||
AUTHENTIK_REDIS__HOST: redis
|
||||
AUTHENTIK_SECRET_KEY: $AUTHENTIK_SECRET_KEY
|
||||
TZ: Europe/Bratislava
|
||||
image: ${DOCKER_REGISTRY:-}ghcr.io/goauthentik/server:${AUTHENTIK_TAG:-2024.6.1}
|
||||
ports:
|
||||
- ${COMPOSE_PORT_HTTP:-9003}:9000
|
||||
- ${COMPOSE_PORT_HTTPS:-9453}:9443
|
||||
volumes:
|
||||
- /share/docker_data/authentik/media:/media
|
||||
- /share/docker_data/authentik/custom-templates:/templates
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
homepage.container: authentik-server-1
|
||||
homepage.description: Authentification server
|
||||
homepage.group: Utilities
|
||||
homepage.href: https://auth.sectorq.eu
|
||||
homepage.icon: authentik.png
|
||||
homepage.name: Authentik
|
||||
homepage.server: my-docker
|
||||
homepage.weight: '10'
|
||||
homepage.widget.key: sVOwPPInTue7ZnvolmKG15hkE9gCyLcuAelLOQny6OIVn7JUilny9loPTG0v
|
||||
homepage.widget.type: authentik
|
||||
homepage.widget.url: https://auth.sectorq.eu
|
||||
wud.watch: true
|
||||
wud.watch.digest: true
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
worker:
|
||||
command: worker
|
||||
environment:
|
||||
AUTHENTIK_POSTGRESQL__HOST: postgresql
|
||||
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
|
||||
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
|
||||
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
|
||||
AUTHENTIK_REDIS__HOST: redis
|
||||
AUTHENTIK_SECRET_KEY: $AUTHENTIK_SECRET_KEY
|
||||
TZ: Europe/Bratislava
|
||||
image: ${DOCKER_REGISTRY:-}ghcr.io/goauthentik/server:${AUTHENTIK_TAG:-2024.6.1}
|
||||
user: root
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- /share/docker_data/authentik/media:/media
|
||||
- /share/docker_data/authentik/certs:/certs
|
||||
- /share/docker_data/authentik/custom-templates:/templates
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
labels:
|
||||
wud.watch: true
|
||||
wud.watch.digest: true
|
||||
placement:
|
||||
constraints:
|
||||
- node.role == manager
|
||||
volumes:
|
||||
database:
|
||||
driver: local
|
||||
redis:
|
||||
driver: local
|
||||
@@ -1,63 +0,0 @@
|
||||
import docker
|
||||
import os
|
||||
import argparse
|
||||
|
||||
# CLI arguments: which volume to fill and which local directory to copy in.
parser = argparse.ArgumentParser(description="Portainer helper - use env vars or pass credentials.")

parser.add_argument("--volume_name","-v", type=str, default=None, help="Volume name")
parser.add_argument("--source_dir","-s", type=str, default=None, help="Source directory to copy from")
args = parser.parse_args()
|
||||
|
||||
|
||||
def copy_to_volume(volume_name, source_dir, container_path="/data", image="busybox:latest"):
    """
    Copy all files from source_dir into a Docker volume using a temporary container.
    Creates the volume if it does not exist.

    :param volume_name: Name of the Docker volume
    :param source_dir: Local directory to copy
    :param container_path: Path inside the container where volume is mounted
    :param image: Temporary container image
    :return: 1 if the source directory is empty, otherwise None
    :raises ValueError: if source_dir does not exist
    """
    client = docker.from_env()

    if not os.path.isdir(source_dir):
        raise ValueError(f"Source directory {source_dir} does not exist")
    if not os.listdir(source_dir):
        print("Folder is empty")
        return 1
    print("Folder is not empty")

    # Ensure the target volume exists, creating it on demand.
    try:
        client.volumes.get(volume_name)
        print(f"Volume '{volume_name}' exists.")
    except docker.errors.NotFound:
        print(f"Volume '{volume_name}' does not exist. Creating...")
        client.volumes.create(name=volume_name)
        print(f"Volume '{volume_name}' created.")

    print(f"Copying files from {source_dir} to volume '{volume_name}'...")

    # Run temporary container to copy files.
    # BUG FIX: 'cp -r /tmp/*' silently skipped dot-files (the glob does not
    # match hidden entries); 'cp -a /tmp/. <dest>/' copies everything,
    # including hidden files, and preserves permissions/timestamps.
    print(container_path)
    client.containers.run(
        image,
        command=f"sh -c 'cp -a /tmp/. {container_path}/'",
        volumes={
            volume_name: {"bind": container_path, "mode": "rw"},
            os.path.abspath(source_dir): {"bind": "/tmp", "mode": "ro"}
        },
        remove=True,
        detach=False
    )

    print("Files copied successfully.")
|
||||
|
||||
if __name__ == "__main__":
    # Example usage: populate the volume named on the CLI from --source_dir.
    copy_to_volume(
        volume_name=args.volume_name,
        source_dir=args.source_dir,
        container_path="/data"
    )
|
||||
@@ -1,2 +0,0 @@
|
||||
pyyaml
|
||||
docker
|
||||
124
yaml_convert.py
124
yaml_convert.py
@@ -1,124 +0,0 @@
|
||||
import yaml
|
||||
import sys
|
||||
import copy
|
||||
|
||||
def default_deploy():
    """Build a fresh default Swarm deploy section.

    Returning a new dict on every call avoids shared mutable state (and the
    YAML anchors that would result from dumping one shared object).
    """
    deploy = {
        "mode": "replicated",
        "replicas": 1,
        "restart_policy": {"condition": "any"},
        "labels": {},
    }
    deploy["placement"] = {"constraints": ["node.role == manager"]}
    return deploy
|
||||
|
||||
|
||||
def convert_service(service):
    """Convert one compose service mapping into Swarm form.

    Labels (dict or ``KEY=value`` list) are moved into ``deploy.labels`` with
    values lower-cased; a user-supplied ``deploy`` section is merged on top
    of the defaults; every other key is copied through unchanged.

    Fixes over the previous version:
    * removed the interactive ``input()`` debug prompt that blocked every run
      on services with labels;
    * removed a broken environment post-processing loop that raised
      ``AttributeError`` (``list.appeendstr`` does not exist) whenever a
      service defined environment variables.

    :param service: one service mapping from a parsed compose file
    :return: new dict suitable for a Swarm stack file
    """
    # Fresh deploy section each call (avoids shared state / YAML anchors).
    deploy_section = {
        "mode": "replicated",
        "replicas": 1,
        "restart_policy": {"condition": "any"},
        "labels": {},
        "placement": {"constraints": ["node.role == manager"]},
    }

    swarm_service = {}
    for key, value in service.items():
        if key == "labels":
            # Move labels → deploy.labels, normalising values to lowercase strings.
            if isinstance(value, dict):
                deploy_section["labels"].update(
                    {k: str(v).lower() for k, v in value.items()}
                )
            elif isinstance(value, list):
                for item in value:
                    if "=" in item:
                        k, v = item.split("=", 1)
                        deploy_section["labels"][k] = str(v).lower()
            continue
        swarm_service[key] = value

    # Merge a user-provided deploy section over the defaults.
    if "deploy" in service:
        user_deploy = service["deploy"]

        if "labels" in user_deploy:
            labels = user_deploy["labels"]
            if isinstance(labels, dict):
                deploy_section["labels"].update(labels)
            elif isinstance(labels, list):
                for item in labels:
                    if "=" in item:
                        k, v = item.split("=", 1)
                        deploy_section["labels"][k] = str(v).lower()

        if "constraints" in user_deploy.get("placement", {}):
            deploy_section["placement"]["constraints"].extend(
                user_deploy["placement"]["constraints"]
            )

        # Remaining deploy keys win over the defaults (deep-copied so the
        # output never aliases the input document).
        for dk, dv in user_deploy.items():
            if dk not in ["labels", "placement"]:
                deploy_section[dk] = copy.deepcopy(dv)

    swarm_service["deploy"] = deploy_section
    return swarm_service
|
||||
|
||||
|
||||
def convert_compose_to_swarm(app):
    """Read <app>/docker-compose.yml and write a Swarm stack under __swarm/."""
    input_file = f"{app}/docker-compose.yml"
    output_file = f"__swarm/{app}/{app}-swarm.yml"

    with open(input_file, "r") as f:
        compose = yaml.safe_load(f)

    swarm = {"version": "3.9", "services": {}}
    for name, service in compose.get("services", {}).items():
        swarm["services"][name] = convert_service(service)

    # Top-level sections are valid in Swarm as-is; copy them through.
    for section in ["networks", "volumes", "configs", "secrets"]:
        if section in compose:
            swarm[section] = compose[section]

    class NoAliasDumper(yaml.SafeDumper):
        """Dumper that never emits YAML anchors/aliases."""
        def ignore_aliases(self, data):
            return True

    with open(output_file, "w") as f:
        yaml.dump(swarm, f, sort_keys=False, Dumper=NoAliasDumper)

    print(f"✔ Swarm file written to: {output_file}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Expect exactly one argument: the app/stack directory name.
    if len(sys.argv) != 2:
        print("Usage: python convert_to_swarm.py app_name")
        sys.exit(1)

    convert_compose_to_swarm(sys.argv[1])
|
||||
172
yaml_convert2.py
172
yaml_convert2.py
@@ -1,172 +0,0 @@
|
||||
import yaml
|
||||
import sys
|
||||
import re
|
||||
import os
|
||||
# Stack name comes from the CLI; file layout convention:
#   <stack>/docker-compose.yml  →  __swarm/<stack>/<stack>-swarm.yml
stack_name = sys.argv[1]

INPUT_FILE = f"{stack_name}/docker-compose.yml"
OUTPUT_FILE = f"__swarm/{stack_name}/{stack_name}-swarm.yml"
|
||||
|
||||
|
||||
|
||||
|
||||
def fix_env_file(filepath):
    """Convert YAML-style env (KEY: value) → Docker env (KEY=value).

    Comments and blank lines are preserved; already-valid ``KEY=value``
    lines pass through untouched.  The file is rewritten in place only if
    at least one line was converted.

    :raises ValueError: on an invalid key or an unrecognisable line
    """
    fixed_lines = []
    changed = False

    with open(filepath, "r") as f:
        for raw_line in f:
            stripped = raw_line.rstrip("\n").strip()

            # Comments and blank lines pass through untouched.
            if not stripped or stripped.startswith("#"):
                fixed_lines.append(raw_line)
                continue

            # YAML-style "KEY: value" must be rewritten as "KEY=value".
            if ":" in stripped and "=" not in stripped.split(":")[0]:
                key, _, value = stripped.partition(":")
                key, value = key.strip(), value.strip()
                if not re.match(r"^[A-Za-z0-9_]+$", key):
                    raise ValueError(f"Invalid variable name: {key}")
                fixed_lines.append(f"{key}={value}\n")
                changed = True
                continue

            # Already Docker-style "KEY=value": validate the key only;
            # the value may contain anything.
            if "=" in stripped:
                key = stripped.split("=", 1)[0]
                if not re.match(r"^[A-Za-z0-9_]+$", key):
                    raise ValueError(f"Invalid environment variable name: {key}")
                fixed_lines.append(raw_line)
                continue

            raise ValueError(f"Invalid env line: {stripped}")

    if changed:
        with open(filepath, "w") as f:
            f.writelines(fixed_lines)
        print(f"[FIXED] Converted YAML → Docker env format in {filepath}")
    else:
        print(f"[OK] .env file valid: {filepath}")
|
||||
|
||||
def convert_ports(ports):
    """Convert short port syntax to Swarm long syntax.

    Accepts ``"PUB:TGT"`` and ``"PUB:TGT/PROTO"`` strings; entries already
    in long (dict) form are passed through unchanged.

    BUG FIX: the previous version crashed with ``ValueError`` on entries
    carrying a protocol suffix (``int("10400/udp")``), which the compose
    files in this repo use (e.g. openwakeword's ``10400:10400/udp``).

    :param ports: list of compose port entries (strings or dicts)
    :return: list of long-syntax port mappings
    """
    result = []
    print(f"Converting ports: {ports}")
    for p in ports:
        print(f"Port entry: {p}")
        if isinstance(p, str):
            # format: "8080:80" or "8080:80/udp"
            spec, _, proto = p.partition("/")
            pub, tgt = spec.split(":")
            result.append({
                "target": int(tgt),
                "published": int(pub),
                "protocol": proto or "tcp",
                "mode": "ingress"
            })
        else:
            result.append(p)
    return result
|
||||
|
||||
def to_str_lower(value):
    """Convert value to string; booleans become lowercase 'true'/'false'."""
    if value is True:
        return "true"
    if value is False:
        return "false"
    return str(value)
|
||||
|
||||
def env_list_to_dict(env_list):
    """Convert environment from list ['KEY=VAL'] to dict {KEY: VAL} as strings.

    Boolean-looking values are normalised to lowercase 'true'/'false'.
    """
    env_dict = {}
    for entry in env_list:
        key, value = entry.split("=", 1)
        lowered = value.lower()
        env_dict[key] = lowered if lowered in ("true", "false") else str(value)
    return env_dict
|
||||
|
||||
def ensure_labels_as_string(labels):
    """Ensure all label values are strings, lowercase for booleans."""
    out = {}
    for key, value in labels.items():
        if isinstance(value, bool):
            out[key] = "true" if value else "false"
        else:
            out[key] = str(value)
    return out
|
||||
|
||||
def convert_compose_to_swarm(data):
    """Rewrite a parsed compose mapping in place so it deploys on Swarm.

    Per service: drop keys Swarm rejects (``restart``, ``depends_on``,
    ``container_name``), turn environment lists into dicts, move labels
    under ``deploy.labels`` (lowercased booleans), default replicas to 1,
    pin placement to manager nodes and convert short port syntax to long
    syntax.  The homepage widget server label is retargeted at the Swarm
    docker endpoint.

    Fixes over the previous version: removed a meaningless filter that
    compared *service names* against option keywords and a duplicated
    ``container_name`` pop.

    :param data: parsed compose document (mutated in place)
    :return: the same document, for chaining
    """
    for name, svc in data.get("services", {}).items():
        print(f"Converting service: {name} , svc: {svc}")

        # Keys 'docker stack deploy' does not accept.
        svc.pop('restart', None)
        svc.pop('depends_on', None)
        svc.pop('container_name', None)

        # 1) Convert environment list → dict (strings).
        if isinstance(svc.get("environment"), list):
            svc["environment"] = env_list_to_dict(svc["environment"])

        # 2) Ensure a deploy section exists.
        deploy = svc.setdefault("deploy", {})

        # 3) Move labels into deploy.labels, all as strings (lowercase booleans).
        if "labels" in svc:
            deploy.setdefault("labels", {})
            if isinstance(svc["labels"], dict):
                deploy["labels"].update(ensure_labels_as_string(svc["labels"]))
            elif isinstance(svc["labels"], list):
                for label in svc["labels"]:
                    key, value = label.split("=", 1)
                    deploy["labels"][key] = (
                        value.lower() if value.lower() in ["true", "false"] else str(value)
                    )
            del svc["labels"]

        # Point homepage widgets at the Swarm docker endpoint.
        labels = deploy.get("labels", {})
        if labels.get("homepage.server") == "my-docker":
            labels["homepage.server"] = "my-docker-swarm"

        # 4) Default replicas.
        deploy.setdefault("replicas", 1)

        # 5) Add the manager placement constraint exactly once.
        deploy.setdefault("placement", {})
        deploy["placement"].setdefault("constraints", [])
        if "node.role == manager" not in deploy["placement"]["constraints"]:
            deploy["placement"]["constraints"].append("node.role == manager")

        # 6) Convert ports to long format.
        if "ports" in svc:
            svc["ports"] = convert_ports(svc["ports"])

    return data
|
||||
|
||||
def main():
    """Normalise stack.env (if present), convert the compose file, write the Swarm stack."""
    env_path = f"__swarm/{stack_name}/stack.env"
    if os.path.exists(env_path):
        fix_env_file(env_path)  # normalise env syntax before converting

    with open(INPUT_FILE, "r") as f:
        compose = yaml.safe_load(f)

    swarm = convert_compose_to_swarm(compose)

    with open(OUTPUT_FILE, "w") as f:
        yaml.dump(swarm, f, sort_keys=False)

    print(f"Swarm stack file written to {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user