Compare commits

...

43 Commits

SHA1 Message Date
122ef64ad7 build 2025-09-09 11:20:50 +02:00
99f673996e build 2025-09-03 00:44:11 +02:00
68e23da03b build 2025-09-03 00:42:59 +02:00
1c30fb9995 build 2025-09-03 00:42:34 +02:00
b94b61ba5d build 2025-09-03 00:42:04 +02:00
26c3245cf3 build 2025-09-03 00:41:26 +02:00
1510dc2e8d build 2025-09-03 00:40:13 +02:00
d949188a34 build 2025-09-03 00:39:24 +02:00
298c9aee23 build 2025-09-02 22:52:54 +02:00
808f92c4d1 build 2025-09-02 22:50:36 +02:00
43f6d2abcb build 2025-09-02 22:47:01 +02:00
81c7ead7b2 build 2025-09-02 22:44:12 +02:00
cd818da774 build 2025-09-02 22:26:51 +02:00
a1dfbd664c build 2025-09-02 22:23:40 +02:00
571219881d build 2025-09-01 14:01:43 +02:00
0c07fde85a build 2025-09-01 13:58:44 +02:00
f46eacf627 build 2025-09-01 13:54:13 +02:00
92226734ef build 2025-09-01 13:52:25 +02:00
d3359e9a68 build 2025-09-01 13:49:24 +02:00
fdbe4eebe1 build 2025-09-01 13:32:58 +02:00
3a3faad97e build 2025-09-01 13:22:21 +02:00
8be3e20523 build 2025-09-01 13:20:29 +02:00
126ab1813b build 2025-09-01 13:19:35 +02:00
28efc95b4d build 2025-09-01 13:18:50 +02:00
f6a106fd91 build 2025-09-01 13:17:03 +02:00
bbe4d72666 build 2025-09-01 12:47:25 +02:00
b536c8ecb1 build 2025-09-01 12:37:22 +02:00
7a8130c3f0 build 2025-09-01 12:35:46 +02:00
5756798269 build 2025-09-01 12:34:02 +02:00
d6af0c24b5 build 2025-09-01 12:30:32 +02:00
6f9a2bba67 build 2025-09-01 12:24:42 +02:00
11bc56ecb3 build 2025-09-01 12:23:17 +02:00
b4032eca7e Merge branch 'main' of gitlab.sectorq.eu:jaydee/omv_backup 2025-09-01 12:22:38 +02:00
f75ac2eb79 build 2025-09-01 12:22:00 +02:00
a555567c4b Update .gitlab-ci.yml file 2025-09-01 12:21:49 +02:00
de63a1e9aa build 2025-09-01 12:19:30 +02:00
d3eab9f50e build 2025-09-01 12:15:01 +02:00
72a2fa5710 build 2025-09-01 10:36:56 +02:00
b3be50bfdd build 2025-09-01 10:27:40 +02:00
f32ada9ad5 build 2025-09-01 10:16:52 +02:00
d6492ebf80 build 2025-08-27 04:38:57 +02:00
7cee7570b4 added v3 2025-08-27 04:37:26 +02:00
ce7c855808 added v3 2025-08-27 04:35:27 +02:00
3 changed files with 140 additions and 56 deletions

View File

@@ -25,6 +25,6 @@ build-job: # This job runs in the build stage, which runs first.
   script:
     - column=":"
     - echo "${flow_id}"
-    - curl -X POST https://kestra.sectorq.eu/api/v1/executions/webhook/jaydee/ansible-all/${flow_id} -d '{"tag":["setup","omv_backup"],"target":["servers"]}' -H "Content-Type${column} application/json"
+    - curl -X POST https://kestra.sectorq.eu/api/v1/executions/webhook/jaydee/ansible-all/${flow_id} -d '{"tag":["omv_backup"],"target":["servers"]}' -H "Content-Type${column} application/json"
   rules:
     - if: '$CI_COMMIT_MESSAGE =~ /build/'
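
The only change here is dropping the "setup" tag from the webhook payload. For reference, a minimal Python sketch of the same trigger call, equivalent to the curl line above (the flow_id placeholder is hypothetical; CI injects ${flow_id} as a variable):

import json
import urllib.request

flow_id = "REPLACE_ME"  # hypothetical placeholder; CI supplies ${flow_id}
url = f"https://kestra.sectorq.eu/api/v1/executions/webhook/jaydee/ansible-all/{flow_id}"
payload = {"tag": ["omv_backup"], "target": ["servers"]}  # "setup" tag removed by this change
req = urllib.request.Request(url, data=json.dumps(payload).encode("utf-8"),
                             headers={"Content-Type": "application/json"}, method="POST")
with urllib.request.urlopen(req) as resp:
    print(resp.status)  # 2xx means the Kestra flow was triggered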

View File

@@ -11,3 +11,5 @@ var_lib_motioneye/*
 */.esphome/build/*
 nextcloud/mariadb/*
 zabbix-server/postgres-data/*
+gitea-runner/*
+immich/library/*
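
The two new patterns keep the gitea-runner state and the immich library out of the backup set. This file is consumed by rsync through --exclude-from (see the rsync command in the script diff below); a minimal sketch of that mechanism, with illustrative source and destination paths:

import subprocess

# Each line of the exclude file is one rsync pattern, as in the diff above.
cmd = ["rsync", "-avz", "--delete",
       "--exclude-from=/myapps/exclude.txt",
       "/source/docker_data/", "/backup/docker_data/"]  # illustrative paths
subprocess.run(cmd, check=True)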

View File

@@ -31,7 +31,7 @@ def signal_handler(sig, frame):
 signal.signal(signal.SIGINT, signal_handler)
 file_path = os.path.realpath(__file__)
 dir_path = os.path.dirname(file_path)
-VERSION="1.0.9"
+VERSION="1.0.10"
 # print(file_path)
 # print(dir_path)
 os.chdir(dir_path)
@@ -110,7 +110,6 @@ for o, a in opts:
     elif o in ("-r", "--restore"):
         _RESTORE = True
         _APP = a
-        print("RESTORE")
     elif o in ("-D", "--dry"):
         _EXECUTE = False
     elif o in ("-T", "--dry"):
@@ -240,10 +239,13 @@ if _STOP:
             continue
         cmnd = f"docker stop {c.split()[-1]}"
         status, running_containers = subprocess.getstatusoutput(cmnd)
-def restore_job():
+def restore_job(_APP):
+    #global VERSION
     logging.info("Starting Restore")
+    print(f"Starting restore : {VERSION}")
     now = datetime.datetime.now()
     STARTTIME = now.strftime("%Y-%m-%d_%H:%M:%S")
+    _DATE = "pick"
     if _APP == "all":
         _DATE = "latest"
         if host == "rpi5.home.lan" or host == "rpi5":
@@ -256,6 +258,7 @@ def restore_job():
         #input("????")
     else:
         _APP = _APP.split(",")
+
     PROGRESS = 0
     topic = "sectorq/amd/restore"
     step = 100 / len(_APP)
@@ -277,7 +280,7 @@ def restore_job():
     if _DATE == "pick":
         cmnd = f"ssh root@amd.home.lan 'ls {BACKUP_DEVICE}/backup/m-server/docker_data'"
         status, output = subprocess.getstatusoutput(cmnd)
-        print(output)
+        # print(output)
         dates = output.splitlines()
         n = 1
         for i in dates:
@@ -304,7 +307,7 @@ def restore_job():
         LATEST_LINK = f"/{host}/{app}/{_DATE}"
         logging.info("Create backup dir")
-        logging.info(cmnd)
+        #logging.info(cmnd)
         #cmnd = "rsync -av --delete {}/ --link-dest {} --exclude=\".cache\" {}".format(SOURCE_DIR, LATEST_LINK, BACKUP_PATH)
@@ -478,23 +481,40 @@ def restore_job():
     cmnd = "ssh root@amd.home.lan 'systemctl suspend &'"
     status, output = subprocess.getstatusoutput(cmnd)
-def backup_job(server):
-    client = mqtt.Client()
-    client.username_pw_set("jaydee", "jaydee1")
-    client.connect("mqtt.home.lan",1883,60)
+def backup_job(pl):
+    client2 = mqtt.Client()
+    client2.username_pw_set("jaydee", "jaydee1")
+    client2.connect("mqtt.home.lan",1883,60)
+    if "log" in pl:
+        if pl["log"] == "debug":
+            logging.info(f'Debug enabled')
+            LOG_FILE = "omv_backup.log"
+            logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG, format='%(asctime)s : %(levelname)s : %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
     logging.info(f'starting backup job')
+    server = pl["host"]
+    if pl["mode"] == "dry":
+        _DRYRUN = True
+        logging.info("Dry run active")
+    else:
+        _DRYRUN = False
+        logging.info("Full mode active")
     finished = []
+    sub_finished = []
     now = datetime.datetime.now()
     STARTTIME = now.strftime("%Y-%m-%d_%H:%M:%S")
     topic = "sectorq/amd/restore"
     msg = {"mode":"restore", "status":"restore","bak_name":"s","host":0,"cur_job":"aaa","start_time":1,"end_time":1,"progress":0,"finished":0,"used_space":0}
-    client.publish(topic, json.dumps(msg),qos=0, retain=True)
-    #client.publish(topic, msg)
+    client2.publish(topic, json.dumps(msg),qos=0, retain=True)
+    #client2.publish(topic, msg)
     topic = "sectorq/amd/backups"
     msg = {"mode":_MODE, "status":"started","bak_name":"complete","host":"","cur_job":"","start_time":STARTTIME,"end_time":"in progress","progress":0,"finished":",".join(finished)}
-    client.publish(topic, json.dumps(msg),qos=0, retain=True)
+    client2.publish(topic, json.dumps(msg),qos=0, retain=True)
     # iterate over files in
     # that directory
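
backup_job now takes the decoded MQTT payload (a dict) instead of a plain hostname and derives the target host, dry-run mode, and debug logging from it. A hypothetical trigger message matching the keys the function reads ("host", "mode", "log"); the command topic is an assumption based on the sectorq/backups/start topic used elsewhere in this file:

import json
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.username_pw_set("jaydee", "jaydee1")   # credentials as hard-coded above
client.connect("mqtt.home.lan", 1883, 60)
client.publish("sectorq/backups/start",       # assumed command topic
               json.dumps({"host": "m-server", "mode": "dry", "log": "debug"}))
client.disconnect()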
@@ -507,7 +527,7 @@ def backup_job(server):
         if not backups[host]["jobs"][b]["active"]:
             logging.info("Backup {} is not active!".format(b))
             msg = {"status":"inactive","bak_name":b,"start_time":"inactive","end_time":"inactive","progress":0}
-            client.publish(topic, json.dumps(msg),qos=0, retain=True)
+            client2.publish(topic, json.dumps(msg),qos=0, retain=True)
             continue
         SOURCE_DIR = backups[host]["jobs"][b]["source"]
@@ -528,7 +548,7 @@ def backup_job(server):
         # msg = {"status":"started","bak_name":b,"start_time":DATETIME,"end_time":"in progress", "progress":0}
         msg = {"mode":_MODE, "status":"started","bak_name":"complete","host":host,"cur_job":b,"start_time":STARTTIME,"end_time":"in progress","progress":0,"finished":",".join(finished)}
-        client.publish(topic, json.dumps(msg),qos=0, retain=True)
+        client2.publish(topic, json.dumps(msg),qos=0, retain=True)
@@ -554,33 +574,36 @@ def backup_job(server):
         progress = 0
         cmd = ['rsync', '-avz', '--delete', BACKUP_DIR, '--link-dest', FULL_BACKUP_LATEST, '--exclude-from=/myapps/exclude.txt', NEW_BACKUP_DIR]
         logging.info(" ".join(cmd))
-        process = subprocess.Popen(cmd,
-                                   stdout=subprocess.PIPE)
-        while process.poll() is None:
-            line = process.stdout.readline().decode("utf-8").split("/")
-            print(line[0])
-            if line[0] in apps:
-                logging.info(f"Working on app {line[0]}")
-                while True:
-                    if line[0] != apps[0]:
-                        del apps[0]
-                        progress = progress + step
-                    else:
-                        break
-                apps.remove(line[0])
-                #print(len(apps))
-                topic = "sectorq/amd/backups"
-                msg = {"mode":_MODE, "status":"started","bak_name":"complete","host":host,"cur_job":b,"sub":line[0],"start_time":STARTTIME,"end_time":"in progress","progress":str(round(progress)) + "%","finished":",".join(finished)}
-                client.publish(topic, json.dumps(msg),qos=0, retain=False)
-                progress = progress + step
+        topic = "sectorq/amd/backups"
+        if not _DRYRUN:
+            process = subprocess.Popen(cmd,
+                                       stdout=subprocess.PIPE)
+            while process.poll() is None:
+                line = process.stdout.readline().decode("utf-8").split("/")
+                #print(line[0])
+                if line[0] in apps:
+                    logging.info(f"Working on app {line[0]}")
+                    while True:
+                        if line[0] != apps[0]:
+                            del apps[0]
+                            progress = progress + step
+                        else:
+                            break
+                    apps.remove(line[0])
+                    sub_finished.append(line[0])
+                    msg = {"mode":_MODE, "status":"started","bak_name":"complete","host":host,"cur_job":b,"sub":line[0],"start_time":STARTTIME,"end_time":"in progress","progress":str(round(progress)) + "%","finished":",".join(finished),"sub_finished":",".join(sub_finished)}
+                    logging.info(f"Sending message with topic {topic} {json.dumps(msg)}")
+                    if not "gitea-runner" == line[0]:
+                        client2.publish(topic, json.dumps(msg),qos=0, retain=False)
+                    progress = progress + step
         cmnd = f"rm -rf {FULL_BACKUP_LATEST}"
         #logging.info(cmnd)
         logging.info("Removing latest link")
         # input("????")
-        if _EXECUTE:
+        if not _DRYRUN:
             status, output = subprocess.getstatusoutput(cmnd)
         if _FIRST:
             cmnd = f"cd {BACKUP_ROOT}; ln -s initial latest"
@@ -589,7 +612,7 @@ def backup_job(server):
             logging.info("Creating new latest link")
             #print(cmnd)
             # input("????")
-            if _EXECUTE:
+            if not _DRYRUN:
                 status, output = subprocess.getstatusoutput(cmnd)
         #Remove old
@@ -597,7 +620,7 @@ def backup_job(server):
         cmnd = f"ls {BACKUP_ROOT}"
-        if _EXECUTE:
+        if not _DRYRUN:
             status, output = subprocess.getstatusoutput(cmnd)
         for f in output.splitlines():
             pattern = r"^[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}-[0-9]{2}-[0-9]{2}$" # regex pattern: string starts with 'abc'
@@ -616,10 +639,54 @@ def backup_job(server):
                 shutil.rmtree(dir_path)
             else:
                 print("No match.")
+        if not _DRYRUN:
+            logging.info(f"Clearing multiple days")
+            multiple_per_day = {}
+            to_remove = []
+            for f in output.splitlines():
+                pattern = r"^[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}-[0-9]{2}-[0-9]{2}$" # regex pattern: string starts with 'abc'
+                if re.match(pattern, f):
+                    cday = f.split("_")[0]
+                    if cday in multiple_per_day:
+                        multiple_per_day[cday].append(f)
+                    else:
+                        multiple_per_day[cday] = [f]
+                    # # logging.info("Match!")
+                    # dt = datetime.datetime.strptime(f, "%Y-%m-%d_%H-%M-%S")
+                    # epoch_time = int(dt.timestamp())
+                    # now_epoch = int(datetime.datetime.now().timestamp())
+                    # x = now_epoch - epoch_time
+                    # # logging.info(epoch_time) # Output: 45
+                    # if x > 2592000:
+                    #     dir_path = f"{BACKUP_ROOT}/{f}"
+                    #     logging.info(f"removing {dir_path}")
+                    #     shutil.rmtree(dir_path)
+                else:
+                    print("No match.")
+            logging.info(f"Clearing multiple days: {multiple_per_day}")
+            for f in multiple_per_day:
+                logging.info(f"Looping multiple_per_day : {f}")
+                if len(multiple_per_day[f]) > 1:
+                    last = multiple_per_day[f][-1]
+                    multiple_per_day[f].pop()
+                    logging.info(f"Last from day: {last}")
+                    for d in multiple_per_day[f]:
+                        logging.info(f"Looping multiple_per_day : {f} : {d}")
+                        dir_path = f"{BACKUP_ROOT}/{d}"
+                        logging.info(f"removing {dir_path}")
+                        shutil.rmtree(dir_path)
         cmnd = f"ls {BACKUP_ROOT}|grep _running"
         logging.info(f"removing obsolete dirs")
-        if _EXECUTE:
+        if not _DRYRUN:
             status, output = subprocess.getstatusoutput(cmnd)
         for f in output.splitlines():
             dir_path = f"{BACKUP_ROOT}/{f}"
@@ -631,7 +698,7 @@ def backup_job(server):
         #msg = {"status":"finished","bak_name":b,"start_time":DATETIME,"end_time":ENDTIME,"progress":0}
         finished.append(b)
         msg = {"mode":_MODE, "status":"finished","bak_name":"complete","host":host,"cur_job":b,"start_time":ENDTIME,"end_time":"in progress","progress":0,"finished":",".join(finished)}
-        client.publish(topic, json.dumps(msg),qos=0, retain=True)
+        client2.publish(topic, json.dumps(msg),qos=0, retain=True)
     logging.info("Getting size of FS")
     cmnd = "df -h /mnt/raid|awk '{ print $3 }'|tail -1"
@@ -646,14 +713,17 @@ def backup_job(server):
     msg = {"mode":_MODE, "status":"finished","bak_name":"complete","host":host,"cur_job":b,"start_time":STARTTIME,"end_time":ENDTIME,"progress":0,"finished":",".join(finished),"used_space":used_space}
     logging.info(msg)
-    client.publish(topic, json.dumps(msg),qos=0, retain=True)
+    client2.publish(topic, json.dumps(msg),qos=0, retain=True)
     topic = "sectorq/backups/start"
     logging.info(f"LALA : {topic}")
-    client.publish(topic, "finished",qos=0, retain=True)
-    client.disconnect()
-    return "finished"
+    client2.publish(topic, "finished",qos=0, retain=True)
+    time.sleep(1)
+    client2.publish(topic, "finished2",qos=0, retain=True)
+    client2.disconnect()
+    #return "finished"
+    if _DRYRUN:
+        return
     topic = "sectorq/amd/restore"
     for s in servers:
         logging.info(f"Restoring {s}")
@@ -670,7 +740,7 @@ def backup_job(server):
             #logging.info(msg)
             send_mqtt_message(topic,msg)
-            continue
+            #continue
         if is_port_open(s,22):
             ssh = paramiko.SSHClient()
             ssh.load_system_host_keys()
@@ -703,7 +773,7 @@ def backup_job(server):
 if _RESTORE:
-    restore_job()
+    restore_job(_APP)
     sys.exit()
 if _SSH_TEST:
     user = "root"
@@ -747,15 +817,25 @@ if _SSH_TEST:
 # Define actions based on payload
 def handle_payload(payload):
-    payload = payload.lower()
-    if payload == 'm-server':
-        logging.info("💡 Starting backup job")
-        backup_job(payload)
-        logging.info(f"💡 Finished backup job")
+    try:
+        pl = json.loads(payload)
+    except:
+        pl = payload
+    logging.debug(pl)
+    if "host" in pl:
+        if pl["host"] == 'm-server':
+            logging.info("💡 Starting backup job")
+            backup_job(pl)
+            logging.info(f"💡 Finished backup job")
+        elif pl["host"] == 'nas':
+            logging.info("💡 Starting backup job")
+            backup_job(pl)
+            logging.info(f"💡 Finished backup job")
+        else:
+            logging.error(f"⚠️ Unknown command: {pl}")
     else:
-        logging.error(f"⚠️ Unknown command: {payload}")
+        logging.error(f"⚠️ Wrong payload: {pl}")
 # Callback when connected
 def on_connect(client, userdata, flags, rc):
     if rc == 0:
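
The reworked handler decodes JSON payloads and dispatches on their "host" field, falling back to the raw string (which then fails the "host" check) when decoding fails. Illustrative calls; note that backup_job reads pl["mode"], so a JSON payload without that key would raise a KeyError further down:

handle_payload('{"host": "m-server", "mode": "dry"}')  # starts a dry-run backup
handle_payload('{"host": "nas", "mode": "full"}')      # same dispatch path for the NAS
handle_payload('m-server')                             # old-style payload: logged as wrong payload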
@@ -783,5 +863,7 @@ if USE_TLS:
 # Connect and loop forever
 client.connect(BROKER, PORT, keepalive=60)
+client.publish("sectorq/backups/start", "finished", qos=0, retain=False)
+client.publish("sectorq/backups/start", "finished", qos=0, retain=True)
 client.loop_forever()