Mirror of https://gitlab.sectorq.eu/jaydee/omv_backup.git, synced 2025-09-13 12:10:12 +02:00
Compare commits: d3e15e973a...main (43 commits)
Commits in this compare:
122ef64ad7
99f673996e
68e23da03b
1c30fb9995
b94b61ba5d
26c3245cf3
1510dc2e8d
d949188a34
298c9aee23
808f92c4d1
43f6d2abcb
81c7ead7b2
cd818da774
a1dfbd664c
571219881d
0c07fde85a
f46eacf627
92226734ef
d3359e9a68
fdbe4eebe1
3a3faad97e
8be3e20523
126ab1813b
28efc95b4d
f6a106fd91
bbe4d72666
b536c8ecb1
7a8130c3f0
5756798269
d6af0c24b5
6f9a2bba67
11bc56ecb3
b4032eca7e
f75ac2eb79
a555567c4b
de63a1e9aa
d3eab9f50e
72a2fa5710
b3be50bfdd
f32ada9ad5
d6492ebf80
7cee7570b4
ce7c855808
@@ -25,6 +25,6 @@ build-job: # This job runs in the build stage, which runs first.
script:
- column=":"
- echo "${flow_id}"
- curl -X POST https://kestra.sectorq.eu/api/v1/executions/webhook/jaydee/ansible-all/${flow_id} -d '{"tag":["setup","omv_backup"],"target":["servers"]}' -H "Content-Type${column} application/json"
- curl -X POST https://kestra.sectorq.eu/api/v1/executions/webhook/jaydee/ansible-all/${flow_id} -d '{"tag":["omv_backup"],"target":["servers"]}' -H "Content-Type${column} application/json"
rules:
- if: '$CI_COMMIT_MESSAGE =~ /build/'
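The webhook call above triggers the jaydee/ansible-all flow scoped to the omv_backup tag; the column=":" indirection keeps the literal colon out of the inline command so the Content-Type header survives CI parsing. Below is a hedged sketch of the same call in Python with only the standard library; reading flow_id from the environment is an assumption made for a runnable example, in the pipeline it is a CI variable.

```python
# Hedged sketch, not part of the diff: the Kestra webhook trigger the CI
# script performs with curl, expressed with Python's standard library.
import json
import os
import urllib.request

def trigger_flow(flow_id, tags):
    # POST the webhook that starts the jaydee/ansible-all flow with the given tags
    url = f"https://kestra.sectorq.eu/api/v1/executions/webhook/jaydee/ansible-all/{flow_id}"
    body = json.dumps({"tag": tags, "target": ["servers"]}).encode("utf-8")
    req = urllib.request.Request(url, data=body,
                                 headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:
        return resp.status

if __name__ == "__main__":
    # flow_id comes from the environment here purely for illustration
    trigger_flow(os.environ.get("flow_id", ""), ["omv_backup"])
```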
@@ -11,3 +11,5 @@ var_lib_motioneye/*
*/.esphome/build/*
nextcloud/mariadb/*
zabbix-server/postgres-data/*
gitea-runner/*
immich/library/*
omv_backup.py
@@ -31,7 +31,7 @@ def signal_handler(sig, frame):
signal.signal(signal.SIGINT, signal_handler)
file_path = os.path.realpath(__file__)
dir_path = os.path.dirname(file_path)
VERSION="1.0.9"
VERSION="1.0.10"
# print(file_path)
# print(dir_path)
os.chdir(dir_path)
@@ -110,7 +110,6 @@ for o, a in opts:
elif o in ("-r", "--restore"):
_RESTORE = True
_APP = a
print("RESTORE")
elif o in ("-D", "--dry"):
_EXECUTE = False
elif o in ("-T", "--dry"):
@@ -240,10 +239,13 @@ if _STOP:
continue
cmnd = f"docker stop {c.split()[-1]}"
status, running_containers = subprocess.getstatusoutput(cmnd)
def restore_job():
def restore_job(_APP):
#global VERSION
logging.info("Starting Restore")
print(f"Starting restore : {VERSION}")
now = datetime.datetime.now()
STARTTIME = now.strftime("%Y-%m-%d_%H:%M:%S")
_DATE = "pick"
if _APP == "all":
_DATE = "latest"
if host == "rpi5.home.lan" or host == "rpi5":
@@ -256,6 +258,7 @@ def restore_job():
#input("????")
else:
_APP = _APP.split(",")

PROGRESS = 0
topic = "sectorq/amd/restore"
step = 100 / len(_APP)
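With restore_job now taking _APP, a comma-separated list restores several apps in one run and each app accounts for an equal slice of the progress reported over MQTT (step = 100 / len(_APP)). A minimal sketch of that arithmetic, assuming PROGRESS is advanced by step once per restored app; the app names are placeholders.

```python
# Minimal sketch of the progress arithmetic, assuming PROGRESS advances by
# `step` once per restored application; the app list is illustrative only.
_APP = "nextcloud,gitea,immich".split(",")

PROGRESS = 0
step = 100 / len(_APP)          # three apps -> one third of the bar each

for app in _APP:
    # ... the actual restore of `app` happens here in the script ...
    PROGRESS += step
    print(f"{app}: {round(PROGRESS)}%")   # 33%, 67%, 100%
```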
@@ -277,7 +280,7 @@ def restore_job():
if _DATE == "pick":
cmnd = f"ssh root@amd.home.lan 'ls {BACKUP_DEVICE}/backup/m-server/docker_data'"
status, output = subprocess.getstatusoutput(cmnd)
print(output)
# print(output)
dates = output.splitlines()
n = 1
for i in dates:
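When _DATE is "pick", the snapshot directories on amd.home.lan are listed over ssh and numbered so a specific date can be chosen. A hedged sketch of that enumeration on a canned listing instead of the ssh call; the selection prompt itself sits outside this hunk.

```python
# Hedged sketch of the numbered date menu, on a canned listing instead of
# the remote ssh call; directory names are made up.
output = "2025-03-01_02-00-00\n2025-03-02_02-00-00\nlatest"

dates = output.splitlines()
n = 1
for i in dates:
    print(f"{n}) {i}")      # numbered list the operator picks from
    n += 1
```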
@@ -304,7 +307,7 @@ def restore_job():
LATEST_LINK = f"/{host}/{app}/{_DATE}"

logging.info("Create backup dir")
logging.info(cmnd)
#logging.info(cmnd)

#cmnd = "rsync -av --delete {}/ --link-dest {} --exclude=\".cache\" {}".format(SOURCE_DIR, LATEST_LINK, BACKUP_PATH)
@@ -478,23 +481,40 @@ def restore_job():
cmnd = "ssh root@amd.home.lan 'systemctl suspend &'"
status, output = subprocess.getstatusoutput(cmnd)

def backup_job(server):
client = mqtt.Client()
client.username_pw_set("jaydee", "jaydee1")
client.connect("mqtt.home.lan",1883,60)
def backup_job(pl):
client2 = mqtt.Client()
client2.username_pw_set("jaydee", "jaydee1")
client2.connect("mqtt.home.lan",1883,60)
if "log" in pl:
if pl["log"] == "debug":
logging.info(f'Debug enabled')
LOG_FILE = "omv_backup.log"
logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG, format='%(asctime)s : %(levelname)s : %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logging.info(f'starting backup job')

server = pl["host"]
if pl["mode"] == "dry":
_DRYRUN = True
logging.info("Dry run active")
else:
_DRYRUN = False
logging.info("Full mode active")

finished = []
sub_finished = []
now = datetime.datetime.now()
STARTTIME = now.strftime("%Y-%m-%d_%H:%M:%S")
topic = "sectorq/amd/restore"
msg = {"mode":"restore", "status":"restore","bak_name":"s","host":0,"cur_job":"aaa","start_time":1,"end_time":1,"progress":0,"finished":0,"used_space":0}

client.publish(topic, json.dumps(msg),qos=0, retain=True)
#client.publish(topic, msg)
client2.publish(topic, json.dumps(msg),qos=0, retain=True)
#client2.publish(topic, msg)
topic = "sectorq/amd/backups"
msg = {"mode":_MODE, "status":"started","bak_name":"complete","host":"","cur_job":"","start_time":STARTTIME,"end_time":"in progress","progress":0,"finished":",".join(finished)}
client.publish(topic, json.dumps(msg),qos=0, retain=True)
client2.publish(topic, json.dumps(msg),qos=0, retain=True)

# iterate over files in
# that directory
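backup_job now receives the parsed MQTT payload instead of a bare server name, so the target host, dry/full mode and an optional debug-log switch all travel in one JSON object. A hedged example of the payload shape implied by the keys read here (host, mode, log); the concrete values are illustrative.

```python
# Hedged example of the payload backup_job(pl) reads; values are illustrative.
import json

raw = '{"host": "m-server", "mode": "dry", "log": "debug"}'
pl = json.loads(raw)

server = pl["host"]                  # machine whose jobs get backed up
_DRYRUN = pl["mode"] == "dry"        # "dry" skips rsync and deletions
debug = pl.get("log") == "debug"     # optional switch to DEBUG logging

print(server, _DRYRUN, debug)
```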
@@ -507,7 +527,7 @@ def backup_job(server):
if not backups[host]["jobs"][b]["active"]:
logging.info("Backup {} is not active!".format(b))
msg = {"status":"inactive","bak_name":b,"start_time":"inactive","end_time":"inactive","progress":0}
client.publish(topic, json.dumps(msg),qos=0, retain=True)
client2.publish(topic, json.dumps(msg),qos=0, retain=True)
continue

SOURCE_DIR = backups[host]["jobs"][b]["source"]
@@ -528,7 +548,7 @@ def backup_job(server):
# msg = {"status":"started","bak_name":b,"start_time":DATETIME,"end_time":"in progress", "progress":0}
msg = {"mode":_MODE, "status":"started","bak_name":"complete","host":host,"cur_job":b,"start_time":STARTTIME,"end_time":"in progress","progress":0,"finished":",".join(finished)}

client.publish(topic, json.dumps(msg),qos=0, retain=True)
client2.publish(topic, json.dumps(msg),qos=0, retain=True)

@@ -554,12 +574,14 @@ def backup_job(server):
progress = 0
cmd = ['rsync', '-avz', '--delete', BACKUP_DIR, '--link-dest', FULL_BACKUP_LATEST, '--exclude-from=/myapps/exclude.txt', NEW_BACKUP_DIR]
logging.info(" ".join(cmd))
topic = "sectorq/amd/backups"

if not _DRYRUN:
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE)

while process.poll() is None:
line = process.stdout.readline().decode("utf-8").split("/")
print(line[0])
#print(line[0])
if line[0] in apps:
logging.info(f"Working on app {line[0]}")
while True:
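The rsync call is a hard-link incremental: changed files are copied into NEW_BACKUP_DIR while unchanged ones are hard-linked against FULL_BACKUP_LATEST via --link-dest, and the first path component of each line rsync prints is matched against the app list to report per-app progress. A small sketch of that stdout parsing on canned output; the paths and app names are placeholders, not taken from the repository.

```python
# Sketch of the stdout parsing: rsync prints one relative path per line and
# the first path component names the app directory currently being copied.
apps = ["nextcloud", "gitea", "immich"]

sample_output = [
    "nextcloud/config/config.php",
    "nextcloud/data/admin/files/notes.txt",
    "gitea/gitea/conf/app.ini",
]

current = None
for raw_line in sample_output:
    first = raw_line.split("/")[0]       # same split("/") as in the diff
    if first in apps and first != current:
        current = first
        print(f"Working on app {current}")
```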
@@ -569,10 +591,11 @@ def backup_job(server):
else:
break
apps.remove(line[0])
#print(len(apps))
topic = "sectorq/amd/backups"
msg = {"mode":_MODE, "status":"started","bak_name":"complete","host":host,"cur_job":b,"sub":line[0],"start_time":STARTTIME,"end_time":"in progress","progress":str(round(progress)) + "%","finished":",".join(finished)}
client.publish(topic, json.dumps(msg),qos=0, retain=False)
sub_finished.append(line[0])
msg = {"mode":_MODE, "status":"started","bak_name":"complete","host":host,"cur_job":b,"sub":line[0],"start_time":STARTTIME,"end_time":"in progress","progress":str(round(progress)) + "%","finished":",".join(finished),"sub_finished":",".join(sub_finished)}
logging.info(f"Sending message with topic {topic} {json.dumps(msg)}")
if not "gitea-runner" == line[0]:
client2.publish(topic, json.dumps(msg),qos=0, retain=False)
progress = progress + step

cmnd = f"rm -rf {FULL_BACKUP_LATEST}"
@@ -580,7 +603,7 @@ def backup_job(server):
#logging.info(cmnd)
logging.info("Removing latest link")
# input("????")
if _EXECUTE:
if not _DRYRUN:
status, output = subprocess.getstatusoutput(cmnd)
if _FIRST:
cmnd = f"cd {BACKUP_ROOT}; ln -s initial latest"
@@ -589,7 +612,7 @@ def backup_job(server):
logging.info("Creating new latest link")
#print(cmnd)
# input("????")
if _EXECUTE:
if not _DRYRUN:
status, output = subprocess.getstatusoutput(cmnd)

#Remove old
@@ -597,7 +620,7 @@ def backup_job(server):

cmnd = f"ls {BACKUP_ROOT}"

if _EXECUTE:
if not _DRYRUN:
status, output = subprocess.getstatusoutput(cmnd)
for f in output.splitlines():
pattern = r"^[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}-[0-9]{2}-[0-9]{2}$" # regex pattern: string starts with 'abc'
@@ -616,10 +639,54 @@ def backup_job(server):
shutil.rmtree(dir_path)
else:
print("No match.")
if not _DRYRUN:
logging.info(f"Clearing multiple days")
multiple_per_day = {}
to_remove = []
for f in output.splitlines():
pattern = r"^[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}-[0-9]{2}-[0-9]{2}$" # regex pattern: string starts with 'abc'

if re.match(pattern, f):
cday = f.split("_")[0]
if cday in multiple_per_day:
multiple_per_day[cday].append(f)
else:
multiple_per_day[cday] = [f]

# # logging.info("Match!")
# dt = datetime.datetime.strptime(f, "%Y-%m-%d_%H-%M-%S")
# epoch_time = int(dt.timestamp())
# now_epoch = int(datetime.datetime.now().timestamp())

# x = now_epoch - epoch_time
# # logging.info(epoch_time) # Output: 45
# if x > 2592000:
# dir_path = f"{BACKUP_ROOT}/{f}"
# logging.info(f"removing {dir_path}")
# shutil.rmtree(dir_path)
else:
print("No match.")
logging.info(f"Clearing multiple days: {multiple_per_day}")
for f in multiple_per_day:
logging.info(f"Looping multiple_per_day : {f}")
if len(multiple_per_day[f]) > 1:
last = multiple_per_day[f][-1]
multiple_per_day[f].pop()
logging.info(f"Last from day: {last}")
for d in multiple_per_day[f]:
logging.info(f"Looping multiple_per_day : {f} : {d}")
dir_path = f"{BACKUP_ROOT}/{d}"
logging.info(f"removing {dir_path}")
shutil.rmtree(dir_path)

cmnd = f"ls {BACKUP_ROOT}|grep _running"
logging.info(f"removing obsolete dirs")
if _EXECUTE:
if not _DRYRUN:
status, output = subprocess.getstatusoutput(cmnd)
for f in output.splitlines():
dir_path = f"{BACKUP_ROOT}/{f}"
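The new retention pass replaces the commented-out 30-day age check: snapshot directories named YYYY-MM-DD_HH-MM-SS are grouped by calendar day and every snapshot except the last one of each day is removed. A compact sketch of that grouping on an in-memory list, so nothing is actually deleted; the directory names are made up.

```python
# Compact sketch of the keep-last-per-day rule on an in-memory list.
import re

snapshots = [
    "2025-03-01_02-00-00",
    "2025-03-01_14-30-00",
    "2025-03-02_02-00-00",
]

pattern = r"^[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}-[0-9]{2}-[0-9]{2}$"
multiple_per_day = {}
for name in snapshots:
    if re.match(pattern, name):
        multiple_per_day.setdefault(name.split("_")[0], []).append(name)

for day, dirs in multiple_per_day.items():
    for doomed in dirs[:-1]:                 # everything but the day's last
        print(f"would remove {doomed}")      # shutil.rmtree in the script
    print(f"keeping {dirs[-1]}")
```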
@@ -631,7 +698,7 @@ def backup_job(server):
#msg = {"status":"finished","bak_name":b,"start_time":DATETIME,"end_time":ENDTIME,"progress":0}
finished.append(b)
msg = {"mode":_MODE, "status":"finished","bak_name":"complete","host":host,"cur_job":b,"start_time":ENDTIME,"end_time":"in progress","progress":0,"finished":",".join(finished)}
client.publish(topic, json.dumps(msg),qos=0, retain=True)
client2.publish(topic, json.dumps(msg),qos=0, retain=True)

logging.info("Getting size of FS")
cmnd = "df -h /mnt/raid|awk '{ print $3 }'|tail -1"
@@ -646,14 +713,17 @@ def backup_job(server):
msg = {"mode":_MODE, "status":"finished","bak_name":"complete","host":host,"cur_job":b,"start_time":STARTTIME,"end_time":ENDTIME,"progress":0,"finished":",".join(finished),"used_space":used_space}
logging.info(msg)

client.publish(topic, json.dumps(msg),qos=0, retain=True)
client2.publish(topic, json.dumps(msg),qos=0, retain=True)
topic = "sectorq/backups/start"
logging.info(f"LALA : {topic}")
client.publish(topic, "finished",qos=0, retain=True)
client.disconnect()
return "finished"

client2.publish(topic, "finished",qos=0, retain=True)
time.sleep(1)
client2.publish(topic, "finished2",qos=0, retain=True)
client2.disconnect()
#return "finished"

if _DRYRUN:
return
topic = "sectorq/amd/restore"
for s in servers:
logging.info(f"Restoring {s}")
@@ -670,7 +740,7 @@ def backup_job(server):
#logging.info(msg)

send_mqtt_message(topic,msg)
continue
#continue
if is_port_open(s,22):
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
@@ -703,7 +773,7 @@ def backup_job(server):

if _RESTORE:
restore_job()
restore_job(_APP)
sys.exit()
if _SSH_TEST:
user = "root"
@@ -747,15 +817,25 @@ if _SSH_TEST:

# Define actions based on payload
def handle_payload(payload):
payload = payload.lower()
if payload == 'm-server':
try:
pl = json.loads(payload)
except:
pl = payload
logging.debug(pl)

if "host" in pl:
if pl["host"] == 'm-server':
logging.info("💡 Starting backup job")
backup_job(payload)
backup_job(pl)
logging.info(f"💡 Finished backup job")
elif pl["host"] == 'nas':
logging.info("💡 Starting backup job")
backup_job(pl)
logging.info(f"💡 Finished backup job")

else:
logging.error(f"⚠️ Unknown command: {payload}")

logging.error(f"⚠️ Unknown command: {pl}")
else:
logging.error(f"⚠️ Wrong payload: {pl}")
# Callback when connected
def on_connect(client, userdata, flags, rc):
if rc == 0:
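handle_payload now parses the incoming message as JSON and dispatches on the host field, so both m-server and nas payloads end up in backup_job(pl). A hedged illustration of publishing such a payload with paho-mqtt; the topic name is a placeholder (the subscribed topic is not visible in this hunk), while broker and credentials mirror the ones used elsewhere in the diff.

```python
# Hedged illustration of triggering a backup over MQTT after this change.
import json
import paho.mqtt.publish as publish

payload = json.dumps({"host": "m-server", "mode": "full"})
publish.single(
    "sectorq/backups/command",          # placeholder topic, an assumption
    payload,
    hostname="mqtt.home.lan",
    auth={"username": "jaydee", "password": "jaydee1"},
)
```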
@@ -783,5 +863,7 @@ if USE_TLS:

# Connect and loop forever
client.connect(BROKER, PORT, keepalive=60)
client.publish("sectorq/backups/start", "finished", qos=0, retain=False)

client.publish("sectorq/backups/start", "finished", qos=0, retain=True)
client.loop_forever()
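The last change publishes the sectorq/backups/start marker with retain=True, so a subscriber that connects after the run still receives the final state immediately. A hedged sketch of such a late subscriber, mirroring the paho-mqtt client usage already present in the script.

```python
# Hedged sketch: a late subscriber receives the retained "finished" marker
# on sectorq/backups/start as soon as it subscribes.
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    print(msg.topic, msg.payload.decode(), "retained:", msg.retain)
    client.disconnect()

cli = mqtt.Client()
cli.username_pw_set("jaydee", "jaydee1")
cli.on_message = on_message
cli.connect("mqtt.home.lan", 1883, 60)
cli.subscribe("sectorq/backups/start")
cli.loop_forever()
```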