encrypt backups part two
@@ -29,8 +29,6 @@ class BackupManager:
        self.app = app
        self.encryption_manager = EncryptionManager(logger, app)

    def cancel_and_delete_privileged_backup(self, delete_path: str):
        """Cancels a running system backup and deletes the target directory in one atomic pkexec call."""
        if not self.process or self.process.poll() is not None:
@@ -61,7 +59,6 @@ class BackupManager:
        except ProcessLookupError:
            self.logger.log("Backup process already terminated before action.")
            # Still try to delete the directory
            self.delete_privileged_path(delete_path)
        except Exception as e:
            self.logger.log(
@@ -90,8 +87,6 @@ class BackupManager:
        """Runs the deletion and puts a message on the queue when done."""
        try:
            info_file = f"{path}.txt"
            # Build a script to remove both the folder and the info file in one go.
            # Use -f to avoid errors if the info file doesn't exist.
            script_content = f"""
rm -rf '{path}'
rm -f '{info_file}'
@@ -108,7 +103,7 @@ rm -f '{info_file}'
            queue.put(('deletion_complete', False))

    def cancel_backup(self):
-        if self.process and self.process.poll() is None: # Check if process is still running
+        if self.process and self.process.poll() is None:
            self.logger.log("Attempting to cancel backup...")
            try:
                pgid = os.getpgid(self.process.pid)
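
A note on the cancellation path above: signalling self.process.pid alone would leave rsync's child processes running, so the code resolves the process group id and signals the whole group. A minimal standalone sketch of that pattern (assuming the child was started in its own session, which this diff does not show):

    import os
    import signal
    import subprocess

    # start_new_session=True puts the child in its own process group,
    # so killpg() below cannot take down our own process.
    proc = subprocess.Popen(["rsync", "-av", "src/", "dst/"], start_new_session=True)

    def cancel(proc: subprocess.Popen) -> None:
        if proc.poll() is None:  # still running
            os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
            # rsync then exits with 143 (or Popen reports -15), which the
            # status mapping further down treats as 'cancelled'.
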
@@ -129,6 +124,7 @@ rm -f '{info_file}'
            self.logger.log("No active backup process to cancel.")

    def start_backup(self, queue, source_path: str, dest_path: str, is_system: bool, is_dry_run: bool = False, exclude_files: Optional[List[Path]] = None, source_size: int = 0, is_compressed: bool = False, is_encrypted: bool = False, mode: str = "incremental", password: str = None):
        self.is_system_process = is_system
        thread = threading.Thread(target=self._run_backup_path, args=(
            queue, source_path, dest_path, is_system, is_dry_run, exclude_files, source_size, is_compressed, is_encrypted, mode, password))
        thread.daemon = True
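
start_backup hands the long-running work to a daemon thread and reports back through the queue. For orientation, a hypothetical consumer on the UI side; the message names appear elsewhere in this diff, but the polling function itself is illustrative only:

    from queue import Empty, Queue

    def poll_queue(q: Queue) -> None:
        # Called periodically by the UI loop; drains everything posted so far.
        while True:
            try:
                kind, payload = q.get_nowait()
            except Empty:
                return
            if kind == 'completion':            # backup/restore finished
                print("done:", payload)
            elif kind == 'error':               # setup or rsync failure
                print("error:", payload)
            elif kind == 'deletion_complete':   # privileged delete finished
                print("deleted ok:", payload)
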
@@ -161,8 +157,6 @@ rm -f '{info_file}'
        archive_name = os.path.basename(dest_path) + ".tar.gz"
        archive_path = os.path.join(parent_dir, archive_name)

        # Using -C is important to avoid storing the full path in the tarball
        # Ensure paths with spaces are quoted for the shell script
        tar_command = f"tar -czf '{archive_path}' -C '{parent_dir}' '{os.path.basename(dest_path)}'"
        rm_command = f"rm -rf '{dest_path}'"

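On the quoting comment above: wrapping interpolated paths in single quotes works until a path itself contains a single quote. A sketch of the same command built with the standard library's shlex.quote instead; this is an alternative, not what the commit does:

    import os
    import shlex

    def build_tar_command(dest_path: str) -> str:
        # -C makes tar enter the parent directory first, so the archive stores
        # "snapshot/..." instead of the absolute "/path/to/snapshot/..." entries.
        parent_dir = os.path.dirname(dest_path)
        archive = os.path.join(parent_dir, os.path.basename(dest_path) + ".tar.gz")
        return (f"tar -czf {shlex.quote(archive)} "
                f"-C {shlex.quote(parent_dir)} {shlex.quote(os.path.basename(dest_path))}")
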
@@ -186,7 +180,6 @@ set -e
            self.logger.log("Compression and cleanup script failed.")
            return False
        else:
            # For non-system backups, run commands directly
            try:
                self.logger.log(f"Executing local command: {tar_command}")
                tar_result = subprocess.run(tar_command, shell=True, capture_output=True, text=True, check=True)
@@ -206,36 +199,32 @@ set -e
            self.logger.log(f"An unexpected error occurred during local compression/cleanup: {e}")
            return False

    def _run_backup_path(self, queue, source_path: str, dest_path: str, is_system: bool, is_dry_run: bool, exclude_files: Optional[List[Path]], source_size: int, is_compressed: bool, is_encrypted: bool, mode: str, password: str):
        try:
            mount_point = None
            if is_encrypted:
-                # For encrypted backups, the dest_path is the container file.
-                container_path = dest_path + ".luks"
                # Estimate container size to be 110% of source size
+                base_dest_path = os.path.dirname(dest_path)
                size_gb = int(source_size / (1024**3) * 1.1) + 1
-                mount_point = self.encryption_manager.setup_encrypted_backup(queue, container_path, size_gb, password)
+                mount_point = self.encryption_manager.setup_encrypted_backup(queue, base_dest_path, size_gb, password)
                if not mount_point:
-                    return # Error or cancellation already handled in setup method
+                    return

                # The actual destination for rsync is the mount point
-                rsync_dest = mount_point
+                rsync_base_dest = mount_point
+                rsync_dest = os.path.join(rsync_base_dest, os.path.basename(dest_path))
            else:
+                rsync_base_dest = os.path.dirname(dest_path)
+                rsync_dest = dest_path

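The sizing rule above allocates roughly 110% of the source plus one gigabyte of headroom. Worked through for a 50 GiB source:

    source_size = 50 * 1024**3                        # 50 GiB source
    size_gb = int(source_size / (1024**3) * 1.1) + 1  # 50 * 1.1 = 55.0 -> 55 -> 56
    # The +1 also guarantees a minimum of 1 GB for very small sources, since
    # int() truncates the fractional part toward zero.
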
            self.logger.log(
-                f"Starting backup from '{source_path}' to '{dest_path}'...")
+                f"Starting backup from '{source_path}' to '{rsync_dest}'...")

            if os.path.isdir(source_path) and not source_path.endswith('/'):
                source_path += '/'

-            parent_dest = os.path.dirname(dest_path)
            # Ensure the parent directory exists. For system backups, rsync with pkexec will create the final destination.
            # For user backups, this creates the destination.
-            if not os.path.exists(parent_dest):
-                os.makedirs(parent_dest, exist_ok=True)
+            if not os.path.exists(rsync_base_dest):
+                os.makedirs(rsync_base_dest, exist_ok=True)

-            latest_backup_path = self._find_latest_backup(parent_dest)
+            latest_backup_path = self._find_latest_backup(rsync_base_dest)

            command = []
            if is_system:
@@ -243,7 +232,7 @@ set -e
            else:
                command.extend(['rsync', '-av'])

-            if mode == "incremental" and latest_backup_path and not is_dry_run and not is_encrypted:
+            if mode == "incremental" and latest_backup_path and not is_dry_run:
                self.logger.log(f"Using --link-dest='{latest_backup_path}'")
                command.append(f"--link-dest={latest_backup_path}")

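Dropping `and not is_encrypted` is the point of this hunk: --link-dest hard-links unchanged files against the previous snapshot, which requires both snapshots to sit on the same filesystem, and the mounted LUKS container now provides exactly that. A standalone sketch of the mechanism, with hypothetical paths and names:

    import subprocess

    def run_incremental(src: str, new_snap: str, prev_snap: str) -> int:
        # Files unchanged since prev_snap appear in new_snap as hard links,
        # so each snapshot looks complete but only the delta costs space.
        cmd = ["rsync", "-av", f"--link-dest={prev_snap}",
               src.rstrip("/") + "/", new_snap]
        return subprocess.run(cmd).returncode
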
@@ -273,9 +262,9 @@ set -e
            status = 'error'
            if return_code == 0:
                status = 'success'
-            elif return_code in [23, 24]: # rsync warnings
+            elif return_code in [23, 24]:
                status = 'warning'
-            elif return_code in [143, -15, 15, -9]: # SIGTERM/SIGKILL
+            elif return_code in [143, -15, 15, -9]:
                status = 'cancelled'

            if status in ['success', 'warning'] and not is_dry_run:
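
For reference, the codes being classified: rsync exits 23 on a partial transfer and 24 when source files vanish mid-transfer (both routine warnings), while 143 is the shell-style encoding of SIGTERM (128 + 15) and -15/-9 are how Popen.returncode reports SIGTERM/SIGKILL. The same mapping written as a table, equivalent to the if/elif chain:

    RSYNC_STATUS = {
        0: 'success',
        23: 'warning',     # partial transfer due to error
        24: 'warning',     # some source files vanished during transfer
        143: 'cancelled',  # SIGTERM, shell-style (128 + 15)
        15: 'cancelled',
        -15: 'cancelled',  # SIGTERM as Popen.returncode reports it
        -9: 'cancelled',   # SIGKILL
    }

    def classify(return_code: int) -> str:
        return RSYNC_STATUS.get(return_code, 'error')
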
@@ -284,15 +273,10 @@ set -e
            self.logger.log(f"latest_backup_path: {latest_backup_path}")
            self.logger.log(f"source_size (from UI): {source_size}")

-            if mode == "full": # If explicitly a full backup
+            if mode == "full" or latest_backup_path is None:
                final_size = total_size if total_size > 0 else source_size
                self.logger.log(f"Explicit Full backup: final_size set to {final_size} (total_size if >0 else source_size)")
-            elif latest_backup_path is None: # This was the first backup to this location (implicitly full)
-                final_size = total_size if total_size > 0 else source_size
-                self.logger.log(f"Implicit Full backup (first to location): final_size set to {final_size} (total_size if >0 else source_size)")
-            else: # This was an incremental backup
+            else:
                final_size = transferred_size
                self.logger.log(f"Incremental backup: final_size set to {final_size} (transferred_size)")

            if is_compressed:
                self.logger.log(f"Compression requested for {dest_path}")
@@ -320,13 +304,13 @@ set -e
                self.logger.log(
                    f"Backup to '{dest_path}' completed.")
        finally:
-            if is_encrypted and 'mount_point' in locals() and mount_point:
-                self.encryption_manager.cleanup_encrypted_backup(f"pybackup_{os.path.basename(dest_path + '.luks')}", mount_point)
+            if is_encrypted and mount_point:
+                mapper_name = f"pybackup_{os.path.basename(os.path.dirname(dest_path))}"
+                self.encryption_manager.cleanup_encrypted_backup(mapper_name, mount_point)
            self.process = None

    def _create_info_file(self, dest_path: str, filename: str, source_size: int):
        try:
            # Info file is now stored in the parent directory of the backup folder.
            parent_dir = os.path.dirname(dest_path)
            info_file_path = os.path.join(parent_dir, filename)

@@ -365,7 +349,6 @@ set -e
        total_size = 0
        try:
-            try:
            # Force C locale to ensure rsync output is in English for parsing
            env = os.environ.copy()
            env['LC_ALL'] = 'C'
            self.process = subprocess.Popen(
@@ -381,11 +364,11 @@ set -e
                queue.put(('error', None))
                return 0, 0

-        if self.process is None: # This check might be redundant if exceptions are caught, but good for safety
+        if self.process is None:
            self.logger.log(
                "Error: subprocess.Popen returned None for rsync process (after exception handling).")
            queue.put(('error', None))
-            return 0, 0 # Exit early if process didn't start
+            return 0, 0

        progress_regex = re.compile(r'\s*(\d+)%\s+')
        output_lines = []
@@ -394,7 +377,7 @@ set -e
        full_stdout = []
        for line in iter(self.process.stdout.readline, ''):
            stripped_line = line.strip()
-            self.logger.log(f"Rsync stdout line: {stripped_line}") # Log every line
+            self.logger.log(f"Rsync stdout line: {stripped_line}")
            full_stdout.append(stripped_line)

            match = progress_regex.search(stripped_line)
@@ -409,23 +392,20 @@ set -e
        if self.process.stderr:
            stderr_output = self.process.stderr.read()
            if stderr_output:
-                self.logger.log(f"Rsync Stderr: {stderr_output.strip()}") # Log stderr
-                full_stdout.extend(stderr_output.strip().split('\n')) # Add stderr to output_lines for parsing
+                self.logger.log(f"Rsync Stderr: {stderr_output.strip()}")
+                full_stdout.extend(stderr_output.strip().split('\n'))

-        output_lines = full_stdout # Use the collected stdout/stderr for parsing
+        output_lines = full_stdout

        # After process completion, parse the output for transferred size.
        # This is tricky because the output format can vary. We'll try to find the
        # summary line from --info=progress2, which looks like "sent X bytes received Y bytes".
        transferred_size = 0
        total_size = 0
        summary_regex = re.compile(r"sent ([\d,.]+) bytes\s+received ([\d,.]+) bytes")
        total_size_regex = re.compile(r"total size is ([\d,.]+) speedup")

-        for line in reversed(output_lines): # Search from the end, as summary is usually last
+        for line in reversed(output_lines):
            match = summary_regex.search(line)
-            if match and transferred_size == 0: # Only set if not already found
+            if match and transferred_size == 0:
                try:
                    sent_str = match.group(1).replace(',', '').replace('.', '')
                    received_str = match.group(2).replace(',', '').replace('.', '')
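
To make the parsing above concrete, here is the summary regex run against a typical rsync summary line. Stripping both ',' and '.' before int() tolerates either English or German digit grouping; with LC_ALL=C forced this matters less, but it is harmless:

    import re

    summary_regex = re.compile(r"sent ([\d,.]+) bytes\s+received ([\d,.]+) bytes")
    line = "sent 1,234,567 bytes  received 35 bytes  823,068.00 bytes/sec"

    m = summary_regex.search(line)
    if m:
        sent = int(m.group(1).replace(',', '').replace('.', ''))      # 1234567
        received = int(m.group(2).replace(',', '').replace('.', ''))  # 35
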
@@ -439,7 +419,7 @@ set -e
                        f"Could not parse sent/received bytes from line: '{line}'. Error: {e}")

                total_match = total_size_regex.search(line)
-                if total_match and total_size == 0: # Only set if not already found
+                if total_match and total_size == 0:
                    try:
                        total_size_str = total_match.group(1).replace(',', '').replace('.', '')
                        total_size = int(total_size_str)
@@ -450,7 +430,6 @@ set -e
        self.logger.log(f"_execute_rsync final parsed values: transferred_size={transferred_size}, total_size={total_size}")

        if transferred_size == 0:
-            # Fallback for --stats format if the regex fails
            bytes_sent = 0
            bytes_received = 0
            for line in output_lines:
@@ -487,13 +466,10 @@ set -e

    def start_restore(self, source_path: str, dest_path: str, is_compressed: bool):
        """Starts a restore process in a separate thread."""
-        # We need the queue from the app instance to report progress
-        # A bit of a hack, but avoids passing the queue all the way down from the UI
        try:
            queue = self.app.queue
        except AttributeError:
            self.logger.log("Could not get queue from app instance. Restore progress will not be reported.")
-            # Create a dummy queue
            from queue import Queue
            queue = Queue()

@@ -508,12 +484,8 @@ set -e
        status = 'error'
        try:
            if is_compressed:
-                # For compressed files, we extract to the destination.
-                # The -C flag tells tar to change to that directory before extracting.
                script_content = f"tar -xzf '{source_path}' -C '{dest_path}'"
            else:
-                # For regular directories, we rsync the content.
-                # Ensure source path has a trailing slash to copy contents.
                source = source_path.rstrip('/') + '/'
                script_content = f"rsync -aAXHv '{source}' '{dest_path}'"

@@ -528,8 +500,6 @@ set -e
            self.logger.log(f"An unexpected error occurred during restore: {e}")
            status = 'error'
        finally:
-            # Use a generic completion message for now.
-            # The queue processing logic in main_app might need a 'restore_completion' type.
            queue.put(('completion', {'status': status, 'returncode': 0 if status == 'success' else 1}))

    def get_scheduled_jobs(self) -> List[Dict[str, Any]]:
@@ -620,12 +590,10 @@ set -e
        if not os.path.isdir(pybackup_path):
            return system_backups

-        # Regex to parse folder names like '6-März-2024_143000_system_full' or '6-März-2024_143000_system_full.tar.gz'
        name_regex = re.compile(
            r"^(\d{1,2}-\w+-\d{4})_(\d{6})_system_(full|incremental)(\.tar\.gz|\.luks)?$", re.IGNORECASE)

        for item in os.listdir(pybackup_path):
-            # Skip info files
            if item.endswith('.txt'):
                continue

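The folder-name regex now accepts a .luks suffix alongside .tar.gz, which is how encrypted system backups get picked up. An example of the four groups it yields; Python 3's \w is Unicode-aware, so the German month name matches:

    import re

    name_regex = re.compile(
        r"^(\d{1,2}-\w+-\d{4})_(\d{6})_system_(full|incremental)(\.tar\.gz|\.luks)?$",
        re.IGNORECASE)

    m = name_regex.match("6-März-2024_143000_system_full.luks")
    if m:
        date_str, time_str, kind, ext = m.groups()
        # -> "6-März-2024", "143000", "full", ".luks"
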
@@ -635,7 +603,6 @@ set -e

            full_path = os.path.join(pybackup_path, item)
            date_str = match.group(1)
-            # time_str = match.group(2) # Not currently used in UI, but available
            backup_type_base = match.group(3).capitalize()
            extension = match.group(4)
            is_compressed = (extension == ".tar.gz")
@@ -650,14 +617,13 @@ set -e
            backup_size = "N/A"
            comment = ""

-            # Info file is named after the backup item (e.g., 'backup_name.txt' or 'backup_name.tar.gz.txt')
            info_file_path = os.path.join(pybackup_path, f"{item}.txt")
            if os.path.exists(info_file_path):
                try:
                    with open(info_file_path, 'r') as f:
                        for line in f:
                            if line.strip().lower().startswith("originalgröße:"):
                                size_match = re.search(r":\s*(.*?)\s*\(", line)
                                if size_match:
                                    backup_size = size_match.group(1).strip()
                                else:
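The size parser above pulls the human-readable value between the colon and the opening parenthesis of the "Originalgröße:" ("original size") line. Assuming an info line shaped roughly like the one below; the exact file format is not shown in this diff:

    import re

    line = "Originalgröße: 12.5 GB (13421772800 Bytes)"  # assumed format
    m = re.search(r":\s*(.*?)\s*\(", line)
    if m:
        backup_size = m.group(1)  # "12.5 GB"
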
@@ -679,14 +645,12 @@ set -e
                    "is_encrypted": is_encrypted
                })

-        # Sort by parsing the date from the folder name
        try:
            system_backups.sort(key=lambda x: datetime.datetime.strptime(
                x['date'], '%d-%B-%Y'), reverse=True)
        except ValueError:
            self.logger.log(
                "Could not sort backups by date due to format mismatch.")
-            # Fallback to simple string sort if date parsing fails
            system_backups.sort(key=lambda x: x['folder_name'], reverse=True)

        return system_backups
@@ -702,10 +666,8 @@ set -e
            if not os.path.isdir(full_path):
                continue

-            # NEW: Look for info file in the parent directory, named after the backup folder
            info_file_path = os.path.join(base_backup_path, f"{item}.txt")

-            # We identify a user backup by the presence of its corresponding info file.
            if os.path.exists(info_file_path):
                backup_size = "N/A"
                backup_date = "N/A"
@@ -714,7 +676,7 @@ set -e
                with open(info_file_path, 'r') as f:
                    for line in f:
                        if line.strip().lower().startswith("originalgröße:"):
                            size_match = re.search(r":\s*(.*?)\s*\(", line)
                            if size_match:
                                backup_size = size_match.group(1).strip()
                            else:
@@ -763,10 +725,9 @@ set -e
            new_lines = []
            for line in lines:
                if line.strip().lower().startswith("kommentar:"):
-                    if new_comment: # Update existing comment
+                    if new_comment:
                        new_lines.append(f"Kommentar: {new_comment}\n")
                        comment_found = True
-                    # If new_comment is empty, the old line is effectively deleted
                else:
                    new_lines.append(line)

@@ -793,4 +754,4 @@ set -e
            self.logger.log("Error: 'pkexec' or 'rsync' command not found.")
        except Exception as e:
            self.logger.log(
                f"An unexpected error occurred during pkexec rsync test: {e}")
@@ -25,10 +25,10 @@ class EncryptionManager:
        try:
            return keyring.get_password(self.service_id, username)
        except keyring.errors.InitError as e:
-            self.logger.log(f"Could not initialize keyring. Keyring is not available on this system or is not configured correctly. Error: {e}")
+            logger.log(f"Could not initialize keyring. Keyring is not available on this system or is not configured correctly. Error: {e}")
            return None
        except Exception as e:
-            self.logger.log(f"Could not get password from keyring: {e}")
+            logger.log(f"Could not get password from keyring: {e}")
            return None

    def set_password_in_keyring(self, username: str, password: str) -> bool:
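
The keyring module delegates to whatever secret service the desktop provides (Secret Service, KWallet, ...) and raises InitError when none is usable, which is the case handled above. A minimal round-trip, with a hypothetical service id standing in for self.service_id:

    import keyring

    SERVICE_ID = "pybackup"  # hypothetical; the class uses self.service_id

    keyring.set_password(SERVICE_ID, "backup-user", "s3cret")
    pw = keyring.get_password(SERVICE_ID, "backup-user")  # "s3cret", or None if absent
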
@@ -69,52 +69,60 @@ class EncryptionManager:

        return password

-    def setup_encrypted_backup(self, queue, container_path: str, size_gb: int, password: str) -> Optional[str]:
-        """Sets up a LUKS encrypted container for the backup."""
-        self.logger.log(f"Setting up encrypted container at {container_path}")
+    def setup_encrypted_backup(self, queue, base_path: str, size_gb: int, password: str) -> Optional[str]:
+        """Sets up a persistent LUKS encrypted container for the backup destination."""
+        self.logger.log(f"Setting up encrypted container at {base_path}")

        if not shutil.which("cryptsetup"):
            self.logger.log("Error: cryptsetup is not installed.")
            queue.put(('error', "cryptsetup is not installed."))
            return None

-        mapper_name = f"pybackup_{os.path.basename(container_path)}"
+        container_file = "pybackup_encrypted.luks"
+        container_path = os.path.join(base_path, container_file)
+        mapper_name = f"pybackup_{os.path.basename(base_path)}"
        mount_point = f"/mnt/{mapper_name}"

        if not password:
            self.logger.log("No password provided for encryption.")
            queue.put(('error', "No password provided for encryption."))
            return None

        # If mount point already exists, something is wrong. Clean up first.
        if os.path.ismount(mount_point):
            self.logger.log(f"Mount point {mount_point} already exists. Cleaning up before proceeding.")
            self.cleanup_encrypted_backup(mapper_name, mount_point)

        if os.path.exists(container_path):
            self.logger.log(f"Encrypted container {container_path} already exists. Attempting to unlock.")
            if not password:
                self.logger.log("No password provided for existing encrypted container.")
                queue.put(('error', "No password provided for existing encrypted container."))
                return None

            script = f"""
echo -n '{password}' | cryptsetup luksOpen {container_path} {mapper_name} -
mkdir -p {mount_point}
mount /dev/mapper/{mapper_name} {mount_point}
"""
            if not self._execute_as_root(script):
-                self.logger.log("Failed to unlock existing encrypted container.")
+                self.logger.log("Failed to unlock existing encrypted container. Check password or permissions.")
                queue.put(('error', "Failed to unlock existing encrypted container."))
                # Clean up failed mount attempt
                self.cleanup_encrypted_backup(mapper_name, mount_point)
                return None
        else:
            if not password:
                self.logger.log("No password provided to create encrypted container.")
                queue.put(('error', "No password provided to create encrypted container."))
                return None

            self.logger.log(f"Creating new encrypted container: {container_path}")
            script = f"""
fallocate -l {size_gb}G {container_path}
echo -n '{password}' | cryptsetup luksFormat {container_path} -
echo -n '{password}' | cryptsetup luksOpen {container_path} {mapper_name} -
mkfs.ext4 /dev/mapper/{mapper_name}
mkdir -p {mount_point}
mount /dev/mapper/{mapper_name} {mount_point}
"""

            if not self._execute_as_root(script):
-                self.logger.log("Failed to setup encrypted container.")
+                self.logger.log("Failed to create and setup encrypted container.")
                self.cleanup_encrypted_backup(mapper_name, mount_point)
                # Also remove the failed container file
                if os.path.exists(container_path):
                    os.remove(container_path) # This should be done with pkexec as well
                queue.put(('error', "Failed to setup encrypted container."))
                return None

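One caveat on the scripts above: interpolating the password into the shell text via echo -n '{password}' breaks if the password contains a single quote and can leak through process listings. A hedged alternative sketch, not what this commit does: keep cryptsetup's stdin key-file convention but feed the passphrase from Python:

    import subprocess

    def luks_open(container_path: str, mapper_name: str, password: str) -> bool:
        # '--key-file -' makes cryptsetup read the passphrase from stdin, so the
        # secret never appears in a shell string, a temp file, or `ps` output.
        result = subprocess.run(
            ["pkexec", "cryptsetup", "luksOpen",
             container_path, mapper_name, "--key-file", "-"],
            input=password.encode())
        return result.returncode == 0
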
@@ -125,9 +133,9 @@ class EncryptionManager:
        """Unmounts and closes the LUKS container."""
        self.logger.log(f"Cleaning up encrypted backup: {mapper_name}")
        script = f"""
-umount {mount_point} || echo "Mount point not found or already unmounted."
-cryptsetup luksClose {mapper_name} || echo "Mapper not found or already closed."
-rmdir {mount_point} || echo "Mount point directory not found."
+umount {mount_point} || echo "Mount point {mount_point} not found or already unmounted."
+cryptsetup luksClose {mapper_name} || echo "Mapper {mapper_name} not found or already closed."
+rmdir {mount_point} || echo "Mount point directory {mount_point} not found or already removed."
"""
        if not self._execute_as_root(script):
            self.logger.log("Encrypted backup cleanup script failed.")
@@ -169,4 +177,4 @@ rmdir {mount_point} || echo "Mount point directory not found."
            return False
        finally:
            if script_path and os.path.exists(script_path):
                os.remove(script_path)