From f560af36a7444c3f4235acb96e2464b8a4218e57 Mon Sep 17 00:00:00 2001
From: cam
Date: Sun, 8 Sep 2024 23:47:36 +1200
Subject: [PATCH 1/3] Fix object backup

Skip "-thumb" objects when sampling and backing up, shuffle the sampled
keys instead of drawing them with random.choice, raise the safety
threshold from 50 to 60 matching objects, and print a clearer message
when the check fails.
---
 backups/object-backup/object-backup.py | 31 +++++++++++++++++++-------
 1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/backups/object-backup/object-backup.py b/backups/object-backup/object-backup.py
index 8e15963..f14cf91 100755
--- a/backups/object-backup/object-backup.py
+++ b/backups/object-backup/object-backup.py
@@ -73,23 +73,31 @@ def get_archive_key(key):
     return os.path.join(config["archive"]["prefix"], key)
 
+def dont_backup_key(key):
+    if key.endswith("-thumb"):
+        return True
 
 # It is very easy to configure it to upload to the wrong bucket, so this checks that at least 80
 # out a random 100 recordings are already on the target bucket. Meaning it's probably the correct bucket.
+## TODO: Make an API request to the server for getting a random sample of keys from the target bucket.
 print(
     "Check that some files already match as a way of checking that the correct buckets are being/prefix used."
 )
 
 keys = []
 i = 0
-for obj in local_bucket.objects.page_size(10000):
+keys_sample_size = 10000
+for obj in local_bucket.objects.page_size(1000):
+    if dont_backup_key(obj.key):
+        continue
     keys.append(obj.key)
     i += 1
-    if i >= 10000:
+    if i >= keys_sample_size:
         break
 
-random_keys = []
-for i in range(100):
-    random_keys.append(random.choice(keys))
+random.shuffle(keys)
+
+# Select the first 100 random keys after shuffling
+random_keys = keys[:100]
 
 matching = 0
 
@@ -106,9 +114,16 @@ def check_matching_key(key):
 for key in random_keys:
     executor.submit(check_matching_key, key)
 
-if matching < 50:
+minimum_matching = 60
+if matching < minimum_matching:
     print(
-        f"{matching} out of 100 objects are already on the target bucket. Canceling backup."
+        textwrap.dedent(
+            f"""
+            Only {matching} out of 100 sampled objects are already on the target bucket.
+            A minimum of {minimum_matching} matching objects is required. Canceling backup.
+            This can be caused by a bucket misconfiguration or by too few keys to sample from (current sample size: {keys_sample_size}).
+            """
+        )
     )
     time.sleep(2)
     sys.exit(0)
@@ -138,7 +153,7 @@ def handle_file(obj):
     global file_changed_count
     global matching_count
     try:
-        if obj.key.endswith("-thumb"):
+        if dont_backup_key(obj.key):
             return
         archive_key = os.path.join(config["archive"]["prefix"], obj.key)
         archive_obj = archive_bucket.Object(archive_key)

From 71e9e2c20fa7e25b71287b8d32599af2862da347 Mon Sep 17 00:00:00 2001
From: cam
Date: Sun, 8 Sep 2024 23:58:54 +1200
Subject: [PATCH 2/3] Add formatting check

---
 .github/workflows/black.yml | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 .github/workflows/black.yml

diff --git a/.github/workflows/black.yml b/.github/workflows/black.yml
new file mode 100644
index 0000000..558cc65
--- /dev/null
+++ b/.github/workflows/black.yml
@@ -0,0 +1,32 @@
+name: Check Python code format
+
+on:
+  push:
+    branches:
+      - '**'
+    tags:
+      - '*'
+  pull_request:
+    branches:
+      - '**'
+
+jobs:
+  black:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v2
+
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.x'
+
+      - name: Install dependencies
+        run: |
+          pip install black
+
+      - name: Check formatting with black
+        run: |
+          black --check .
From 8208cb7dbf0948eb70286b12816c188d3a5aeffa Mon Sep 17 00:00:00 2001 From: cam Date: Mon, 9 Sep 2024 00:01:15 +1200 Subject: [PATCH 3/3] Run black formatter --- backups/grafana/grafana-backup.py | 46 ++++++----- backups/object-backup/object-backup.py | 2 + backups/object-recover/object-recover.py | 21 +++-- backups/psql-backup/psql-backup.py | 92 ++++++++++++--------- backups/salt-backup/salt-backup.py | 20 +++-- salt/check-salt-keys/check-salt-keys.py | 18 ++-- salt/deprecated-scripts/salt-auto-update.py | 14 +++- salt/key-accept-server/key-accept-server.py | 62 ++++++++++---- salt/schedule-commands/schedule-commands.py | 2 + 9 files changed, 180 insertions(+), 97 deletions(-) diff --git a/backups/grafana/grafana-backup.py b/backups/grafana/grafana-backup.py index fe07f86..1b84abc 100755 --- a/backups/grafana/grafana-backup.py +++ b/backups/grafana/grafana-backup.py @@ -15,25 +15,25 @@ print(f"failed to find config file '{CONFIG_FILE}'") sys.exit() -with open(CONFIG_FILE, 'r') as f: +with open(CONFIG_FILE, "r") as f: config = yaml.load(f, Loader=yaml.FullLoader) print("Running grafana backup") # File paths for Grafana configuration, database files, and plugins folder -GRAFANA_INI_FILE = '/etc/grafana/grafana.ini' -GRAFANA_DB_FILE = '/var/lib/grafana/grafana.db' -GRAFANA_PLUGINS_FOLDER = '/var/lib/grafana/plugins/' +GRAFANA_INI_FILE = "/etc/grafana/grafana.ini" +GRAFANA_DB_FILE = "/var/lib/grafana/grafana.db" +GRAFANA_PLUGINS_FOLDER = "/var/lib/grafana/plugins/" # Temporary backup file names -TMP_BACKUP_INI_FILE = '/tmp/grafana_backup_ini.ini' -TMP_BACKUP_DB_FILE = '/tmp/grafana_backup_db.db' -TMP_BACKUP_PLUGINS_ZIP = '/tmp/grafana_backup_plugins.zip' +TMP_BACKUP_INI_FILE = "/tmp/grafana_backup_ini.ini" +TMP_BACKUP_DB_FILE = "/tmp/grafana_backup_db.db" +TMP_BACKUP_PLUGINS_ZIP = "/tmp/grafana_backup_plugins.zip" # Backup path target file names -BACKUP_INI_FILE = 'grafana.ini' -BACKUP_DB_FILE = 'grafana.db' -BACKUP_PLUGINS_ZIP = 'grafana_plugins.zip' +BACKUP_INI_FILE = "grafana.ini" +BACKUP_DB_FILE = "grafana.db" +BACKUP_PLUGINS_ZIP = "grafana_plugins.zip" # Copy files to temporary backup files print("Copying files to temporary backup files") @@ -42,20 +42,25 @@ # Zip plugins folder print("Zipping plugins folder") -archive_path, _ = os.path.splitext(TMP_BACKUP_PLUGINS_ZIP) # Remove extension as make_archive will add it -shutil.make_archive(archive_path, 'zip', GRAFANA_PLUGINS_FOLDER) +archive_path, _ = os.path.splitext( + TMP_BACKUP_PLUGINS_ZIP +) # Remove extension as make_archive will add it +shutil.make_archive(archive_path, "zip", GRAFANA_PLUGINS_FOLDER) # Initialize Backblaze B2 API print("Initializing Backblaze B2 API") info = InMemoryAccountInfo() b2_api = B2Api(info) -b2_api.authorize_account("production", config["b2"]["app_key_id"], config["b2"]["app_key"]) +b2_api.authorize_account( + "production", config["b2"]["app_key_id"], config["b2"]["app_key"] +) bucket = b2_api.get_bucket_by_name(config["b2"]["bucket"]) + # Upload file to Backblaze B2 and delete original file def upload_to_b2(file_name, object_name): try: - with open(file_name, 'rb') as f: + with open(file_name, "rb") as f: data = f.read() source = UploadSourceBytes(data) bucket.upload(source, object_name) @@ -65,7 +70,8 @@ def upload_to_b2(file_name, object_name): except Exception as e: print(f"File {file_name} could not be uploaded to Backblaze B2. 
Error: {e}") return False - + + print("Uploading files to Backblaze B2") success = True success &= upload_to_b2(TMP_BACKUP_INI_FILE, BACKUP_INI_FILE) @@ -78,16 +84,18 @@ def upload_to_b2(file_name, object_name): print("Grafana backup failed") print("Logging to influx") -json_body = [{ +json_body = [ + { "measurement": "backup", "tags": { "host": HOST_NAME, }, "fields": { "success": 1.0 if success else 0.0, - } - }] -client = InfluxDBClient(**config['influx']) + }, + } +] +client = InfluxDBClient(**config["influx"]) print(json_body) client.write_points(json_body) diff --git a/backups/object-backup/object-backup.py b/backups/object-backup/object-backup.py index f14cf91..7fb9a43 100755 --- a/backups/object-backup/object-backup.py +++ b/backups/object-backup/object-backup.py @@ -73,10 +73,12 @@ def get_archive_key(key): return os.path.join(config["archive"]["prefix"], key) + def dont_backup_key(key): if key.endswith("-thumb"): return True + # It is very easy to configure it to upload to the wrong bucket, so this checks that at least 80 # out a random 100 recordings are already on the target bucket. Meaning it's probably the correct bucket. ## TODO: Make an API request to the server for getting a random sample of keys from the target bucket. diff --git a/backups/object-recover/object-recover.py b/backups/object-recover/object-recover.py index 07273e4..247b3b3 100755 --- a/backups/object-recover/object-recover.py +++ b/backups/object-recover/object-recover.py @@ -8,6 +8,7 @@ import os from minio import Minio + def check_file_exists(minio_client, bucket_name, object_name): try: minio_client.stat_object(bucket_name, object_name) @@ -50,11 +51,11 @@ def check_file_exists(minio_client, bucket_name, object_name): object_keys = file_object_keys + raw_file_object_keys minio_client = Minio( - minio["endpoint"], - access_key=minio["access_key"], - secret_key=minio["secret_key"], - secure=minio["http"], - ) + minio["endpoint"], + access_key=minio["access_key"], + secret_key=minio["secret_key"], + secure=minio["http"], +) print("Finding keys that are not in local object store") transfers = [] @@ -69,24 +70,28 @@ def check_file_exists(minio_client, bucket_name, object_name): completed_transfers = 0 lock = Lock() + def transfer_file(source, destination): try: subprocess.run( ["mc", "cp", "--quiet", source, destination], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, - check=True) + check=True, + ) with lock: global completed_transfers completed_transfers += 1 - print(f"{completed_transfers}/{len(transfers)} Transferred '{source}' to '{destination}'") + print( + f"{completed_transfers}/{len(transfers)} Transferred '{source}' to '{destination}'" + ) except subprocess.CalledProcessError as e: print(f"Failed to transfer '{source}': {e}") + size = len(transfers) print(f"Objects to recover: {size}") with ThreadPoolExecutor(max_workers=20) as executor: results = list(executor.map(lambda args: transfer_file(*args), transfers)) - \ No newline at end of file diff --git a/backups/psql-backup/psql-backup.py b/backups/psql-backup/psql-backup.py index 592841c..e989fb7 100755 --- a/backups/psql-backup/psql-backup.py +++ b/backups/psql-backup/psql-backup.py @@ -12,6 +12,7 @@ CONFIG_FILE = "./psql-backup.yaml" DUMP_EXT = ".pgdump" + def upload(bucket, dump_path, dump_key, retries=3): for i in range(retries): try: @@ -24,10 +25,12 @@ def upload(bucket, dump_path, dump_key, retries=3): sys.exit() print(f"upload failed, trying {retries-i-1} more times") + def check_if_file_exists(bucket, key, size): files = 
list(bucket.objects.filter(Prefix=key)) return len(files) == 1 and files[0].key == key and files[0].size == size + dry_run = False success = 1 if len(sys.argv) > 1: @@ -44,34 +47,48 @@ def check_if_file_exists(bucket, key, size): print(f"failed to find config file '{CONFIG_FILE}'") sys.exit() -with open(CONFIG_FILE, 'r') as f: +with open(CONFIG_FILE, "r") as f: config = yaml.load(f, Loader=yaml.FullLoader) -database = config['database'] +database = config["database"] date_str = datetime.datetime.now().strftime("%F") print("Making pgdump file") dump_name = f"{database}_{date_str}{DUMP_EXT}" -dump_path = os.path.join('/var/lib/postgresql', dump_name) +dump_path = os.path.join("/var/lib/postgresql", dump_name) dump_size = 0 if dry_run: print("Skipping making dump file in dry run") else: - subprocess.check_call(["sudo", "-i", "-u", "postgres", "pg_dump", "-Fc", database, "--file", dump_name]) + subprocess.check_call( + [ + "sudo", + "-i", + "-u", + "postgres", + "pg_dump", + "-Fc", + database, + "--file", + dump_name, + ] + ) dump_size = os.path.getsize(dump_path) # Backup to each of the daily endpoints -print('Running daily backups') -date_limit = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(config['daily']['days']) -prefix = os.path.join(config['prefix'],"daily", HOST_NAME) +print("Running daily backups") +date_limit = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta( + config["daily"]["days"] +) +prefix = os.path.join(config["prefix"], "daily", HOST_NAME) dump_key = os.path.join(prefix, dump_name) -for daily_backup in config['daily']['buckets']: - s3_config = config['s3_auths'][daily_backup['s3_auth']] +for daily_backup in config["daily"]["buckets"]: + s3_config = config["s3_auths"][daily_backup["s3_auth"]] bucket_name = daily_backup["bucket"] - print(f'Connecting to bucket {bucket_name}') - s3 = boto3.resource('s3', **s3_config) + print(f"Connecting to bucket {bucket_name}") + s3 = boto3.resource("s3", **s3_config) bucket = s3.Bucket(bucket_name) - print(f'Uploading {dump_path} as {dump_key}') + print(f"Uploading {dump_path} as {dump_key}") if dry_run: print("Skipping upload in dry run") else: @@ -80,49 +97,51 @@ def check_if_file_exists(bucket, key, size): uploaded = False for file in bucket.objects.filter(Prefix=prefix): if file.key.endswith(DUMP_EXT): - if (file.last_modified <= date_limit): - print(f'Deleting old backup {file.key}') + if file.last_modified <= date_limit: + print(f"Deleting old backup {file.key}") if dry_run: - print('Skipping deletion in dry run') + print("Skipping deletion in dry run") else: file.delete() if not check_if_file_exists(bucket, dump_key, dump_size): - print('File was not uploaded successfully') + print("File was not uploaded successfully") success = 0.0 - print(f'Finished backup on {bucket_name}') -print("Finished daily backups") + print(f"Finished backup on {bucket_name}") +print("Finished daily backups") # Monthly backups print("Running monthly backups") -prefix = os.path.join(config['prefix'], "monthly", HOST_NAME) +prefix = os.path.join(config["prefix"], "monthly", HOST_NAME) dump_key = os.path.join(prefix, dump_name) -month_start = datetime.datetime.now(datetime.timezone.utc).replace(day=1,hour=0,minute=0, second=0) -for monthly_backup in config['monthly']['buckets']: - s3_config = config['s3_auths'][monthly_backup['s3_auth']] +month_start = datetime.datetime.now(datetime.timezone.utc).replace( + day=1, hour=0, minute=0, second=0 +) +for monthly_backup in config["monthly"]["buckets"]: + s3_config = 
config["s3_auths"][monthly_backup["s3_auth"]] bucket_name = monthly_backup["bucket"] - print(f'Connecting to bucket {bucket_name}') - s3 = boto3.resource('s3', **s3_config) + print(f"Connecting to bucket {bucket_name}") + s3 = boto3.resource("s3", **s3_config) bucket = s3.Bucket(bucket_name) already_monthly_backup = False - print(f'Checking for backups this month') + print(f"Checking for backups this month") for file in bucket.objects.filter(Prefix=prefix): if file.key.endswith(DUMP_EXT): - if (file.last_modified > month_start): + if file.last_modified > month_start: print(f"Monthly backup already found {file.key}") already_monthly_backup = True break if not already_monthly_backup: print("No backup from this month found.") - print(f'Uploading {dump_path} as {dump_key}') + print(f"Uploading {dump_path} as {dump_key}") if dry_run: print("Skipping upload in dry run") else: upload(bucket, dump_path, dump_key) if not check_if_file_exists(bucket, dump_key, dump_size): - print('File was not uploaded successfully') + print("File was not uploaded successfully") success = 0.0 print("Finished monthly backups") @@ -130,22 +149,21 @@ def check_if_file_exists(bucket, key, size): os.remove(dump_path) print("Logging to influx") -json_body = [{ +json_body = [ + { "measurement": "backup", - "tags": { - "host": HOST_NAME, - "postgresql": config['database'] - }, + "tags": {"host": HOST_NAME, "postgresql": config["database"]}, "fields": { "success": float(success), "size": dump_size, - } - }] -client = InfluxDBClient(**config['influx']) + }, + } +] +client = InfluxDBClient(**config["influx"]) print(json_body) if dry_run: print("Skipping reporting to influx") else: client.write_points(json_body) -print("Finished PostgreSQL backups") \ No newline at end of file +print("Finished PostgreSQL backups") diff --git a/backups/salt-backup/salt-backup.py b/backups/salt-backup/salt-backup.py index 3748e65..717f204 100755 --- a/backups/salt-backup/salt-backup.py +++ b/backups/salt-backup/salt-backup.py @@ -16,7 +16,7 @@ print(f"failed to find config file '{CONFIG_FILE}'") sys.exit() -with open(CONFIG_FILE, 'r') as f: +with open(CONFIG_FILE, "r") as f: config = yaml.load(f, Loader=yaml.FullLoader) print("Running salt backup") @@ -27,14 +27,14 @@ for dir in dirs: shutil.copytree(dir, os.path.join(temp_dir, os.path.basename(dir))) zip_file = f"{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}_salt-backup.zip" -shutil.make_archive(zip_file.replace(".zip", ""), 'zip', temp_dir) +shutil.make_archive(zip_file.replace(".zip", ""), "zip", temp_dir) shutil.rmtree(temp_dir) # Upload backup to each endpoint success = 1 -for endpoint in config['endpoints']: - os.environ['AWS_ACCESS_KEY_ID'] = endpoint['access_key'] - os.environ['AWS_SECRET_ACCESS_KEY'] = endpoint['secret_key'] +for endpoint in config["endpoints"]: + os.environ["AWS_ACCESS_KEY_ID"] = endpoint["access_key"] + os.environ["AWS_SECRET_ACCESS_KEY"] = endpoint["secret_key"] command = f"s5cmd --endpoint-url {endpoint['url']} cp {zip_file} s3://{endpoint['bucket_name']}/{zip_file}" result = subprocess.run(command, shell=True) if result.returncode != 0: @@ -44,16 +44,18 @@ os.remove(zip_file) print("Logging to influx") -json_body = [{ +json_body = [ + { "measurement": "backup", "tags": { "host": HOST_NAME, }, "fields": { "success": float(success), - } - }] -client = InfluxDBClient(**config['influx']) + }, + } +] +client = InfluxDBClient(**config["influx"]) print(json_body) client.write_points(json_body) diff --git a/salt/check-salt-keys/check-salt-keys.py 
b/salt/check-salt-keys/check-salt-keys.py index abaea9c..0b1eb34 100755 --- a/salt/check-salt-keys/check-salt-keys.py +++ b/salt/check-salt-keys/check-salt-keys.py @@ -2,13 +2,21 @@ import subprocess ## Put all the salt keys that you want to check in a local file called `salt-keys` with a new line for each key -raw_keys = subprocess.run(['cat', 'salt-keys'], stdout=subprocess.PIPE).stdout.decode('utf-8').split("\n") +raw_keys = ( + subprocess.run(["cat", "salt-keys"], stdout=subprocess.PIPE) + .stdout.decode("utf-8") + .split("\n") +) filtered_keys = [] for i in raw_keys: - if i != '': + if i != "": filtered_keys.append(i) -salt_keys = subprocess.run(['salt-key', '-L', '--no-color'], stdout=subprocess.PIPE).stdout.decode('utf-8').split("\n") +salt_keys = ( + subprocess.run(["salt-key", "-L", "--no-color"], stdout=subprocess.PIPE) + .stdout.decode("utf-8") + .split("\n") +) salt_accepted_keys = [] salt_denied_keys = [] @@ -45,8 +53,8 @@ else: unknown_keys.append(i) -#print("accepted keys:", accepted_keys) +# print("accepted keys:", accepted_keys) print("denied keys:", denied_keys) print("unaccepted keys:", unaccepted_keys) print("rejected keys:", rejected_keys) -print("unknown keys:", unknown_keys) \ No newline at end of file +print("unknown keys:", unknown_keys) diff --git a/salt/deprecated-scripts/salt-auto-update.py b/salt/deprecated-scripts/salt-auto-update.py index 59bb15b..4365adf 100755 --- a/salt/deprecated-scripts/salt-auto-update.py +++ b/salt/deprecated-scripts/salt-auto-update.py @@ -31,6 +31,7 @@ # InfluxDB database used to track state DB_NAME = "last-updated" + def main(): print("loading blacklist") blacklist = get_blacklist() @@ -59,20 +60,28 @@ def main(): print("listening for minion ping events") for minion_id in minion_ids: print("scheduling update for", minion_id) - job_id = salt_client.cmd_async(minion_id, "cmd.run", ["if ! salt-updater --version; then salt-call state.apply --state-output=changes; fi;"]) + job_id = salt_client.cmd_async( + minion_id, + "cmd.run", + [ + "if ! 
salt-updater --version; then salt-call state.apply --state-output=changes; fi;" + ], + ) print(" job id", job_id) state.record_update(minion_id, job_id) + def get_blacklist(): result = subprocess.check_output(["salt", "-N", "blacklist", "--preview-target"]) lines = result.split(b"\n") deviceSet = set() for line in lines: - device = str(line[2:].decode("utf-8")) # each line is preceded with "- " + device = str(line[2:].decode("utf-8")) # each line is preceded with "- " if len(device) > 0: deviceSet.add(device) return deviceSet + def match_minion_ping(event): if event is None: return None @@ -88,6 +97,7 @@ def match_minion_ping(event): return data.get("id") return None + def not_server(minion_id): return not minion_id.startswith("server-") diff --git a/salt/key-accept-server/key-accept-server.py b/salt/key-accept-server/key-accept-server.py index d4396e2..84c613e 100644 --- a/salt/key-accept-server/key-accept-server.py +++ b/salt/key-accept-server/key-accept-server.py @@ -6,35 +6,44 @@ app = Flask(__name__) # Load configuration -with open('config.yaml', 'r') as config_file: +with open("config.yaml", "r") as config_file: config = yaml.safe_load(config_file) -API_PASSWORD = config['password'] -PORT = config['port'] +API_PASSWORD = config["password"] +PORT = config["port"] + def check_key_status(minion_id): - result = subprocess.run(['sudo', 'salt-key', '--list=all', '--out=json'], capture_output=True, text=True) + result = subprocess.run( + ["sudo", "salt-key", "--list=all", "--out=json"], capture_output=True, text=True + ) if result.returncode == 0: keys = eval(result.stdout) - if minion_id in keys['minions']: + if minion_id in keys["minions"]: return "accepted" - elif minion_id in keys['minions_pre']: + elif minion_id in keys["minions_pre"]: return "unaccepted" else: return "not_found" return "error" + def accept_key(minion_id): - result = subprocess.run(['sudo', 'salt-key', '--accept', minion_id, '-y'], capture_output=True, text=True) + result = subprocess.run( + ["sudo", "salt-key", "--accept", minion_id, "-y"], + capture_output=True, + text=True, + ) return result.returncode == 0 -@app.route('/accept_key', methods=['POST']) + +@app.route("/accept_key", methods=["POST"]) def accept_salt_key(): data = request.get_json() if data is None: return jsonify({"status": "error", "message": "Invalid request"}), 400 - password = data.get('password') - minion_id = data.get('minion_id') + password = data.get("password") + minion_id = data.get("minion_id") if password != API_PASSWORD: return jsonify({"status": "error", "message": "Unauthorized"}), 401 @@ -42,22 +51,41 @@ def accept_salt_key(): if not minion_id: return jsonify({"status": "error", "message": "Minion ID is required"}), 400 - if not re.match(r'^tc2-\d{4}$', minion_id): - return jsonify({"status": "error", "message": "Minion ID must be in the format tc2-1234"}), 400 + if not re.match(r"^tc2-\d{4}$", minion_id): + return ( + jsonify( + { + "status": "error", + "message": "Minion ID must be in the format tc2-1234", + } + ), + 400, + ) key_status = check_key_status(minion_id) if key_status == "accepted": - return jsonify({"status": "success", "message": f"Key for minion {minion_id} is already accepted"}) + return jsonify( + { + "status": "success", + "message": f"Key for minion {minion_id} is already accepted", + } + ) elif key_status == "unaccepted": if accept_key(minion_id): - return jsonify({"status": "success", "message": f"Key for minion {minion_id} accepted"}) + return jsonify( + {"status": "success", "message": f"Key for minion 
{minion_id} accepted"} + ) else: return jsonify({"status": "error", "message": "Failed to accept key"}), 500 elif key_status == "not_found": return jsonify({"status": "error", "message": "Minion ID not found"}), 404 else: - return jsonify({"status": "error", "message": "Failed to check key status"}), 500 + return ( + jsonify({"status": "error", "message": "Failed to check key status"}), + 500, + ) + -if __name__ == '__main__': - app.run(host='0.0.0.0', port=PORT) +if __name__ == "__main__": + app.run(host="0.0.0.0", port=PORT) diff --git a/salt/schedule-commands/schedule-commands.py b/salt/schedule-commands/schedule-commands.py index 394eb57..4cfdd21 100755 --- a/salt/schedule-commands/schedule-commands.py +++ b/salt/schedule-commands/schedule-commands.py @@ -14,6 +14,7 @@ COMMAND_FILE = "/opt/ops-tools/salt/commands.txt" + def main(): print("creating Salt client") salt_client = salt.client.LocalClient(auto_reconnect=True) @@ -34,6 +35,7 @@ def main(): print(result.stdout) print(result.stderr) + def getMinionCommand(minion_id): with open(COMMAND_FILE, "r") as file: lines = file.readlines()
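
Note on the sampling change in PATCH 1/3: shuffling the collected keys and slicing off the first 100 samples without replacement, so all 100 checked keys are distinct, whereas the previous random.choice loop could pick the same key more than once. A minimal sketch of the same idea using random.sample (not part of the patches; the key list below is a hypothetical stand-in for the bucket listing):

    import random

    # Hypothetical stand-in for the keys collected from local_bucket.
    keys = [f"recording-{i}" for i in range(1000)]

    # Equivalent to random.shuffle(keys) followed by keys[:100]:
    # a uniform sample of distinct keys, capped at the list length.
    random_keys = random.sample(keys, min(100, len(keys)))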