From b99c7b26c54f4318167a85cf0ae3a4500349eedd Mon Sep 17 00:00:00 2001
From: Florian Paul Azim Hoberg
Date: Tue, 20 Aug 2024 19:59:22 +0200
Subject: [PATCH] feature: Add storage balancing function.

Fixes: #51
---
 .../1.0.3/51_add_storage_balancing.yml      |   2 +
 .../8_add_best_next_node_for_placement.yml  |   2 +-
 proxlb                                      | 384 +++++++++++++++---
 3 files changed, 338 insertions(+), 50 deletions(-)
 create mode 100644 .changelogs/1.0.3/51_add_storage_balancing.yml

diff --git a/.changelogs/1.0.3/51_add_storage_balancing.yml b/.changelogs/1.0.3/51_add_storage_balancing.yml
new file mode 100644
index 0000000..1c67b75
--- /dev/null
+++ b/.changelogs/1.0.3/51_add_storage_balancing.yml
@@ -0,0 +1,2 @@
+added:
+  - Add storage balancing function. [#51]
diff --git a/.changelogs/1.0.3/8_add_best_next_node_for_placement.yml b/.changelogs/1.0.3/8_add_best_next_node_for_placement.yml
index 6e77163..d5ac747 100644
--- a/.changelogs/1.0.3/8_add_best_next_node_for_placement.yml
+++ b/.changelogs/1.0.3/8_add_best_next_node_for_placement.yml
@@ -1,2 +1,2 @@
 added:
-  - Added cli arg `-b` to return the next best node for next VM/CT placement. [#8]
+  - Add cli arg `-b` to return the next best node for next VM/CT placement. [#8]
diff --git a/proxlb b/proxlb
index aa2ca37..abec04d 100755
--- a/proxlb
+++ b/proxlb
@@ -177,32 +177,35 @@ def initialize_config_options(config_path):
         config = configparser.ConfigParser()
         config.read(config_path)
         # Proxmox config
-        proxlb_config['proxmox_api_host'] = config['proxmox']['api_host']
-        proxlb_config['proxmox_api_user'] = config['proxmox']['api_user']
-        proxlb_config['proxmox_api_pass'] = config['proxmox']['api_pass']
-        proxlb_config['proxmox_api_ssl_v'] = config['proxmox']['verify_ssl']
+        proxlb_config['proxmox_api_host']            = config['proxmox']['api_host']
+        proxlb_config['proxmox_api_user']            = config['proxmox']['api_user']
+        proxlb_config['proxmox_api_pass']            = config['proxmox']['api_pass']
+        proxlb_config['proxmox_api_ssl_v']           = config['proxmox']['verify_ssl']
         # VM Balancing
-        proxlb_config['vm_balancing_enable'] = config['vm_balancing'].get('enable', 1)
-        proxlb_config['vm_balancing_method'] = config['vm_balancing'].get('method', 'memory')
-        proxlb_config['vm_balancing_mode'] = config['vm_balancing'].get('mode', 'used')
-        proxlb_config['vm_balancing_mode_option'] = config['vm_balancing'].get('mode_option', 'bytes')
-        proxlb_config['vm_balancing_type'] = config['vm_balancing'].get('type', 'vm')
-        proxlb_config['vm_balanciness'] = config['vm_balancing'].get('balanciness', 10)
-        proxlb_config['vm_parallel_migrations'] = config['vm_balancing'].get('parallel_migrations', 1)
-        proxlb_config['vm_ignore_nodes'] = config['vm_balancing'].get('ignore_nodes', None)
-        proxlb_config['vm_ignore_vms'] = config['vm_balancing'].get('ignore_vms', None)
+        proxlb_config['vm_balancing_enable']         = config['vm_balancing'].get('enable', 1)
+        proxlb_config['vm_balancing_method']         = config['vm_balancing'].get('method', 'memory')
+        proxlb_config['vm_balancing_mode']           = config['vm_balancing'].get('mode', 'used')
+        proxlb_config['vm_balancing_mode_option']    = config['vm_balancing'].get('mode_option', 'bytes')
+        proxlb_config['vm_balancing_type']           = config['vm_balancing'].get('type', 'vm')
+        proxlb_config['vm_balanciness']              = config['vm_balancing'].get('balanciness', 10)
+        proxlb_config['vm_parallel_migrations']      = config['vm_balancing'].get('parallel_migrations', 1)
+        proxlb_config['vm_ignore_nodes']             = config['vm_balancing'].get('ignore_nodes', None)
+        proxlb_config['vm_ignore_vms']               = config['vm_balancing'].get('ignore_vms', None)
         # Storage Balancing
-        proxlb_config['storage_balancing_enable'] = config['storage_balancing'].get('enable', 0)
+        proxlb_config['storage_balancing_enable']    = config['storage_balancing'].get('enable', 0)
+        proxlb_config['storage_balancing_method']    = config['storage_balancing'].get('method', 'disk_space')
+        proxlb_config['storage_balanciness']         = config['storage_balancing'].get('balanciness', 10)
+        proxlb_config['storage_parallel_migrations'] = config['storage_balancing'].get('parallel_migrations', 0)
         # Update Support
-        proxlb_config['update_service'] = config['update_service'].get('enable', 0)
+        proxlb_config['update_service']              = config['update_service'].get('enable', 0)
         # API
-        proxlb_config['api'] = config['update_service'].get('enable', 0)
+        proxlb_config['api']                         = config['update_service'].get('enable', 0)
         # Service
-        proxlb_config['master_only'] = config['service'].get('master_only', 0)
-        proxlb_config['daemon'] = config['service'].get('daemon', 1)
-        proxlb_config['schedule'] = config['service'].get('schedule', 24)
-        proxlb_config['log_verbosity'] = config['service'].get('log_verbosity', 'CRITICAL')
-        proxlb_config['config_version'] = config['service'].get('config_version', 2)
+        proxlb_config['master_only']                 = config['service'].get('master_only', 0)
+        proxlb_config['daemon']                      = config['service'].get('daemon', 1)
+        proxlb_config['schedule']                    = config['service'].get('schedule', 24)
+        proxlb_config['log_verbosity']               = config['service'].get('log_verbosity', 'CRITICAL')
+        proxlb_config['config_version']              = config['service'].get('config_version', 2)
     except configparser.NoSectionError:
         logging.critical(f'{error_prefix} Could not find the required section.')
        sys.exit(2)
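For reference, the new keys read above correspond to a `[storage_balancing]` section of the ProxLB configuration file. A minimal sketch of such a section and of the access pattern used in initialize_config_options(); the concrete values are illustrative assumptions and the shipped example config may differ:

import configparser

# Hypothetical excerpt of a proxlb.conf illustrating the new options.
example_cfg = """
[storage_balancing]
enable: 1
method: disk_space
balanciness: 10
parallel_migrations: 0
"""

config = configparser.ConfigParser()
config.read_string(example_cfg)

# Same access pattern as in the patch: .get() with a fallback default,
# so a missing key falls back silently instead of raising.
print(config['storage_balancing'].get('method', 'disk_space'))    # disk_space
print(config['storage_balancing'].get('parallel_migrations', 0))  # 0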
@@ -376,14 +379,15 @@ def get_node_statistics(api_object, ignore_nodes):
 
 def get_vm_statistics(api_object, ignore_vms, balancing_type):
     """ Get statistics of cpu, memory and disk for each vm in the cluster. """
-    info_prefix = 'Info: [vm-statistics]:'
-    warn_prefix = 'Warn: [vm-statistics]:'
-    vm_statistics = {}
-    ignore_vms_list = ignore_vms.split(',')
-    group_include = None
-    group_exclude = None
-    vm_ignore = None
-    vm_ignore_wildcard = False
+    info_prefix         = 'Info: [vm-statistics]:'
+    warn_prefix         = 'Warn: [vm-statistics]:'
+    vm_statistics       = {}
+    ignore_vms_list     = ignore_vms.split(',')
+    group_include       = None
+    group_exclude       = None
+    vm_ignore           = None
+    vm_ignore_wildcard  = False
+    _vm_details_storage_allowed = ['ide', 'nvme', 'scsi', 'virtio', 'sata', 'rootfs']
 
     # Wildcard support: Initially validate if we need to honour
     # any wildcards within the vm_ignore list.
@@ -420,11 +424,38 @@ def get_vm_statistics(api_object, ignore_vms, balancing_type):
                 vm_statistics[vm['name']]['disk_used'] = vm['disk']
                 vm_statistics[vm['name']]['vmid'] = vm['vmid']
                 vm_statistics[vm['name']]['node_parent'] = node['node']
-                vm_statistics[vm['name']]['type'] = 'vm'
-                # Rebalancing node will be overwritten after calculations.
-                # If the vm stays on the node, it will be removed at a
-                # later time.
                 vm_statistics[vm['name']]['node_rebalance'] = node['node']
+                vm_statistics[vm['name']]['storage'] = {}
+                vm_statistics[vm['name']]['type'] = 'vm'
+
+                # Get disk details of the related object.
+                _vm_details = api_object.nodes(node['node']).qemu(vm['vmid']).config.get()
+                logging.info(f'{info_prefix} Getting disk information for vm {vm["name"]}.')
+
+                for vm_detail_key, vm_detail_value in _vm_details.items():
+                    vm_detail_key_validator = re.sub(r'\d+$', '', vm_detail_key)
+
+                    if vm_detail_key_validator in _vm_details_storage_allowed:
+                        vm_statistics[vm['name']]['storage'][vm_detail_key] = {}
+                        match = re.match(r'([^:]+):[^/]+/(.+),iothread=\d+,size=(\d+G)', _vm_details[vm_detail_key])
+
+                        # Split the matched disk definition and assign the parts to the storage information.
+                        if match:
+                            _volume = match.group(1)
+                            _disk_name = match.group(2)
+                            _disk_size = match.group(3)
+
+                            vm_statistics[vm['name']]['storage'][vm_detail_key]['name'] = _disk_name
+                            vm_statistics[vm['name']]['storage'][vm_detail_key]['device_name'] = vm_detail_key
+                            vm_statistics[vm['name']]['storage'][vm_detail_key]['volume'] = _volume
+                            vm_statistics[vm['name']]['storage'][vm_detail_key]['storage_parent'] = _volume
+                            vm_statistics[vm['name']]['storage'][vm_detail_key]['storage_rebalance'] = _volume
+                            vm_statistics[vm['name']]['storage'][vm_detail_key]['size'] = _disk_size[:-1]
+                            logging.info(f'{info_prefix} Added disk for {vm["name"]}: Name {_disk_name} on volume {_volume} with size {_disk_size}.')
+                        else:
+                            logging.info(f'{info_prefix} Could not parse a disk entry from {vm_detail_key} for {vm["name"]}.')
+
+                logging.info(f'{info_prefix} Added vm {vm["name"]}.')
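To illustrate the parsing above: a QEMU disk entry returned by the config API commonly looks like a path-style volume ID followed by options. The storage name, VMID and file name below are invented for illustration; note that the pattern assumes an iothread option and a path-style volume ID, so entries without those fall into the else branch:

import re

disk_cfg = 'nfs-storage:101/vm-101-disk-0.qcow2,iothread=1,size=32G'

match = re.match(r'([^:]+):[^/]+/(.+),iothread=\d+,size=(\d+G)', disk_cfg)
if match:
    print(match.group(1))  # nfs-storage          -> volume / storage_parent
    print(match.group(2))  # vm-101-disk-0.qcow2  -> disk name
    print(match.group(3))  # 32G                  -> size (stored without the trailing unit)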
 
     # Add all containers if type is ct or all.
@@ -458,11 +489,38 @@ def get_vm_statistics(api_object, ignore_vms, balancing_type):
                 vm_statistics[vm['name']]['disk_used'] = vm['disk']
                 vm_statistics[vm['name']]['vmid'] = vm['vmid']
                 vm_statistics[vm['name']]['node_parent'] = node['node']
-                vm_statistics[vm['name']]['type'] = 'ct'
-                # Rebalancing node will be overwritten after calculations.
-                # If the vm stays on the node, it will be removed at a
-                # later time.
                 vm_statistics[vm['name']]['node_rebalance'] = node['node']
+                vm_statistics[vm['name']]['storage'] = {}
+                vm_statistics[vm['name']]['type'] = 'ct'
+
+                # Get disk details of the related object.
+                _vm_details = api_object.nodes(node['node']).lxc(vm['vmid']).config.get()
+                logging.info(f'{info_prefix} Getting disk information for ct {vm["name"]}.')
+
+                for vm_detail_key, vm_detail_value in _vm_details.items():
+                    vm_detail_key_validator = re.sub(r'\d+$', '', vm_detail_key)
+
+                    if vm_detail_key_validator in _vm_details_storage_allowed:
+                        vm_statistics[vm['name']]['storage'][vm_detail_key] = {}
+                        match = re.match(r'(?P<volume>[^:]+):(?P<disk_name>[^,]+),size=(?P<size>\S+)', _vm_details[vm_detail_key])
+
+                        # Split the matched disk definition and assign the parts to the storage information.
+                        if match:
+                            _volume = match.group(1)
+                            _disk_name = match.group(2)
+                            _disk_size = match.group(3)
+
+                            vm_statistics[vm['name']]['storage'][vm_detail_key]['name'] = _disk_name
+                            vm_statistics[vm['name']]['storage'][vm_detail_key]['device_name'] = vm_detail_key
+                            vm_statistics[vm['name']]['storage'][vm_detail_key]['volume'] = _volume
+                            vm_statistics[vm['name']]['storage'][vm_detail_key]['storage_parent'] = _volume
+                            vm_statistics[vm['name']]['storage'][vm_detail_key]['storage_rebalance'] = _volume
+                            vm_statistics[vm['name']]['storage'][vm_detail_key]['size'] = _disk_size[:-1]
+                            logging.info(f'{info_prefix} Added disk for {vm["name"]}: Name {_disk_name} on volume {_volume} with size {_disk_size}.')
+                        else:
+                            logging.info(f'{info_prefix} Could not parse a disk entry from {vm_detail_key} for {vm["name"]}.')
+
+                logging.info(f'{info_prefix} Added ct {vm["name"]}.')
 
     logging.info(f'{info_prefix} Created VM statistics.')
@@ -496,6 +554,57 @@ def update_node_statistics(node_statistics, vm_statistics):
     return node_statistics
 
 
+def get_storage_statistics(api_object):
+    """ Get statistics of all storage in the cluster. """
+    info_prefix = 'Info: [storage-statistics]:'
+    storage_statistics = {}
+
+    for node in api_object.nodes.get():
+
+        for storage in api_object.nodes(node['node']).storage.get():
+
+            # Only add enabled and active storage repositories that might be suitable for further
+            # storage balancing.
+            if storage['enabled'] and storage['active'] and storage['shared']:
+                storage_statistics[storage['storage']] = {}
+                storage_statistics[storage['storage']]['name'] = storage['storage']
+                storage_statistics[storage['storage']]['total'] = storage['total']
+                storage_statistics[storage['storage']]['used'] = storage['used']
+                storage_statistics[storage['storage']]['used_percent'] = storage['used'] / storage['total'] * 100
+                storage_statistics[storage['storage']]['used_percent_last_run'] = 0
+                storage_statistics[storage['storage']]['free'] = storage['total'] - storage['used']
+                storage_statistics[storage['storage']]['free_percent'] = storage_statistics[storage['storage']]['free'] / storage['total'] * 100
+                storage_statistics[storage['storage']]['used_fraction'] = storage['used_fraction']
+                storage_statistics[storage['storage']]['type'] = storage['type']
+                storage_statistics[storage['storage']]['content'] = storage['content']
+                storage_statistics[storage['storage']]['usage_type'] = ''
+
+                # Split the Proxmox returned values to a list and validate the supported
+                # types of the underlying storage for further migrations.
+                storage_content_list = storage['content'].split(',')
+                usage_ct = False
+                usage_vm = False
+
+                if 'rootdir' in storage_content_list:
+                    usage_ct = True
+                    storage_statistics[storage['storage']]['usage_type'] = 'ct'
+                    logging.info(f'{info_prefix} Storage {storage["storage"]} supports CTs.')
+
+                if 'images' in storage_content_list:
+                    usage_vm = True
+                    storage_statistics[storage['storage']]['usage_type'] = 'vm'
+                    logging.info(f'{info_prefix} Storage {storage["storage"]} supports VMs.')
+
+                if usage_ct and usage_vm:
+                    storage_statistics[storage['storage']]['usage_type'] = 'all'
+                    logging.info(f'{info_prefix} Updating storage {storage["storage"]} support to CTs and VMs.')
+
+                logging.info(f'{info_prefix} Added storage {storage["storage"]}.')
+
+    logging.info(f'{info_prefix} Created storage statistics.')
+    return storage_statistics
+
+
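As a rough illustration of the structure assembled by get_storage_statistics(), a single shared storage would end up looking roughly like this; the storage name and all numbers are invented:

storage_statistics_example = {
    'ceph-pool': {
        'name': 'ceph-pool',
        'total': 1000 * 1024**3,      # reported by the Proxmox storage API
        'used': 400 * 1024**3,
        'used_percent': 40.0,         # used / total * 100
        'used_percent_last_run': 0,
        'free': 600 * 1024**3,        # total - used
        'free_percent': 60.0,         # free / total * 100
        'used_fraction': 0.4,
        'type': 'rbd',
        'content': 'images,rootdir',
        'usage_type': 'all',          # supports both VMs and CTs
    },
}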
""" if '*' in ignore_vms: @@ -584,10 +693,11 @@ def balancing_vm_calculations(balancing_method, balancing_mode, balancing_mode_o node_statistics, vm_statistics = __get_vm_tags_include_groups(vm_statistics, node_statistics, balancing_method, balancing_mode) node_statistics, vm_statistics = __get_vm_tags_exclude_groups(vm_statistics, node_statistics, balancing_method, balancing_mode) + # FIXME FIXME # Remove VMs that are not being relocated. - vms_to_remove = [vm_name for vm_name, vm_info in vm_statistics.items() if 'node_rebalance' in vm_info and vm_info['node_rebalance'] == vm_info.get('node_parent')] - for vm_name in vms_to_remove: - del vm_statistics[vm_name] + # vms_to_remove = [vm_name for vm_name, vm_info in vm_statistics.items() if 'node_rebalance' in vm_info and vm_info['node_rebalance'] == vm_info.get('node_parent')] + # for vm_name in vms_to_remove: + # del vm_statistics[vm_name] logging.info(f'{info_prefix} Balancing calculations done.') return node_statistics, vm_statistics @@ -936,6 +1046,180 @@ def run_vm_rebalancing(api_object, vm_statistics_rebalanced, app_args, parallel_ __create_cli_output(vm_statistics_rebalanced, app_args) +####################### +################# COPY +def balancing_storage_calculations(storage_balancing_method, storage_statistics, vm_statistics, balanciness, rebalance, processed_vms): + """ Calculate re-balancing of storage on present datastores across the cluster. """ + info_prefix = 'Info: [storage-rebalancing-calculator]:' + + # Validate for a supported balancing method, mode and if rebalancing is required. + __validate_vm_statistics(vm_statistics) + rebalance = __validate_storage_balanciness(balanciness, storage_balancing_method, storage_statistics) + + if rebalance: + vm_name, vm_disk_device = __get_most_used_resources_vm_storage(vm_statistics) + + if vm_name not in processed_vms: + processed_vms.append(vm_name) + resources_storage_most_free = __get_most_free_storage(storage_balancing_method, storage_statistics) + + # Update resource statistics for VMs and storage. + storage_statistics, vm_statistic = __update_resource_storage_statistics(storage_statistics, resources_storage_most_free, vm_statistics, vm_name, vm_disk_device) + + # Start recursion until we do not have any needs to rebalance anymore. + balancing_storage_calculations(storage_balancing_method, storage_statistics, vm_statistics, balanciness, rebalance, processed_vms) + + # Remove VMs where their storage is not being relocated. + # vms_to_remove = [vm_name for vm_name, vm_info in vm_statistics.items() if all(storage.get('storage_rebalance') is None for storage in vm_info.get('storage', {}).values())] + # for vm_name in vms_to_remove: + # del vm_statistics[vm_name] + + logging.info(f'{info_prefix} Balancing calculations done.') + return storage_statistics, vm_statistics + + +def __validate_storage_balanciness(balanciness, storage_balancing_method, storage_statistics): + """ Validate for balanciness of storage to ensure further rebalancing is needed. """ + info_prefix = 'Info: [storage-balanciness-validation]:' + error_prefix = 'Error: [storage-balanciness-validation]:' + storage_resource_percent_list = [] + storage_assigned_percent_match = [] + + # Validate for an allowed balancing method and define the storage resource selector. 
+    if storage_balancing_method == 'disk_space':
+        logging.info(f'{info_prefix} Getting most free storage volume by disk size.')
+        storage_resource_selector = 'used'
+    elif storage_balancing_method == 'disk_io':
+        logging.error(f'{error_prefix} Getting most free storage volume by disk IO is not yet supported.')
+        sys.exit(2)
+    else:
+        logging.error(f'{error_prefix} Invalid storage balancing method: {storage_balancing_method}.')
+        sys.exit(2)
+
+    # Obtain the metrics
+    for storage_name, storage_info in storage_statistics.items():
+
+        logging.info(f'{info_prefix} Validating storage: {storage_name} for balanciness for usage with: {storage_balancing_method}.')
+        # Save information of the storage volumes from the current run to compare them in the next recursion.
+        if storage_statistics[storage_name][f'{storage_resource_selector}_percent_last_run'] == storage_statistics[storage_name][f'{storage_resource_selector}_percent']:
+            storage_statistics[storage_name][f'{storage_resource_selector}_percent_match'] = True
+        else:
+            storage_statistics[storage_name][f'{storage_resource_selector}_percent_match'] = False
+
+        # Update value to the current value of the recursion run.
+        storage_statistics[storage_name][f'{storage_resource_selector}_percent_last_run'] = storage_statistics[storage_name][f'{storage_resource_selector}_percent']
+
+        # If all storage resources are unchanged, the recursion can be left.
+        for key, value in storage_statistics.items():
+            storage_assigned_percent_match.append(value.get(f'{storage_resource_selector}_percent_match', False))
+
+        if False not in storage_assigned_percent_match:
+            return False
+
+        # Add storage information to the resource list.
+        storage_resource_percent_list.append(int(storage_info[f'{storage_resource_selector}_percent']))
+        logging.info(f'{info_prefix} Storage: {storage_name} with values: {storage_info}')
+
+    # Create a sorted list of the delta + balanciness between the storage resources.
+    storage_resource_percent_list_sorted = sorted(storage_resource_percent_list)
+    storage_lowest_percent = storage_resource_percent_list_sorted[0]
+    storage_highest_percent = storage_resource_percent_list_sorted[-1]
+
+    # Validate whether the recursion should proceed for further rebalancing.
+    if (int(storage_lowest_percent) + int(balanciness)) < int(storage_highest_percent):
+        logging.info(f'{info_prefix} Rebalancing for type "{storage_resource_selector}" of storage is needed. Highest usage: {int(storage_highest_percent)}% | Lowest usage: {int(storage_lowest_percent)}%.')
+        return True
+    else:
+        logging.info(f'{info_prefix} Rebalancing for type "{storage_resource_selector}" of storage is not needed. Highest usage: {int(storage_highest_percent)}% | Lowest usage: {int(storage_lowest_percent)}%.')
+        return False
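The decision at the end of __validate_storage_balanciness() boils down to a simple spread check. With made-up numbers: three storages at 61 %, 67 % and 78 % usage and the default balanciness of 10 give 61 + 10 < 78, so another rebalancing iteration is requested:

# Toy illustration of the spread check used above (values are invented).
balanciness = 10
usage_percent = [67, 61, 78]

usage_sorted = sorted(usage_percent)
lowest, highest = usage_sorted[0], usage_sorted[-1]

needs_rebalancing = (lowest + balanciness) < highest
print(needs_rebalancing)  # True: 61 + 10 = 71 < 78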
+
+
+def __get_most_used_resources_vm_storage(vm_statistics):
+    """ Get and return the most used disk of a VM by storage. """
+    info_prefix = 'Info: [get-most-used-disks-resources-vm]:'
+
+    # Get the biggest storage of a VM/CT. A VM/CT can hold multiple disks. Therefore, we need to iterate
+    # over all assigned disks to get the biggest one.
+    vm_object = sorted(
+        vm_statistics.items(),
+        key=lambda x: max(
+            (size_in_bytes(storage['size']) for storage in x[1].get('storage', {}).values() if 'size' in storage),
+            default=0
+        ),
+        reverse=True
+    )
+
+    vm_object = vm_object[0]
+    vm_name = vm_object[0]
+    vm_disk_device = max(vm_object[1]['storage'], key=lambda x: int(vm_object[1]['storage'][x]['size']))
+    logging.info(f'{info_prefix} Got most used VM: {vm_name} with storage device: {vm_disk_device}.')
+
+    return vm_name, vm_disk_device
+
+
+def __get_most_free_storage(storage_balancing_method, storage_statistics):
+    """ Get the storage with the most free space or IO, depending on the balancing mode. """
+    info_prefix  = 'Info: [get-most-free-storage]:'
+    error_prefix = 'Error: [get-most-free-storage]:'
+    storage_volume = None
+    logging.info(f'{info_prefix} Starting to evaluate the most free storage volume.')
+
+    if storage_balancing_method == 'disk_space':
+        logging.info(f'{info_prefix} Getting most free storage volume by disk space.')
+        storage_volume = max(storage_statistics, key=lambda x: storage_statistics[x]['free_percent'])
+
+    if storage_balancing_method == 'disk_io':
+        logging.info(f'{info_prefix} Getting most free storage volume by disk IO.')
+        logging.error(f'{error_prefix} Getting most free storage volume by disk IO is not yet supported.')
+        sys.exit(2)
+
+    return storage_volume
+
+
+def __update_resource_storage_statistics(storage_statistics, resources_storage_most_free, vm_statistics, vm_name, vm_disk_device):
+    """ Update VM and storage resource statistics. """
+    info_prefix = 'Info: [rebalancing-storage-resource-statistics-update]:'
+    current_storage = vm_statistics[vm_name]['storage'][vm_disk_device]['storage_parent']
+    current_storage_size = storage_statistics[current_storage]['free'] / (1024 ** 3)
+    rebalance_storage = resources_storage_most_free
+    rebalance_storage_size = storage_statistics[rebalance_storage]['free'] / (1024 ** 3)
+    vm_storage_size = vm_statistics[vm_name]['storage'][vm_disk_device]['size']
+    vm_storage_size_bytes = int(vm_storage_size) * 1024**3
+
+    # Assign the new storage device to the vm if this disk has not been reassigned yet.
+    logging.info(f'{info_prefix} Validating VM {vm_name} for potential storage rebalancing.')
+    if vm_statistics[vm_name]['storage'][vm_disk_device]['storage_rebalance'] == vm_statistics[vm_name]['storage'][vm_disk_device]['storage_parent']:
+        logging.info(f'{info_prefix} Setting VM {vm_name} from {current_storage} to {rebalance_storage} storage.')
+        vm_statistics[vm_name]['storage'][vm_disk_device]['storage_rebalance'] = resources_storage_most_free
+    else:
+        logging.info(f'{info_prefix} Keeping VM {vm_name} on {current_storage} storage.')
+
+    # Recalculate values for storage
+    ## Add freed resources to old parent storage device
+    storage_statistics[current_storage]['used'] = storage_statistics[current_storage]['used'] - vm_storage_size_bytes
+    storage_statistics[current_storage]['free'] = storage_statistics[current_storage]['free'] + vm_storage_size_bytes
+    storage_statistics[current_storage]['free_percent'] = (storage_statistics[current_storage]['free'] / storage_statistics[current_storage]['total']) * 100
+    storage_statistics[current_storage]['used_percent'] = (storage_statistics[current_storage]['used'] / storage_statistics[current_storage]['total']) * 100
+    logging.info(f'{info_prefix} Adding free space of {vm_storage_size}G to old storage with {current_storage_size}G. [free: {int(current_storage_size) + int(vm_storage_size)}G | {storage_statistics[current_storage]["free_percent"]}%]')
+
+    ## Remove newly allocated resources from the new rebalanced storage device
+    storage_statistics[rebalance_storage]['used'] = storage_statistics[rebalance_storage]['used'] + vm_storage_size_bytes
+    storage_statistics[rebalance_storage]['free'] = storage_statistics[rebalance_storage]['free'] - vm_storage_size_bytes
+    storage_statistics[rebalance_storage]['free_percent'] = (storage_statistics[rebalance_storage]['free'] / storage_statistics[rebalance_storage]['total']) * 100
+    storage_statistics[rebalance_storage]['used_percent'] = (storage_statistics[rebalance_storage]['used'] / storage_statistics[rebalance_storage]['total']) * 100
+    logging.info(f'{info_prefix} Adding used space of {vm_storage_size}G to new storage with {rebalance_storage_size}G. [free: {int(rebalance_storage_size) - int(vm_storage_size)}G | {storage_statistics[rebalance_storage]["free_percent"]}%]')
+
+    logging.info(f'{info_prefix} Updated VM and storage statistics.')
+    return storage_statistics, vm_statistics
+
+
+def size_in_bytes(size_str):
+    # Convert a size string like '32G' into bytes; values without a known
+    # unit suffix are returned as a plain float.
+    size_str = str(size_str)
+    size_unit = size_str[-1].upper()
+    size_multipliers = {'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4}
+    if size_unit in size_multipliers:
+        return float(size_str[:-1]) * size_multipliers[size_unit]
+    return float(size_str)
+
+
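A short usage sketch for size_in_bytes(). Note that the disk sizes collected earlier are stored with the unit already stripped (for example '32' instead of '32G'), so the plain-number fallback is the path actually taken there:

print(size_in_bytes('32G'))   # 34359738368.0 (32 * 1024**3)
print(size_in_bytes('512M'))  # 536870912.0
print(size_in_bytes('32'))    # 32.0 (no unit suffix, returned as-is)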
 def main():
     """ Run ProxLB for balancing VM workloads across a Proxmox cluster. """
     # Initialize PAS.
@@ -965,17 +1249,19 @@ def main():
 
     # Get metric & statistics for vms and nodes.
     if proxlb_config['vm_balancing_enable'] or proxlb_config['storage_balancing_enable'] or app_args.best_node:
-        node_statistics = get_node_statistics(api_object, proxlb_config['vm_ignore_nodes'])
-        vm_statistics = get_vm_statistics(api_object, proxlb_config['vm_ignore_vms'], proxlb_config['vm_balancing_type'])
-        node_statistics = update_node_statistics(node_statistics, vm_statistics)
+        node_statistics    = get_node_statistics(api_object, proxlb_config['vm_ignore_nodes'])
+        vm_statistics      = get_vm_statistics(api_object, proxlb_config['vm_ignore_vms'], proxlb_config['vm_balancing_type'])
+        node_statistics    = update_node_statistics(node_statistics, vm_statistics)
+        storage_statistics = get_storage_statistics(api_object)
 
-    # Execute VM balancing sub-routines.
+    # Execute VM/CT balancing sub-routines.
     if proxlb_config['vm_balancing_enable'] or app_args.best_node:
-        # Calculate rebalancing of vms.
-        node_statistics_rebalanced, vm_statistics_rebalanced = balancing_vm_calculations(proxlb_config['vm_balancing_method'], proxlb_config['vm_balancing_mode'], proxlb_config['vm_balancing_mode_option'],
-                                                                                         node_statistics, vm_statistics, proxlb_config['vm_balanciness'], app_args, rebalance=False, processed_vms=[])
-        # Rebalance vms to new nodes within the cluster.
-        run_vm_rebalancing(api_object, vm_statistics_rebalanced, app_args, proxlb_config['vm_parallel_migrations'])
+        node_statistics, vm_statistics = balancing_vm_calculations(proxlb_config['vm_balancing_method'], proxlb_config['vm_balancing_mode'], proxlb_config['vm_balancing_mode_option'], node_statistics, vm_statistics, proxlb_config['vm_balanciness'], app_args, rebalance=False, processed_vms=[])
+        run_vm_rebalancing(api_object, vm_statistics, app_args, proxlb_config['vm_parallel_migrations'])
+
+    # Execute storage balancing sub-routines.
+    if proxlb_config['storage_balancing_enable']:
+        storage_statistics, vm_statistics = balancing_storage_calculations(proxlb_config['storage_balancing_method'], storage_statistics, vm_statistics, proxlb_config['storage_balanciness'], rebalance=False, processed_vms=[])
 
     # Validate for any errors.
     post_validations()
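As a final, self-contained sketch of what one pass of the storage balancing calculation is meant to decide (all names and numbers are invented; this only mirrors the selection logic, it is not ProxLB code): the VM with the largest disk is selected and its disk is pointed at the storage with the most free space.

# Toy data mirroring the structures built by get_storage_statistics() and get_vm_statistics().
storage_statistics = {
    'ssd-pool': {'free_percent': 15.0},
    'hdd-pool': {'free_percent': 70.0},
}
vm_statistics = {
    'web01': {'storage': {'scsi0': {'size': '32', 'storage_parent': 'ssd-pool', 'storage_rebalance': 'ssd-pool'}}},
    'db01':  {'storage': {'scsi0': {'size': '200', 'storage_parent': 'ssd-pool', 'storage_rebalance': 'ssd-pool'}}},
}

# Pick the VM with the biggest disk (cf. __get_most_used_resources_vm_storage).
vm_name = max(vm_statistics, key=lambda v: max(int(d['size']) for d in vm_statistics[v]['storage'].values()))

# Pick the storage with the most free space (cf. __get_most_free_storage).
target = max(storage_statistics, key=lambda s: storage_statistics[s]['free_percent'])

print(vm_name, '->', target)  # db01 -> hdd-pool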