diff --git a/cloudferry/actions/prechecks/check_networks.py b/cloudferry/actions/prechecks/check_networks.py index f8862f1a..1d91359a 100644 --- a/cloudferry/actions/prechecks/check_networks.py +++ b/cloudferry/actions/prechecks/check_networks.py @@ -11,8 +11,6 @@ # implied. # See the License for the specific language governing permissions and# # limitations under the License. - - import collections import ipaddr @@ -24,6 +22,7 @@ from cloudferry.lib.base.action import action from cloudferry.lib.os.network import neutron from cloudferry.lib.utils import log +from cloudferry.lib.utils import mapper from cloudferry.lib.utils import proxy_client from cloudferry.lib.utils import utils @@ -46,8 +45,8 @@ class CheckNetworks(action.Action): def run(self, **kwargs): LOG.debug("Checking networks...") - overlapping_resources = {} - invalid_resources = {} + has_overlapping_resources = False + has_invalid_resources = False src_net = self.src_cloud.resources[utils.NETWORK_RESOURCE] dst_net = self.dst_cloud.resources[utils.NETWORK_RESOURCE] @@ -65,24 +64,44 @@ def run(self, **kwargs): LOG.debug("Retrieving Compute information from Source cloud...") src_compute_info = ComputeInfo(src_compute, search_opts, tenant_ids) - ext_net_map = utils.read_yaml_file(self.cfg.migrate.ext_net_map) or {} + ext_net_map = mapper.Mapper('ext_network_map') # Check external networks mapping if ext_net_map: LOG.info("Check external networks mapping...") invalid_ext_net_ids = src_net_info.get_invalid_ext_net_ids( dst_net_info, ext_net_map) - if invalid_ext_net_ids: - invalid_resources.update( - {"invalid_external_nets_ids_in_map": invalid_ext_net_ids}) + if invalid_ext_net_ids['src_nets'] or \ + invalid_ext_net_ids['dst_nets']: + invalid_src_nets = invalid_ext_net_ids['src_nets'] + invalid_dst_nets = invalid_ext_net_ids['dst_nets'] + invalid_nets_str = "" + + if invalid_src_nets: + invalid_nets_str = 'Source cloud:\n' + \ + '\n'.join(invalid_src_nets) + '\n' + if invalid_dst_nets: + invalid_nets_str += 'Destination cloud:\n' + \ + '\n'.join(invalid_dst_nets) + '\n' + + LOG.error("External networks mapping file has non-existing " + "network UUIDs defined:\n%s\nPlease update '%s' " + "file with correct values and re-run networks " + "check.", + invalid_nets_str, self.cfg.migrate.ext_net_map) + has_invalid_resources = True # Check networks' segmentation IDs overlap LOG.info("Check networks' segmentation IDs overlapping...") nets_overlapping_seg_ids = (src_net_info.get_overlapping_seg_ids( dst_net_info)) if nets_overlapping_seg_ids: - LOG.warning("Networks with segmentation IDs overlapping:\n%s", - nets_overlapping_seg_ids) + LOG.warning("Segmentation IDs for these networks in source cloud " + "WILL NOT BE KEPT regardless of options defined in " + "config, because networks with the same segmentation " + "IDs already exist in destination: %s.", + '\n'.join([n['src_net_id'] + for n in nets_overlapping_seg_ids])) # Check external subnets overlap LOG.info("Check external subnets overlapping...") @@ -90,24 +109,61 @@ def run(self, **kwargs): src_net_info.get_overlapping_external_subnets(dst_net_info, ext_net_map)) if overlapping_external_subnets: - overlapping_resources.update( - {"overlapping_external_subnets": overlapping_external_subnets}) + pool_fmt = '"{pool}" pool of subnet "{snet_name}" ({snet_id})' + fmt = "{src_pool} overlaps with {dst_pool}" + overlapping_nets = [] + + for snet in overlapping_external_subnets: + overlapping_nets.append( + fmt.format( + src_pool=pool_fmt.format( + pool=snet['src_subnet']['allocation_pools'], + 
snet_name=snet['src_subnet']['name'], + snet_id=snet['src_subnet']['id']), + dst_pool=pool_fmt.format( + pool=snet['dst_subnet']['allocation_pools'], + snet_name=snet['dst_subnet']['name'], + snet_id=snet['dst_subnet']['id'], + ))) + + message = ("Following external networks have overlapping " + "allocation pools in source and destination:\n{}.\nTo " + "resolve this:\n" + " 1. Manually change allocation pools in source or " + "destination networks to be identical;\n" + " 2. Use '[migrate] ext_net_map' external networks " + "mapping. Floating IPs will NOT BE KEPT in that " + "case.".format('\n'.join(overlapping_nets))) + + LOG.error(message) + has_overlapping_resources = True # Check floating IPs overlap LOG.info("Check floating IPs overlapping...") floating_ips = src_net_info.list_overlapping_floating_ips(dst_net_info, ext_net_map) if floating_ips: - overlapping_resources.update( - {'overlapping_floating_ips': floating_ips}) + LOG.error("Following floating IPs from source cloud already exist " + "in destination, but either tenant, or external " + "network doesn't match source cloud floating IP: %s\n" + "In order to resolve you'd need to either delete " + "floating IP from destination, or recreate floating " + "IP so that they match fully in source and destination.", + '\n'.join(floating_ips)) + has_overlapping_resources = True # Check busy physical networks on DST of FLAT network type LOG.info("Check busy physical networks for FLAT network type...") busy_flat_physnets = src_net_info.busy_flat_physnets(dst_net_info, ext_net_map) if busy_flat_physnets: - overlapping_resources.update( - {'busy_flat_physnets': busy_flat_physnets}) + LOG.error("Flat network(s) allocated in different physical " + "network(s) exist in destination cloud:\n%s\nIn order " + "to resolve flat networks in the list must be " + "connected to the same physical network in source and " + "destination.", + '\n'.join([str(n) for n in busy_flat_physnets])) + has_overlapping_resources = True # Check physical networks existence on DST for VLAN network type LOG.info("Check physical networks existence for VLAN network type...") @@ -115,29 +171,28 @@ def run(self, **kwargs): missing_vlan_physnets = src_net_info.missing_vlan_physnets( dst_net_info, dst_neutron_client, ext_net_map) if missing_vlan_physnets: - overlapping_resources.update( - {'missing_vlan_physnets': missing_vlan_physnets}) + LOG.error("Following physical networks are not present in " + "destination, but required by source cloud networks: " + "%s\nIn order to resolve make sure neutron has " + "required physical networks defined in config.", + '\n'.join(missing_vlan_physnets)) + + has_overlapping_resources = True # Check VMs spawned directly in external network LOG.info("Check VMs spawned directly in external networks...") devices = src_net_info.get_devices_from_external_networks() vms_list = src_compute_info.list_vms_in_external_network(devices) if vms_list: - LOG.warning('Some VMs are booted directly in external networks: ' - '%s', vms_list) + LOG.warning('Following VMs are booted directly in external ' + 'network, which is not recommended: %s', vms_list) # Print LOG message with all overlapping stuff and abort migration - if overlapping_resources or invalid_resources: - if overlapping_resources: - LOG.critical('Network overlapping list:\n%s', - overlapping_resources) - if invalid_resources: - LOG.critical('Invalid Network resources list:\n%s', - invalid_resources) + if has_overlapping_resources or has_invalid_resources: raise exception.AbortMigrationError( - "There 
is a number of overlapping/invalid Network resources, " - "so migration process can not be continued. Resolve it please " - "and try again") + "There is a number of overlapping/invalid network resources " + "which require manual resolution. See error messages above " + "for details.") class ComputeInfo(object): @@ -292,11 +347,11 @@ def get_overlapping_external_subnets(self, dst_info, ext_net_map): src_subnet['id'], src_subnet['network_id']) continue - overlap = {'src_subnet': src_subnet['id'], - 'dst_subnet': dst_subnet['id']} - LOG.warning("Allocation pool of subnet '%s' on SRC overlaps " - "with allocation pool of subnet '%s' on DST.", - src_subnet['id'], dst_subnet['id']) + overlap = {'src_subnet': src_subnet, + 'dst_subnet': dst_subnet} + LOG.debug("Allocation pool of subnet '%s' on SRC overlaps " + "with allocation pool of subnet '%s' on DST.", + src_subnet['id'], dst_subnet['id']) overlapping_external_subnets.append(overlap) return overlapping_external_subnets @@ -367,7 +422,7 @@ def busy_flat_physnets(self, dst_info, ext_net_map): continue if network.physnet in dst_flat_physnets: - busy_flat_physnets.append(network.physnet) + busy_flat_physnets.append(network) return busy_flat_physnets @@ -444,7 +499,7 @@ def get_invalid_ext_net_ids(self, dst_info, ext_net_map): 'dst_nets': [, , ...]} """ - invalid_ext_nets = {} + invalid_ext_nets = {'src_nets': [], 'dst_nets': []} src_ext_nets_ids = [net.id for net in self.by_id.itervalues() if net.external] @@ -453,9 +508,9 @@ def get_invalid_ext_net_ids(self, dst_info, ext_net_map): for src_net_id, dst_net_id in ext_net_map.iteritems(): if src_net_id not in src_ext_nets_ids: - invalid_ext_nets.setdefault('src_nets', []).append(src_net_id) + invalid_ext_nets['src_nets'].append(src_net_id) if dst_net_id not in dst_ext_nets_ids: - invalid_ext_nets.setdefault('dst_nets', []).append(dst_net_id) + invalid_ext_nets['dst_nets'].append(dst_net_id) return invalid_ext_nets @@ -473,6 +528,10 @@ def __init__(self, info): self.seg_id = self.info["provider:segmentation_id"] self.physnet = self.info["provider:physical_network"] + def __str__(self): + return "{name} ({uuid}), physnet: {phy}".format( + name=self.info['name'], uuid=self.id, phy=self.physnet) + def add_subnet(self, info): self.subnets.append(info) diff --git a/cloudferry/cfglib.py b/cloudferry/cfglib.py index ddb76ae6..8f5b80ff 100644 --- a/cloudferry/cfglib.py +++ b/cloudferry/cfglib.py @@ -130,7 +130,12 @@ help="Path to YAML file which maps source cloud external " "networks to destination cloud external networks. " "Required in case external networks in source and " - "destination don't match."), + "destination don't match.", + deprecated_for_removal=True, + deprecated_reason='Please use resource_map option'), + cfg.StrOpt('resource_map', default='configs/resource_map.yaml', + help="Path to YAML file which maps source cloud objects to " + "destination cloud objects."), cfg.BoolOpt('keep_floatingip', default=False, help='Specifies whether floating IPs will be kept the same ' 'in destination cloud. 
Requires low-level neutron DB ' diff --git a/cloudferry/lib/base/action/action.py b/cloudferry/lib/base/action/action.py index cc4998e1..f588dcd5 100644 --- a/cloudferry/lib/base/action/action.py +++ b/cloudferry/lib/base/action/action.py @@ -14,6 +14,7 @@ from cloudferry.lib.scheduler import task +from cloudferry.lib.utils import mapper from cloudferry.lib.utils import utils @@ -48,6 +49,7 @@ def get_similar_tenants(self): src_identity = self.src_cloud.resources[utils.IDENTITY_RESOURCE] dst_identity = self.dst_cloud.resources[utils.IDENTITY_RESOURCE] + tenant_name_map = mapper.Mapper('tenant_map') src_tenants = src_identity.get_tenants_list() dst_tenants = dst_identity.get_tenants_list() @@ -58,7 +60,7 @@ def get_similar_tenants(self): similar_tenants = {} for src_tenant in src_tenants: - src_tnt_name = src_tenant.name.lower() + src_tnt_name = tenant_name_map.map(src_tenant.name).lower() if src_tnt_name in dst_tenant_map: similar_tenants[src_tenant.id] = dst_tenant_map[src_tnt_name] diff --git a/cloudferry/lib/copy_engines/scp_copier.py b/cloudferry/lib/copy_engines/scp_copier.py index 7faa97ef..82d8a8c9 100644 --- a/cloudferry/lib/copy_engines/scp_copier.py +++ b/cloudferry/lib/copy_engines/scp_copier.py @@ -111,23 +111,23 @@ def transfer(self, data): dst_temp_dir = os.path.join(os.path.basename(path_dst), '.cf.copy') partial_files = [] - with files.RemoteDir(src_runner, src_temp_dir) as src_temp, \ - files.RemoteDir(dst_runner, dst_temp_dir) as dst_temp: + with files.FullAccessRemoteDir(src_runner, src_temp_dir) as src_tmp, \ + files.FullAccessRemoteDir(dst_runner, dst_temp_dir) as dst_tmp: for i in xrange(num_blocks): part = os.path.basename(path_src) + '.part{i}'.format(i=i) - part_path = os.path.join(src_temp.dirname, part) + part_path = os.path.join(src_tmp.dirname, part) files.remote_split_file(src_runner, path_src, part_path, i, block_size) gzipped_path = files.remote_gzip(src_runner, part_path) gzipped_filename = os.path.basename(gzipped_path) - dst_gzipped_path = os.path.join(dst_temp.dirname, + dst_gzipped_path = os.path.join(dst_tmp.dirname, gzipped_filename) self.run_scp(host_src, gzipped_path, host_dst, dst_gzipped_path, gateway) files.remote_unzip(dst_runner, dst_gzipped_path) - partial_files.append(os.path.join(dst_temp.dirname, part)) + partial_files.append(os.path.join(dst_tmp.dirname, part)) for i in xrange(num_blocks): files.remote_join_file(dst_runner, path_dst, partial_files[i], diff --git a/cloudferry/lib/os/compute/nova_compute.py b/cloudferry/lib/os/compute/nova_compute.py index b8ca68a9..7fe346f8 100644 --- a/cloudferry/lib/os/compute/nova_compute.py +++ b/cloudferry/lib/os/compute/nova_compute.py @@ -30,11 +30,12 @@ from cloudferry.lib.os.identity import keystone from cloudferry.lib.scheduler import signal_handler from cloudferry.lib.utils import log +from cloudferry.lib.utils import mapper from cloudferry.lib.utils import mysql_connector from cloudferry.lib.utils import node_ip from cloudferry.lib.utils import override from cloudferry.lib.utils import proxy_client -from cloudferry.lib.utils import utils as utl +from cloudferry.lib.utils import utils from cloudferry.lib.os.compute.usage_quota import UsageQuotaCompute LOG = log.getLogger(__name__) @@ -90,6 +91,7 @@ def __init__(self, config, cloud): # List of instance IDs which failed to create self.processing_instances = [] self.failed_instances = [] + self.tenant_name_map = mapper.Mapper('tenant_map') self.instance_info_caches = instance_info_caches.InstanceInfoCaches( self.get_db_connection()) if 
config.migrate.override_rules is None: @@ -230,10 +232,9 @@ def read_info(self, target='instances', **kwargs): return info - @staticmethod - def convert_instance(instance, cfg, cloud): - identity_res = cloud.resources[utl.IDENTITY_RESOURCE] - compute_res = cloud.resources[utl.COMPUTE_RESOURCE] + def convert_instance(self, instance, cfg, cloud): + identity_res = cloud.resources[utils.IDENTITY_RESOURCE] + compute_res = cloud.resources[utils.COMPUTE_RESOURCE] sg_res = server_groups.ServerGroupsHandler(cloud) instance_name = instance_libvirt_name(instance) @@ -261,16 +262,16 @@ def convert_instance(instance, cfg, cloud): instance_node, ssh_user) - if not utl.libvirt_instance_exists(instance_name, - cfg.cloud.ssh_host, - instance_node, - ssh_user, - cfg.cloud.ssh_sudo_password): + if not utils.libvirt_instance_exists(instance_name, + cfg.cloud.ssh_host, + instance_node, + ssh_user, + cfg.cloud.ssh_sudo_password): LOG.warning('Instance %s (%s) not found on %s, skipping migration', instance_name, instance.id, instance_node) return None - instance_block_info = utl.get_libvirt_block_info( + instance_block_info = utils.get_libvirt_block_info( instance_name, cfg.cloud.ssh_host, instance_node, @@ -288,7 +289,7 @@ def convert_instance(instance, cfg, cloud): instance.id) is_ephemeral = flav_details['ephemeral_gb'] > 0 if is_ephemeral: - ephemeral_path['path_src'] = utl.get_disk_path( + ephemeral_path['path_src'] = utils.get_disk_path( instance, instance_block_info, disk=DISK + LOCAL) @@ -301,7 +302,7 @@ def convert_instance(instance, cfg, cloud): } if instance.image: - diff['path_src'] = utl.get_disk_path( + diff['path_src'] = utils.get_disk_path( instance, instance_block_info) flav_name = compute_res.get_flavor_from_id(instance.flavor['id'], @@ -316,21 +317,22 @@ def convert_instance(instance, cfg, cloud): else: server_group = None - config_drive = utl.get_disk_path(instance, instance_block_info, - disk=utl.DISK_CONFIG) + config_drive = utils.get_disk_path(instance, instance_block_info, + disk=utils.DISK_CONFIG) inst = {'instance': {'name': instance.name, 'instance_name': instance_name, 'id': instance.id, 'tenant_id': instance.tenant_id, - 'tenant_name': tenant_name, + 'tenant_name': self.tenant_name_map.map( + tenant_name), 'status': instance.status, 'flavor_id': instance.flavor['id'], 'flav_details': flav_details, 'image_id': instance.image[ 'id'] if instance.image else None, - 'boot_mode': (utl.BOOT_FROM_IMAGE + 'boot_mode': (utils.BOOT_FROM_IMAGE if instance.image - else utl.BOOT_FROM_VOLUME), + else utils.BOOT_FROM_VOLUME), 'key_name': instance.key_name, 'availability_zone': getattr( instance, @@ -358,7 +360,7 @@ def convert_resources(compute_obj, cloud): if isinstance(compute_obj, nova_client.flavors.Flavor): - compute_res = cloud.resources[utl.COMPUTE_RESOURCE] + compute_res = cloud.resources[utils.COMPUTE_RESOURCE] tenants = [] if not compute_obj.is_public: @@ -402,12 +404,11 @@ def convert_resources(compute_obj, cloud): 'metadata_items': compute_obj.metadata_items}, 'meta': {}} - @staticmethod - def convert(obj, cfg=None, cloud=None): + def convert(self, obj, cfg=None, cloud=None): if isinstance(obj, nova_client.servers.Server): - return NovaCompute.convert_instance(obj, cfg, cloud) + return self.convert_instance(obj, cfg, cloud) elif isinstance(obj, nova_client.flavors.Flavor): - return NovaCompute.convert_resources(obj, cloud) + return self.convert_resources(obj, cloud) LOG.error('NovaCompute converter has received incorrect value. 
Please ' 'pass to it only instance or flavor objects.') @@ -633,9 +634,9 @@ def _deploy_instance(self, instance, availability_zone): instance, 'server_group') }, } - if instance['boot_mode'] == utl.BOOT_FROM_VOLUME: + if instance['boot_mode'] == utils.BOOT_FROM_VOLUME: volume_id = instance['volumes'][0]['id'] - storage = self.cloud.resources[utl.STORAGE_RESOURCE] + storage = self.cloud.resources[utils.STORAGE_RESOURCE] vol = storage.get_migrated_volume(volume_id) if vol: @@ -926,7 +927,7 @@ def get_status(self, res_id): return self.nova_client.servers.get(res_id).status def get_networks(self, instance): - network_resource = self.cloud.resources[utl.NETWORK_RESOURCE] + network_resource = self.cloud.resources[utils.NETWORK_RESOURCE] interfaces = network_resource.get_instance_network_info( instance.id) if self.config.migrate.keep_network_interfaces_order: diff --git a/cloudferry/lib/os/compute/server_groups.py b/cloudferry/lib/os/compute/server_groups.py index 201fc09f..15a0af77 100644 --- a/cloudferry/lib/os/compute/server_groups.py +++ b/cloudferry/lib/os/compute/server_groups.py @@ -32,6 +32,7 @@ from cloudferry.lib.base import compute from cloudferry.lib.os.identity import keystone from cloudferry.lib.utils import log +from cloudferry.lib.utils import mapper from cloudferry.lib.utils import proxy_client from cloudferry.lib.utils import utils @@ -64,6 +65,7 @@ def __init__(self, cloud): self.compute = self.cloud.resources[utils.COMPUTE_RESOURCE] self.identity = self.cloud.resources[utils.IDENTITY_RESOURCE] self.config = copy.deepcopy(self.identity.config) + self.tenant_name_map = mapper.Mapper('tenant_map') def _execute(self, sql): """ @@ -118,7 +120,7 @@ def get_server_groups(self): groups.append( {"user": self.identity.try_get_username_by_id(row[0]), - "tenant": tenant_name, + "tenant": self.tenant_name_map.map(tenant_name), "uuid": row[2], "name": row[3], "policies": policies}) diff --git a/cloudferry/lib/os/discovery/nova.py b/cloudferry/lib/os/discovery/nova.py index d5055dae..47437b39 100644 --- a/cloudferry/lib/os/discovery/nova.py +++ b/cloudferry/lib/os/discovery/nova.py @@ -151,6 +151,8 @@ def load_from_cloud(self, data): server_image = None if data.image: server_image = data.image['id'] + attached_volumes = [self.find_ref(storage.Attachment, attachment) + for attachment in raw_attachments] server_dict = { 'object_id': self.make_id(data.id), 'security_groups': [], # TODO: implement security groups @@ -162,8 +164,7 @@ def load_from_cloud(self, data): 'host': getattr(data, EXT_ATTR_HOST), 'hypervisor_hostname': getattr(data, EXT_ATTR_HYPER_HOST), 'instance_name': getattr(data, EXT_ATTR_INSTANCE_NAME), - 'attached_volumes': [self.find_ref(storage.Attachment, attachment) - for attachment in raw_attachments], + 'attached_volumes': [av for av in attached_volumes if av], 'ephemeral_disks': [], # Ephemeral disks will be filled later } for attr_name in ('name', 'status', 'user_id', 'key_name', diff --git a/cloudferry/lib/os/estimation/procedures.py b/cloudferry/lib/os/estimation/procedures.py index 26433e8c..7cf5c45e 100644 --- a/cloudferry/lib/os/estimation/procedures.py +++ b/cloudferry/lib/os/estimation/procedures.py @@ -60,7 +60,8 @@ def run(self): for ephemeral_disk in server.ephemeral_disks: ephemeral_count += 1 ephemeral_size += ephemeral_disk.size - for volume in server.attached_volumes: + for obj in server.attached_volumes: + volume = obj.volume volumes_count += 1 volumes_size += volume.size * G used_volumes.add(volume.object_id) diff --git 
a/cloudferry/lib/os/identity/keystone.py b/cloudferry/lib/os/identity/keystone.py index bcd09228..1d0b3701 100644 --- a/cloudferry/lib/os/identity/keystone.py +++ b/cloudferry/lib/os/identity/keystone.py @@ -23,6 +23,7 @@ from cloudferry import cfglib from cloudferry.lib.base import identity from cloudferry.lib.utils import log +from cloudferry.lib.utils import mapper from cloudferry.lib.utils import proxy_client from cloudferry.lib.utils import retrying from cloudferry.lib.utils import utils as utl @@ -107,13 +108,13 @@ def __init__(self, config, cloud): self.templater = Templater() self.generator = GeneratorPassword() self.defaults = {} + self.tenant_name_map = mapper.Mapper('tenant_map') @property def keystone_client(self): return self.proxy(self.get_client(), self.config) - @staticmethod - def convert(identity_obj, cfg): + def convert(self, identity_obj, cfg): """Convert OpenStack Keystone object to CloudFerry object. :param identity_obj: Direct OpenStack Keystone object to convert, @@ -122,10 +123,15 @@ def convert(identity_obj, cfg): """ if isinstance(identity_obj, keystone_client.tenants.Tenant): - return {'tenant': {'name': identity_obj.name, - 'id': identity_obj.id, - 'description': identity_obj.description}, - 'meta': {}} + return { + 'tenant': { + 'name': self.tenant_name_map.map( + identity_obj.name), + 'id': identity_obj.id, + 'description': identity_obj.description + }, + 'meta': {} + } elif isinstance(identity_obj, keystone_client.users.User): overwrite_user_passwords = cfg.migrate.overwrite_user_passwords @@ -644,9 +650,11 @@ def _get_user_roles(user_id, tenant_id): def _get_user_tenants_roles_by_db(self, tenant_list, user_list): user_tenants_roles = { - u.name.lower(): {t.name.lower(): [] for t in tenant_list} + u.name.lower(): {self.tenant_name_map.map(t.name).lower(): [] + for t in tenant_list} for u in user_list} - tenant_ids = {tenant.id: tenant.name.lower() for tenant in tenant_list} + tenant_ids = {tenant.id: self.tenant_name_map.map(tenant.name).lower() + for tenant in tenant_list} user_ids = {user.id: user.name.lower() for user in user_list} roles = {r.id: r for r in self.get_roles_list()} for user_id, tenant_id, roles_field in self._get_roles_sql_request(): @@ -671,10 +679,11 @@ def _get_user_tenants_roles_by_api(self, tenant_list, user_list): for user in user_list: user_tenants_roles[user.name.lower()] = {} for tenant in tenant_list: + tenant_name = self.tenant_name_map.map(tenant.name) roles = [] for role in self.roles_for_user(user.id, tenant.id): roles.append({'role': {'name': role.name, 'id': role.id}}) - user_tenants_roles[user.name.lower()][tenant.name.lower()] = \ + user_tenants_roles[user.name.lower()][tenant_name.lower()] = \ roles return user_tenants_roles @@ -752,13 +761,16 @@ def get_db_version(self): for raw in res: return raw['version'] - @staticmethod - def identical(src_tenant, dst_tenant): - if not src_tenant: - src_tenant = {'name': cfglib.CONF.src.tenant} - if not dst_tenant: - dst_tenant = {'name': cfglib.CONF.dst.tenant} - return src_tenant['name'].lower() == dst_tenant['name'].lower() + def identical(self, src_tenant, dst_tenant): + if src_tenant: + src_tenant_name = self.tenant_name_map.map(src_tenant['name']) + else: + src_tenant_name = cfglib.CONF.src.tenant + if dst_tenant: + dst_tenant_name = dst_tenant['name'] + else: + dst_tenant_name = cfglib.CONF.dst.tenant + return src_tenant_name.lower() == dst_tenant_name.lower() def get_dst_tenant_from_src_tenant_id(src_keystone, dst_keystone, @@ -773,7 +785,9 @@ def 
get_dst_tenant_from_src_tenant_id(src_keystone, dst_keystone, try: with proxy_client.expect_exception(ks_exceptions.NotFound): client = dst_keystone.keystone_client - return client.tenants.find(name=src_tenant.name) + dst_tenant_name = src_keystone.tenant_name_map.map(src_tenant.name) + return find_by_name('tenant', client.tenants.list(), + dst_tenant_name) except ks_exceptions.NotFound: return None diff --git a/cloudferry/lib/os/image/glance_image.py b/cloudferry/lib/os/image/glance_image.py index bd5f93ea..5e45a4f7 100644 --- a/cloudferry/lib/os/image/glance_image.py +++ b/cloudferry/lib/os/image/glance_image.py @@ -30,6 +30,7 @@ from cloudferry.lib.utils import file_proxy from cloudferry.lib.utils import filters from cloudferry.lib.utils import log +from cloudferry.lib.utils import mapper from cloudferry.lib.utils import proxy_client from cloudferry.lib.utils import retrying from cloudferry.lib.utils import remote_runner @@ -120,6 +121,7 @@ def __init__(self, config, cloud): self.runner = remote_runner.RemoteRunner(self.ssh_host, self.config.cloud.ssh_user) self._image_filter = None + self.tenant_name_map = mapper.Mapper('tenant_map') super(GlanceImage, self).__init__(config) def get_image_filter(self): @@ -317,11 +319,12 @@ def convert(self, glance_image, cloud): # at this point we write name of owner of this tenant # to map it to different tenant id on destination - gl_image.update( - {'owner_name': keystone.try_get_tenant_name_by_id( - glance_image.owner, default=cloud.cloud_config.cloud.tenant)}) - gl_image.update({ - "members": self.get_members({gl_image['id']: {'image': gl_image}}) + gl_image['owner_name'] = self.tenant_name_map.map( + keystone.try_get_tenant_name_by_id( + glance_image.owner, + default=cloud.cloud_config.cloud.tenant)) + gl_image['members'] = self.get_members({ + gl_image['id']: {'image': gl_image} }) if self.is_snapshot(glance_image): @@ -349,8 +352,9 @@ def get_members(self, images): if img not in result: result[img] = {} - tenant_name = self.identity_client.try_get_tenant_name_by_id( - entry.member_id, default=self.config.cloud.tenant) + tenant_name = self.tenant_name_map.map( + self.identity_client.try_get_tenant_name_by_id( + entry.member_id, default=self.config.cloud.tenant)) result[img][tenant_name] = entry.can_share return result diff --git a/cloudferry/lib/os/network/neutron.py b/cloudferry/lib/os/network/neutron.py index a62c55b4..d1f42f99 100644 --- a/cloudferry/lib/os/network/neutron.py +++ b/cloudferry/lib/os/network/neutron.py @@ -27,7 +27,8 @@ from cloudferry.lib.os.identity import keystone as ksresource from cloudferry.lib.utils import cache from cloudferry.lib.utils import log -from cloudferry.lib.utils import utils as utl +from cloudferry.lib.utils import mapper +from cloudferry.lib.utils import utils LOG = log.getLogger(__name__) @@ -46,10 +47,10 @@ class NeutronNetwork(network.Network): def __init__(self, config, cloud): super(NeutronNetwork, self).__init__(config) self.cloud = cloud - self.identity_client = cloud.resources[utl.IDENTITY_RESOURCE] + self.identity_client = cloud.resources[utils.IDENTITY_RESOURCE] self.filter_tenant_id = None - self.ext_net_map = \ - utl.read_yaml_file(self.config.migrate.ext_net_map) or {} + self.ext_net_map = mapper.Mapper('ext_network_map') + self.tenant_name_map = mapper.Mapper('tenant_map') self.mysql_connector = cloud.mysql_connector('neutron') @property @@ -151,12 +152,14 @@ def get_quota(self, tenant_id): data = {} if self.config.network.get_all_quota: for t_id, t_val in tenants.iteritems(): - data[t_val] 
= self.neutron_client.show_quota(t_id) + tenant_name = self.tenant_name_map.map(t_val) + data[tenant_name] = self.neutron_client.show_quota(t_id) else: for t in self.neutron_client.list_quotas()['quotas']: if (not tenant_id) or (tenant_id == t['tenant_id']): - tenant_name = self.identity_client.\ + t_val = self.identity_client.\ try_get_tenant_name_by_id(t['tenant_id']) + tenant_name = self.tenant_name_map.map(t_val) data[tenant_name] = {k: v for k, v in t.iteritems() if k != 'tenant_id'} @@ -367,8 +370,7 @@ def check_existing_port(self, network_id, mac=None, ip_address=None, return port return None - @staticmethod - def convert(neutron_object, cloud, obj_name): + def convert(self, neutron_object, cloud, obj_name): """Convert OpenStack Neutron network object to CloudFerry object. :param neutron_object: Direct OS NeutronNetwork object to convert, @@ -380,23 +382,23 @@ def convert(neutron_object, cloud, obj_name): """ obj_map = { - 'network': NeutronNetwork.convert_networks, - 'subnet': NeutronNetwork.convert_subnets, - 'router': NeutronNetwork.convert_routers, - 'floating_ip': NeutronNetwork.convert_floatingips, - 'security_group': NeutronNetwork.convert_security_groups, - 'rule': NeutronNetwork.convert_rules, - 'lb_pool': NeutronNetwork.convert_lb_pools, - 'lb_member': NeutronNetwork.convert_lb_members, - 'lb_monitor': NeutronNetwork.convert_lb_monitors, - 'lb_vip': NeutronNetwork.convert_lb_vips + 'network': self.convert_networks, + 'subnet': self.convert_subnets, + 'router': self.convert_routers, + 'floating_ip': self.convert_floatingips, + 'security_group': self.convert_security_groups, + 'rule': self.convert_rules, + 'lb_pool': self.convert_lb_pools, + 'lb_member': self.convert_lb_members, + 'lb_monitor': self.convert_lb_monitors, + 'lb_vip': self.convert_lb_vips } return obj_map[obj_name](neutron_object, cloud) def convert_networks(self, net, cloud): - identity_res = cloud.resources[utl.IDENTITY_RESOURCE] - net_res = cloud.resources[utl.NETWORK_RESOURCE] + identity_res = cloud.resources[utils.IDENTITY_RESOURCE] + net_res = cloud.resources[utils.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func() subnets = [] @@ -413,7 +415,8 @@ def convert_networks(self, net, cloud): 'admin_state_up': net['admin_state_up'], 'shared': net['shared'], 'tenant_id': net['tenant_id'], - 'tenant_name': get_tenant_name(net['tenant_id']), + 'tenant_name': self.tenant_name_map.map( + get_tenant_name(net['tenant_id'])), 'subnets': subnets, 'router:external': net['router:external'], 'provider:physical_network': net['provider:physical_network'], @@ -434,10 +437,9 @@ def convert_networks(self, net, cloud): result['res_hash'] = res_hash return result - @staticmethod - def convert_subnets(snet, cloud): - identity_res = cloud.resources[utl.IDENTITY_RESOURCE] - network_res = cloud.resources[utl.NETWORK_RESOURCE] + def convert_subnets(self, snet, cloud): + identity_res = cloud.resources[utils.IDENTITY_RESOURCE] + network_res = cloud.resources[utils.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func() networks_list = network_res.get_networks_list() @@ -456,7 +458,8 @@ def convert_subnets(snet, cloud): 'network_name': net['name'], 'external': net['router:external'], 'network_id': snet['network_id'], - 'tenant_name': get_tenant_name(snet['tenant_id']), + 'tenant_name': self.tenant_name_map.map( + get_tenant_name(snet['tenant_id'])), 'dns_nameservers': snet['dns_nameservers'], 'meta': {}, } @@ -475,10 +478,9 @@ def convert_subnets(snet, cloud): return result - @staticmethod - def 
convert_routers(router, cloud): - identity_res = cloud.resources[utl.IDENTITY_RESOURCE] - net_res = cloud.resources[utl.NETWORK_RESOURCE] + def convert_routers(self, router, cloud): + identity_res = cloud.resources[utils.IDENTITY_RESOURCE] + net_res = cloud.resources[utils.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func() @@ -487,7 +489,8 @@ def convert_routers(router, cloud): 'id': router['id'], 'admin_state_up': router['admin_state_up'], 'external_gateway_info': router['external_gateway_info'], - 'tenant_name': get_tenant_name(router['tenant_id']), + 'tenant_name': self.tenant_name_map.map( + get_tenant_name(router['tenant_id'])), 'meta': {}, } result.update(net_res.get_ports_info(router)) @@ -511,10 +514,9 @@ def convert_routers(router, cloud): return result - @staticmethod - def convert_floatingips(floating, cloud): - identity_res = cloud.resources[utl.IDENTITY_RESOURCE] - net_res = cloud.resources[utl.NETWORK_RESOURCE] + def convert_floatingips(self, floating, cloud): + identity_res = cloud.resources[utils.IDENTITY_RESOURCE] + net_res = cloud.resources[utils.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func() @@ -527,8 +529,10 @@ def convert_floatingips(floating, cloud): 'tenant_id': floating['tenant_id'], 'floating_network_id': ext_id, 'network_name': extnet['name'], - 'ext_net_tenant_name': get_tenant_name(extnet['tenant_id']), - 'tenant_name': get_tenant_name(floating['tenant_id']), + 'ext_net_tenant_name': self.tenant_name_map.map( + get_tenant_name(extnet['tenant_id'])), + 'tenant_name': self.tenant_name_map.map( + get_tenant_name(floating['tenant_id'])), 'fixed_ip_address': floating['fixed_ip_address'], 'floating_ip_address': floating['floating_ip_address'], 'port_id': floating['port_id'], @@ -539,7 +543,7 @@ def convert_floatingips(floating, cloud): @staticmethod def convert_rules(rule, cloud): - net_res = cloud.resources[utl.NETWORK_RESOURCE] + net_res = cloud.resources[utils.NETWORK_RESOURCE] rule_hash = net_res.get_resource_hash(rule, 'direction', @@ -564,10 +568,9 @@ def convert_rules(rule, cloud): return result - @staticmethod - def convert_security_groups(sec_gr, cloud): - identity_res = cloud.resources[utl.IDENTITY_RESOURCE] - net_res = cloud.resources[utl.NETWORK_RESOURCE] + def convert_security_groups(self, sec_gr, cloud): + identity_res = cloud.resources[utils.IDENTITY_RESOURCE] + net_res = cloud.resources[utils.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func( return_default_tenant=False) @@ -576,9 +579,10 @@ def convert_security_groups(sec_gr, cloud): 'name': sec_gr['name'], 'id': sec_gr['id'], 'tenant_id': sec_gr['tenant_id'], - 'tenant_name': get_tenant_name(sec_gr['tenant_id']), + 'tenant_name': self.tenant_name_map.map( + get_tenant_name(sec_gr['tenant_id'])), 'description': sec_gr['description'], - 'security_group_rules': [NeutronNetwork.convert(gr, cloud, 'rule') + 'security_group_rules': [self.convert(gr, cloud, 'rule') for gr in sec_gr['security_group_rules']], 'meta': {}, } @@ -592,10 +596,9 @@ def convert_security_groups(sec_gr, cloud): return result - @staticmethod - def convert_lb_pools(pool, cloud): - identity_res = cloud.resources[utl.IDENTITY_RESOURCE] - net_res = cloud.resources[utl.NETWORK_RESOURCE] + def convert_lb_pools(self, pool, cloud): + identity_res = cloud.resources[utils.IDENTITY_RESOURCE] + net_res = cloud.resources[utils.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func( return_default_tenant=False) @@ -609,7 +612,8 @@ def convert_lb_pools(pool, cloud): 'subnet_id': 
pool['subnet_id'], 'provider': pool.get('provider'), 'tenant_id': pool['tenant_id'], - 'tenant_name': get_tenant_name(pool['tenant_id']), + 'tenant_name': self.tenant_name_map.map( + get_tenant_name(pool['tenant_id'])), 'health_monitors': pool['health_monitors'], 'members': pool['members'], 'meta': {} @@ -624,10 +628,9 @@ def convert_lb_pools(pool, cloud): return result - @staticmethod - def convert_lb_monitors(monitor, cloud): - identity_res = cloud.resources[utl.IDENTITY_RESOURCE] - net_res = cloud.resources[utl.NETWORK_RESOURCE] + def convert_lb_monitors(self, monitor, cloud): + identity_res = cloud.resources[utils.IDENTITY_RESOURCE] + net_res = cloud.resources[utils.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func( return_default_tenant=False) @@ -635,7 +638,8 @@ def convert_lb_monitors(monitor, cloud): result = { 'id': monitor['id'], 'tenant_id': monitor['tenant_id'], - 'tenant_name': get_tenant_name(monitor['tenant_id']), + 'tenant_name': self.tenant_name_map.map( + get_tenant_name(monitor['tenant_id'])), 'type': monitor['type'], 'delay': monitor['delay'], 'timeout': monitor['timeout'], @@ -657,10 +661,9 @@ def convert_lb_monitors(monitor, cloud): return result - @staticmethod - def convert_lb_members(member, cloud): - identity_res = cloud.resources[utl.IDENTITY_RESOURCE] - net_res = cloud.resources[utl.NETWORK_RESOURCE] + def convert_lb_members(self, member, cloud): + identity_res = cloud.resources[utils.IDENTITY_RESOURCE] + net_res = cloud.resources[utils.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func( return_default_tenant=False) @@ -672,7 +675,8 @@ def convert_lb_members(member, cloud): 'protocol_port': member['protocol_port'], 'weight': member['weight'], 'tenant_id': member['tenant_id'], - 'tenant_name': get_tenant_name(member['tenant_id']), + 'tenant_name': self.tenant_name_map.map( + get_tenant_name(member['tenant_id'])), 'meta': {} } @@ -686,10 +690,9 @@ def convert_lb_members(member, cloud): return result - @staticmethod - def convert_lb_vips(vip, cloud): - identity_res = cloud.resources[utl.IDENTITY_RESOURCE] - net_res = cloud.resources[utl.NETWORK_RESOURCE] + def convert_lb_vips(self, vip, cloud): + identity_res = cloud.resources[utils.IDENTITY_RESOURCE] + net_res = cloud.resources[utils.NETWORK_RESOURCE] get_tenant_name = identity_res.get_tenants_func( return_default_tenant=False) @@ -706,7 +709,8 @@ def convert_lb_vips(vip, cloud): 'session_persistence': vip.get('session_persistence', None), 'tenant_id': vip['tenant_id'], 'subnet_id': vip['subnet_id'], - 'tenant_name': get_tenant_name(vip['tenant_id']), + 'tenant_name': self.tenant_name_map.map( + get_tenant_name(vip['tenant_id'])), 'meta': {} } diff --git a/cloudferry/lib/os/storage/cinder_storage.py b/cloudferry/lib/os/storage/cinder_storage.py index afb32373..78d95f27 100644 --- a/cloudferry/lib/os/storage/cinder_storage.py +++ b/cloudferry/lib/os/storage/cinder_storage.py @@ -22,6 +22,7 @@ from cloudferry.lib.os.storage import filters as cinder_filters from cloudferry.lib.utils import filters from cloudferry.lib.utils import log +from cloudferry.lib.utils import mapper from cloudferry.lib.utils import proxy_client from cloudferry.lib.utils import retrying from cloudferry.lib.utils import utils @@ -77,6 +78,7 @@ def __init__(self, config, cloud): self.mysql_connector = cloud.mysql_connector('cinder') self.volume_filter = None self.filter_tenant_id = None + self.tenant_name_map = mapper.Mapper('tenant_map') @property def cinder_client(self): @@ -185,7 +187,7 @@ def 
_read_info_quota(self): LOG.debug("Retrieved cinder quota for tenant '%s' (%s): %s", t.name, t.id, quota) - quota['tenant_name'] = t.name + quota['tenant_name'] = self.tenant_name_map.map(t.name) quotas.append(quota) return quotas diff --git a/cloudferry/lib/os/storage/plugins/nfs/generic.py b/cloudferry/lib/os/storage/plugins/nfs/generic.py index 5f775a35..cd8c1d84 100644 --- a/cloudferry/lib/os/storage/plugins/nfs/generic.py +++ b/cloudferry/lib/os/storage/plugins/nfs/generic.py @@ -52,12 +52,14 @@ def get_volume_object(self, context, volume_id): """:raises: VolumeObjectNotFoundError in case object is not found""" controller = context.cloud_config.cloud.ssh_host user = context.cloud_config.cloud.ssh_user + password = context.cloud_config.cloud.ssh_sudo_password paths = context.cloud_config.storage.nfs_mount_point_bases volume_template = context.cloud_config.storage.volume_name_template volume_pattern = generate_volume_pattern(volume_template, volume_id) - rr = remote_runner.RemoteRunner(controller, user, ignore_errors=True) + rr = remote_runner.RemoteRunner( + controller, user, ignore_errors=True, sudo=True, password=password) for mount_point in paths: # errors are ignored to avoid "Filesystem loop detected" messages diff --git a/cloudferry/lib/utils/files.py b/cloudferry/lib/utils/files.py index ccdbefaf..b7242f5c 100644 --- a/cloudferry/lib/utils/files.py +++ b/cloudferry/lib/utils/files.py @@ -78,6 +78,24 @@ def __exit__(self, exc_type, exc_val, exc_tb): ignoring_errors=True) +class FullAccessRemoteDir(RemoteDir): + def __init__(self, runner, dirname): + super(FullAccessRemoteDir, self).__init__(runner, dirname) + self.old_perms = None + + def __enter__(self): + new_dir = super(FullAccessRemoteDir, self).__enter__() + self.old_perms = try_remote_get_file_permissions(self.runner, + self.dirname) + try_remote_chmod(self.runner, 777, self.dirname) + return new_dir + + def __exit__(self, exc_type, exc_val, exc_tb): + super(FullAccessRemoteDir, self).__exit__(exc_type, exc_val, exc_tb) + if self.old_perms: + try_remote_chmod(self.runner, self.old_perms, self.dirname) + + def is_installed(runner, cmd): try: is_installed_cmd = "type {cmd} >/dev/null 2>&1".format(cmd=cmd) @@ -133,17 +151,20 @@ def __init__(self, runner, path): self.old_dir_perms = None def __enter__(self): + self.grant_permissions() + + def __exit__(self, *_): + self.restore_permissions() + + def grant_permissions(self): self.old_dir_perms = remote_get_file_permissions(self.runner, self.dir_path) - # there may be no file in destination (which is fine) self.old_file_perms = try_remote_get_file_permissions(self.runner, self.file_path) - LOG.debug("Temporarily adding full access to '%s' dir on '%s' host", self.dir_path, self.runner.host) try_remote_chmod(self.runner, 777, self.dir_path) - LOG.debug("Temporarily adding full access to '%s' file on '%s' host", self.file_path, self.runner.host) try_remote_chmod(self.runner, 666, self.file_path) @@ -154,7 +175,7 @@ def _restore_access(self, path, perms): perms, path, self.runner.host) try_remote_chmod(self.runner, perms, path) - def __exit__(self, *_): + def restore_permissions(self): self._restore_access(self.file_path, self.old_file_perms) self._restore_access(self.dir_path, self.old_dir_perms) diff --git a/cloudferry/lib/utils/mapper.py b/cloudferry/lib/utils/mapper.py new file mode 100644 index 00000000..1786722f --- /dev/null +++ b/cloudferry/lib/utils/mapper.py @@ -0,0 +1,80 @@ +# Copyright (c) 2016 Mirantis Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the License); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an AS IS BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and# +# limitations under the License. +import logging +import os +import yaml + +from cloudferry import cfglib + +LOG = logging.getLogger(__name__) +CONF = cfglib.CONF + + +class Mapper(object): + _config = None + + @classmethod + def _load_configuration(cls): + resource_map_file = CONF.migrate.resource_map + if not os.path.exists(resource_map_file): + cls._load_deprecated_configuration() + return + + with open(resource_map_file, 'r') as f: + data = yaml.load(f) + if data is None: + cls._load_deprecated_configuration() + else: + if not isinstance(data, dict): + raise TypeError('%s root object must be dictionary!' % + (resource_map_file,)) + cls._config = data + + @classmethod + def _load_deprecated_configuration(cls): + ext_net_map_file = CONF.migrate.ext_net_map + if not os.path.exists(ext_net_map_file): + LOG.warning('Mapping configuration is absent!') + cls._config = {} + return + + with open(ext_net_map_file, 'r') as f: + data = yaml.load(f) + if data is None: + cls._config = {} + else: + if not isinstance(data, dict): + raise TypeError('%s root object must be dictionary!' % + (ext_net_map_file,)) + cls._config = {'ext_network_map': data} + + def __init__(self, mapping_name): + if self._config is None: + self._load_configuration() + self._mapping = self._config.get(mapping_name, {}) + + def __getitem__(self, item): + return self._mapping[item] + + def __contains__(self, item): + return item in self._mapping + + def get(self, item, default=None): + return self._mapping.get(item, default) + + def map(self, value): + return self._mapping.get(value, value) + + def iteritems(self): + return self._mapping.iteritems() diff --git a/cloudferry/model/__init__.py b/cloudferry/model/__init__.py index e2a4e683..d077f54b 100644 --- a/cloudferry/model/__init__.py +++ b/cloudferry/model/__init__.py @@ -620,6 +620,16 @@ def _retrieve_obj(self): with Session.current() as session: self._object = session.retrieve(self._model, self.primary_key) + def _get_object_or_none(self): + if self._object is None: + try: + self._retrieve_obj() + return self._object + except NotFound: + return None + else: + return self._object + def get(self, name): """ Returns object attribute by name. @@ -638,6 +648,25 @@ def get_class_qualname(self): """ return utils.qualname(self._model) + def equals(self, other): + """ + Returns True if objects are same even if they are in different clouds. + For example, same image that was manually uploaded to tenant with name + "admin" to different clouds are equal, and therefore don't need to be + migrated. 
+ """ + left = self._get_object_or_none() + if isinstance(other, LazyObj): + right = self._get_object_or_none() + else: + right = other + if left is not None and right is not None and left.equals(right): + return True + elif other is not None: + return self.primary_key.id == other.primary_key.id + else: + return False + class Session(object): """ diff --git a/cloudferry_devlab/cloudferry_devlab/generate_load.py b/cloudferry_devlab/cloudferry_devlab/generate_load.py index 77c511de..e2976213 100644 --- a/cloudferry_devlab/cloudferry_devlab/generate_load.py +++ b/cloudferry_devlab/cloudferry_devlab/generate_load.py @@ -401,18 +401,10 @@ def create_vms(self, vm_list): self.attach_volume_to_vm(volume=volume, vm=new_vm) if volume.get('write_to_file'): self.write_data_to_volumes(fip, volume) - if vm.get('broken'): - self.break_vm(new_vm.id) - for vm_with_state in self.config.vm_states: - if new_vm.name == vm_with_state.get('name'): - msg = 'Changing the VM {} state to {}'\ - .format(new_vm.name, vm_with_state.get('state')) - self.log.info(msg) - res = self.set_vm_state(self.novaclient, new_vm.id, - vm_with_state.get('state'), - logger=self.log) - self.wait_until_objects([res], self.check_vm_state, - conf.TIMEOUT) + self.log.info('Shutting off VM %s', new_vm.name) + res = self.set_vm_state(self.novaclient, new_vm.id, 'SHUTOFF', + logger=self.log) + self.wait_until_objects([res], self.check_vm_state, conf.TIMEOUT) def create_all_vms(self): self.create_vms(self.config.vms) @@ -735,6 +727,13 @@ def write_data_to_volumes(self, vm_ip, volume): cmd = 'sh -c "md5sum {path}/{_file} > {path}/{_file}_md5"' self.migration_utils.execute_command_on_vm(vm_ip, cmd.format( path=path, _file=filename)) + self.migration_utils.execute_command_on_vm(vm_ip, 'sync') + self.migration_utils.execute_command_on_vm( + vm_ip, '/sbin/blockdev --flushbufs /dev/vda') + self.migration_utils.execute_command_on_vm( + vm_ip, '/sbin/blockdev --flushbufs /dev/vdb') + self.migration_utils.execute_command_on_vm( + vm_ip, 'umount {0}'.format(volume['mount_point'])) def create_invalid_cinder_objects(self): invalid_volume_tmlt = 'cinder_volume_%s' @@ -801,13 +800,14 @@ def emulate_vm_states(self): conf.TIMEOUT) self.wait_until_objects(vms, self.check_vm_state, conf.TIMEOUT) - def delete_flavor(self, flavor='del_flvr'): + def delete_flavors(self): """ Method for flavor deletion. 
""" try: - self.novaclient.flavors.delete( - self.get_flavor_id(flavor)) + for flavor in self.config.flavors_deleted_after_vm_boot: + self.novaclient.flavors.delete( + self.get_flavor_id(flavor)) except nv_exceptions.ClientException: self.log.warning("Flavor %s failed to delete:", flavor, exc_info=True) @@ -859,8 +859,11 @@ def create_tenant_wo_sec_group_on_dst(self): for t in self.config.tenants: if not t.get('deleted') and t['enabled']: try: + tnt_name = t['name'] + if t.get('uppercase'): + tnt_name = t['name'].upper() self.dst_cloud.keystoneclient.tenants.create( - tenant_name=t['name'], description=t['description'], + tenant_name=tnt_name, description=t['description'], enabled=t['enabled']) except ks_exceptions.Conflict: pass @@ -901,6 +904,9 @@ def create_user_on_dst(self): self.dst_cloud.create_users([user]) self.dst_cloud.create_user_tenant_roles([user_tenant_role]) + def create_user_in_uppercase_on_dst(self): + self.dst_cloud.create_users(self.config.dst_users) + def create_volumes_from_images(self): self.create_cinder_volumes(self.config.cinder_volumes_from_images) @@ -923,9 +929,12 @@ def break_vm(self, vm_id): """ inst_name = getattr(self.novaclient.servers.get(vm_id), 'OS-EXT-SRV-ATTR:instance_name') - cmd = 'virsh destroy {0} && virsh undefine {0}'.format(inst_name) - self.migration_utils.execute_command_on_vm( - self.get_vagrant_vm_ip(), cmd, username='root', password='') + for cmd in ['virsh destroy {0}'.format(inst_name), + 'virsh undefine {0}'.format(inst_name)]: + self.migration_utils.execute_command_on_vm( + self.get_vagrant_vm_ip(), cmd, + username=self.configuration_ini['src']['ssh_user'], + password=self.configuration_ini['src']['ssh_sudo_password']) def delete_image_on_dst(self): """ Method delete images with a 'delete_on_dst' flag on @@ -951,7 +960,9 @@ def break_images(self): image_id = self.get_image_id(image['name']) cmd = 'rm -rf /var/lib/glance/images/%s' % image_id self.migration_utils.execute_command_on_vm( - self.get_vagrant_vm_ip(), cmd, username='root', password='') + self.get_vagrant_vm_ip(), cmd, + username=self.configuration_ini['src']['ssh_user'], + password=self.configuration_ini['src']['ssh_sudo_password']) for image in images_to_delete: image_id = self.get_image_id(image['name']) self.glanceclient.images.delete(image_id) @@ -1093,6 +1104,8 @@ def run_preparation_scenario(self): self.create_tenant_wo_sec_group_on_dst() self.log.info('Create role on dst') self.create_user_on_dst() + self.log.info('Create user and tenant in upper case on dst') + self.create_user_in_uppercase_on_dst() self.log.info('Creating networks on dst') self.create_dst_networking() self.log.info('Creating vms on dst') @@ -1101,8 +1114,8 @@ def run_preparation_scenario(self): self.create_invalid_cinder_objects() self.log.info('Create swift containers and objects') self.create_swift_container_and_objects() - self.log.info('Deleting flavor') - self.delete_flavor() + self.log.info('Deleting flavors which should be deleted') + self.delete_flavors() self.log.info('Modifying admin tenant quotas') self.modify_admin_tenant_quotas() self.log.info('Update network quotas') @@ -1126,6 +1139,5 @@ def run_restore_vms_state(self): self.emulate_vm_states() self.log.info('Breaking VMs') for vm in [self.get_vm_id(vm['name']) for vm in - self.migration_utils.get_all_vms_from_config() - if vm.get('broken')]: + self.src_vms_from_config if vm.get('broken')]: self.break_vm(vm) diff --git a/cloudferry_devlab/cloudferry_devlab/tests/base.py b/cloudferry_devlab/cloudferry_devlab/tests/base.py index 
4dc975e6..e761c9ba 100644 --- a/cloudferry_devlab/cloudferry_devlab/tests/base.py +++ b/cloudferry_devlab/cloudferry_devlab/tests/base.py @@ -25,8 +25,8 @@ from novaclient import exceptions as nova_exceptions from novaclient.v2 import client as nova from swiftclient import client as swift_client -from nose.config import Config, all_config_files -from nose.plugins.manager import DefaultPluginManager +from nose import config as nose_config +from nose.plugins import manager as nose_manager from cloudferry_devlab.tests import test_exceptions import cloudferry_devlab.tests.utils as utils @@ -68,6 +68,8 @@ def __init__(self, config, configuration_ini, cloud_prefix='SRC', self.filtering_utils = utils.FilteringUtils( self.configuration_ini['migrate']['filter_path']) self.migration_utils = utils.MigrationUtils(config) + self.src_vms_from_config = \ + self.migration_utils.get_all_vms_from_config() self.config = config self.cloud_prefix = cloud_prefix.lower() @@ -260,9 +262,21 @@ def get_volume_snapshot_id(self, snapshot_name): def get_user_tenant_roles(self, user): user_tenant_roles = [] for tenant in self.keystoneclient.tenants.list(): - user_tenant_roles.extend(self.keystoneclient.roles.roles_for_user( - user=self.get_user_id(user.name), - tenant=self.get_tenant_id(tenant.name))) + user_tenant_roles.extend( + self.keystoneclient.roles.roles_for_user( + user=self.get_user_id(user.name), + tenant=self.get_tenant_id(tenant.name))) + return user_tenant_roles + + def get_roles_for_user(self, user, tenant_attrib): + user_tenant_roles = [] + for tenant in self.keystoneclient.tenants.list(): + if tenant.name.lower() == tenant_attrib.lower(): + user_tenant_roles.extend( + self.keystoneclient.roles.roles_for_user( + user=self.get_user_id(user.name), + tenant=self.get_tenant_id(tenant.name))) + break return user_tenant_roles def get_ext_routers(self): @@ -460,9 +474,9 @@ def set_vm_state(novaclient, vm_id, vm_state, logger=None): def get_nosetest_cmd_attribute_val(attribute): env = os.environ - manager = DefaultPluginManager() - cfg_files = all_config_files() - tmp_config = Config(env=env, files=cfg_files, plugins=manager) + manager = nose_manager.DefaultPluginManager() + cfg_files = nose_config.all_config_files() + tmp_config = nose_config.Config(env=env, files=cfg_files, plugins=manager) tmp_config.configure() try: attr_list = getattr(tmp_config.options, 'attr') diff --git a/cloudferry_devlab/cloudferry_devlab/tests/cleanup.py b/cloudferry_devlab/cloudferry_devlab/tests/cleanup.py index 0ffe9497..bf529206 100644 --- a/cloudferry_devlab/cloudferry_devlab/tests/cleanup.py +++ b/cloudferry_devlab/cloudferry_devlab/tests/cleanup.py @@ -32,8 +32,7 @@ class CleanEnv(base.BasePrerequisites): def clean_vms(self): - vms = self.migration_utils.get_all_vms_from_config() - vms_names = [vm['name'] for vm in vms] + vms_names = [vm['name'] for vm in self.src_vms_from_config] vms = self.novaclient.servers.list(search_opts={'all_tenants': 1}) vms_ids = [] for vm in vms: diff --git a/cloudferry_devlab/cloudferry_devlab/tests/config.py b/cloudferry_devlab/cloudferry_devlab/tests/config.py index 92419a38..f43982cd 100644 --- a/cloudferry_devlab/cloudferry_devlab/tests/config.py +++ b/cloudferry_devlab/cloudferry_devlab/tests/config.py @@ -67,6 +67,12 @@ """This file contains map of relationships between external networks on source and destination clouds.""" +case_sensitivity_test_user = 'user10' +"""User to be created in upper case on DST Cloud.""" + +case_sensitivity_test_tenant = 'tenant6' +"""Tenant to be created in upper 
case on DST Cloud.""" + users = [ {'name': 'user1', 'password': 'passwd1', 'email': 'mail@example.com', 'tenant': 'tenant1', 'enabled': True}, @@ -89,14 +95,26 @@ 'tenant': 'tenant3', 'enabled': True}, {'name': 'user9', 'password': 'passwd', 'email': 'user8@example.com', 'tenant': 'tenant5', 'enabled': True}, + {'name': case_sensitivity_test_user, 'password': 'passwd', + 'email': 'user10@example.com', + 'tenant': case_sensitivity_test_tenant, 'enabled': True}, {'name': 'user11', 'password': 'passwd', 'email': 'user11@example.com', 'tenant': 'tenant7', 'enabled': True} ] -"""Users to create/delete""" +"""SRC Users to create/delete""" + +dst_users = [ + {'name': case_sensitivity_test_user.upper(), 'password': 'PASSWD', + 'email': 'user_10@example.com', + 'tenant': case_sensitivity_test_tenant.upper(), 'enabled': True} +] +"""DST Users to create""" user_tenant_roles = [ {'user9': [{'tenant': 'tenant5', 'role': 'SomeRole'}]}, - {'user1': [{'tenant': 'tenant1', 'role': 'SomeRole'}]} + {'user1': [{'tenant': 'tenant1', 'role': 'SomeRole'}]}, + {case_sensitivity_test_user: [{'tenant': case_sensitivity_test_tenant, + 'role': 'SecondRole'}]} ] roles = [ @@ -368,7 +386,8 @@ {'cidr': '123.2.2.0/24', 'ip_version': 4, 'name': 't5_s2'}]}], 'vms': [ {'name': 'tn5server1', 'image': 'image1', 'flavor': 'flavorname2', - 'nics': [{'net-id': 'tenant5net', 'v4-fixed-ip': '122.2.2.100'}]}, + 'nics': [{'net-id': 'tenant5net', 'v4-fixed-ip': '122.2.2.100'}], + 'already_on_dst': True}, {'name': 'tn5server2', 'image': 'image1', 'flavor': 'flavorname2', 'nics': [{'net-id': 'tenant5net2', 'v4-fixed-ip': '123.2.2.100'}]}, {'name': 'tn5server3', 'image': 'image1', 'flavor': 'flavorname2', @@ -377,13 +396,23 @@ 'images': [{'name': 'cirros_image_for_tenant5', 'copy_from': img_url, 'is_public': True}], }, + {'name': case_sensitivity_test_tenant, 'description': 'None', + 'enabled': True, 'uppercase': True + }, {'name': 'tenant7', 'description': 'Tenant7 filter has excluded images', - 'enabled': True, 'exclude_images': True, + 'enabled': True, 'images': [{'name': 'image7', 'copy_from': img_url, 'is_public': False}, {'name': 'image8', 'copy_from': img_url, 'is_public': False}] } ] -"""Tenants to create/delete""" +"""Tenants to create/delete +`case_sensitivity_test_tenant` covers this scenario: + - Create user and tenant in source cloud + - Create user and tenant with the same names in the uppercase in + destination cloud + - Create user roles for the user created + - Run identity migration + - Verify user roles migrated correctly""" images = [ {'name': 'image1', 'copy_from': img_url, 'is_public': True, @@ -443,6 +472,9 @@ images_blacklisted = ['image7'] """Images blacklisted""" +flavors_deleted_after_vm_boot = ["del_flvr"] +"""Flavors to be deleted after booting VM from them""" + vms_not_in_filter = ['not_in_filter'] """Instances not to be included in filter""" @@ -457,6 +489,10 @@ 'is_public': False}, {'name': 'flavorname2', 'disk': '2', 'ram': '48', 'vcpus': '2'}, {'name': 'del_flvr', 'disk': '1', 'ram': '64', 'vcpus': '1'}, + {'name': 'diffattrib_flvr', 'flavorid': '666', 'disk': '5', 'ram': '96', + 'vcpus': '1', "ephemeral": '0', 'is_public': True, 'is_deleted': True}, + {'name': 'diffattrib_flvr', 'flavorid': '666', 'disk': '4', 'ram': '64', + 'vcpus': '2', "ephemeral": '0', 'is_public': False}, {'name': 'deleted_flavor', 'flavorid': '777', 'disk': '1', 'ram': '48', 'vcpus': '1', "ephemeral": '0', 'is_deleted': True}, {'name': 'recreated_flavor', 'flavorid': '777', 'disk': '1', 'ram': '48', @@ -590,7 +626,7 @@ {'name': 
'server1', 'image': 'image1', 'flavor': 'flavorname1'}, {'name': 'server2', 'image': 'deleted_on_dst', 'flavor': 'del_flvr', 'server_group': 'admin_server_group', 'config_drive': True, 'fip': True}, - {'name': 'server3', 'image': 'deleted_image', 'flavor': 'flavorname2'}, + {'name': 'server3', 'image': 'deleted_image', 'flavor': 'diffattrib_flvr'}, {'name': 'server4', 'image': 'broken_image', 'flavor': 'flavorname2'}, {'name': 'server5', 'image': 'image1', 'flavor': 'flavorname1', 'broken': True}, diff --git a/cloudferry_devlab/cloudferry_devlab/tests/functional_test.py b/cloudferry_devlab/cloudferry_devlab/tests/functional_test.py index 69f0d77b..fdf7979a 100644 --- a/cloudferry_devlab/cloudferry_devlab/tests/functional_test.py +++ b/cloudferry_devlab/cloudferry_devlab/tests/functional_test.py @@ -60,6 +60,8 @@ def setUp(self): configuration_ini=config_ini, config=config) self.migration_utils = utils.MigrationUtils(config) + self.src_vms_from_config = \ + self.migration_utils.get_all_vms_from_config() self.config_ini_path = config_ini['general']['configuration_ini_path'] self.cloudferry_dir = config_ini['general']['cloudferry_dir'] tenant = base.get_nosetest_cmd_attribute_val('migrated_tenant') @@ -156,12 +158,17 @@ def filter_roles(self): return self._get_keystone_resources('roles', roles) def get_src_vm_objects_specified_in_config(self): - vms = self.migration_utils.get_all_vms_from_config() - vms_names = [vm['name'] for vm in vms if not vm.get('broken')] + vms_names = [vm['name'] for vm in self.src_vms_from_config + if not vm.get('broken')] opts = {'search_opts': {'all_tenants': 1}} return [i for i in self.src_cloud.novaclient.servers.list(**opts) if i.name in vms_names] + def filter_vms_already_on_dst(self, src_data_list): + vms_names = [vm['name'] for vm in self.src_vms_from_config + if not vm.get('already_on_dst')] + return [i for i in src_data_list if i.name in vms_names] + def filter_flavors(self, filter_only_private=False): flavors = [] if filter_only_private: @@ -298,6 +305,26 @@ def _is_segm_id_test(self, param, name): name in config.NET_NAMES_TO_OMIT or name in config.SUBNET_NAMES_TO_OMIT) + def compare_vm_flavor_parameter(self, field, vm1, vm2): + msgs = [] + vm1_param = getattr(vm1, 'flavor')[field] + vm2_param = getattr(vm2, 'flavor')[field] + vm1_flavor = self.src_cloud.novaclient.flavors.get(vm1_param) + vm2_flavor = self.dst_cloud.novaclient.flavors.get(vm2_param) + vm1_flavor_name = getattr(vm1_flavor, "name") + vm2_flavor_name = getattr(vm2_flavor, "name") + if vm1_flavor_name in config.flavors_deleted_after_vm_boot: + vm1_param = "deleted_{src_flavor_id}".format( + src_flavor_id=vm1_param) + vm2_param = vm2_flavor_name + if vm1_param != vm2_param: + error_msg = ('Flavor Field {field} for VM with name ' + '{name} is different src: {vm1}, dst: {vm2}') + msgs.append(error_msg.format(field=field, name=vm1.name, + vm1=getattr(vm1, 'flavor'), + vm2=getattr(vm2, 'flavor'))) + return msgs + def validate_resource_parameter_in_dst(self, src_list, dst_list, resource_name, parameter): if not src_list: diff --git a/cloudferry_devlab/cloudferry_devlab/tests/testcases/test_keystone_migration.py b/cloudferry_devlab/cloudferry_devlab/tests/testcases/test_keystone_migration.py index 833f5a97..79695955 100644 --- a/cloudferry_devlab/cloudferry_devlab/tests/testcases/test_keystone_migration.py +++ b/cloudferry_devlab/cloudferry_devlab/tests/testcases/test_keystone_migration.py @@ -16,6 +16,8 @@ from nose.plugins.attrib import attr from cloudferry_devlab.tests import functional_test 
+import cloudferry_devlab.tests.config as config +import cloudferry_devlab.tests.base as base @generator @@ -34,24 +36,45 @@ def test_migrate_keystone_users(self, param): :param name: user's name :param description: user's description :param enabled: is user enabled""" - self.validate_resource_parameter_in_dst(self.src_users, self.dst_users, + filtered_src_users = [user for user in self.src_users + if getattr(user, "name") != + config.case_sensitivity_test_user] + self.validate_resource_parameter_in_dst(filtered_src_users, + self.dst_users, resource_name='user', parameter=param) - @attr(migrated_tenant=['admin', 'tenant1', 'tenant2']) + @attr(migrated_tenant=['admin', 'tenant1', 'tenant2', + config.case_sensitivity_test_tenant]) def test_migrate_keystone_user_tenant_roles(self): """Validate user's tenant roles were migrated with correct name.""" - src_user_names = [user.name for user in self.filter_users()] + tenant = base.get_nosetest_cmd_attribute_val('migrated_tenant') for dst_user in self.dst_users: - if dst_user.name not in src_user_names: + src_user = None + for user in self.src_users: + if user.name.lower() == dst_user.name.lower(): + src_user = user + break + if src_user is None: + continue + if tenant is None: + src_user_tnt_roles = self.src_cloud.get_user_tenant_roles( + src_user) + dst_user_tnt_roles = self.dst_cloud.get_user_tenant_roles( + dst_user) + else: + src_user_tnt_roles = self.src_cloud.get_roles_for_user( + src_user, tenant) + dst_user_tnt_roles = self.dst_cloud.get_roles_for_user( + dst_user, tenant) + if len(src_user_tnt_roles) == 0 and len(dst_user_tnt_roles) == 0: continue - src_user_tnt_roles = self.src_cloud.get_user_tenant_roles(dst_user) - dst_user_tnt_roles = self.dst_cloud.get_user_tenant_roles(dst_user) self.validate_resource_parameter_in_dst( src_user_tnt_roles, dst_user_tnt_roles, resource_name='user_tenant_role', parameter='name') - @attr(migrated_tenant=['admin', 'tenant1', 'tenant2']) + @attr(migrated_tenant=['admin', 'tenant1', 'tenant2', + config.case_sensitivity_test_tenant]) @generate('name', 'description', 'enabled') def test_migrate_keystone_roles(self, param): """Validate user's roles were migrated with correct parameters. @@ -67,7 +90,8 @@ def test_migrate_keystone_roles(self, param): resource_name='role', parameter=param) - @attr(migrated_tenant=['admin', 'tenant1', 'tenant2']) + @attr(migrated_tenant=['admin', 'tenant1', 'tenant2', + config.case_sensitivity_test_tenant]) @generate('name', 'description', 'enabled') def test_migrate_keystone_tenants(self, param): """Validate tenants were migrated with correct name and description. 
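The role checks in the hunks above pair each destination user with its source counterpart by comparing names case-insensitively, which is what lets the upper-cased 'USER10'/'TENANT6' pre-created on DST line up with 'user10'/'tenant6' from SRC. The following is a minimal sketch of that pairing, illustrative only and not part of the patch: it assumes only that user objects expose a 'name' attribute, and the helper 'pair_users_by_name' and the 'User' tuple are hypothetical stand-ins rather than CloudFerry APIs.

import collections

# Hypothetical stand-in for a keystone user object; only 'name' is needed here.
User = collections.namedtuple('User', 'name')

def pair_users_by_name(src_users, dst_users):
    """Yield (src_user, dst_user) pairs whose names match ignoring case."""
    src_by_name = dict((user.name.lower(), user) for user in src_users)
    for dst_user in dst_users:
        src_user = src_by_name.get(dst_user.name.lower())
        if src_user is not None:
            yield src_user, dst_user

# 'user10' created on SRC pairs with the pre-created 'USER10' on DST, so their
# tenant roles can still be compared despite the difference in case.
print(list(pair_users_by_name([User('user10')], [User('USER10'), User('admin')])))

In the actual tests this pairing is followed by a role comparison via get_user_tenant_roles or get_roles_for_user; the sketch only isolates the case-insensitive matching step.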
@@ -81,6 +105,9 @@ def test_migrate_keystone_tenants(self, param): filtering_data = self.filtering_utils.filter_tenants(src_tenants) src_tenants = filtering_data[0] + src_tenants = [tenant for tenant in src_tenants + if getattr(tenant, "name") != + config.case_sensitivity_test_tenant] self.validate_resource_parameter_in_dst(src_tenants, dst_tenants, resource_name='tenant', diff --git a/cloudferry_devlab/cloudferry_devlab/tests/testcases/test_nova_resource_migration.py b/cloudferry_devlab/cloudferry_devlab/tests/testcases/test_nova_resource_migration.py index 8ae61aaf..bd4c1d04 100644 --- a/cloudferry_devlab/cloudferry_devlab/tests/testcases/test_nova_resource_migration.py +++ b/cloudferry_devlab/cloudferry_devlab/tests/testcases/test_nova_resource_migration.py @@ -101,20 +101,21 @@ def get_tenant_quotas(tenants, client): """ qs = {} for t in tenants: - qs[t.name] = {'nova_q': {}, 'neutron_q': {}, 'cinder_q': {}} + qs[t.name.lower()] = {'nova_q': {}, 'neutron_q': {}, + 'cinder_q': {}} nova_quota = client.novaclient.quotas.get(t.id).to_dict() for k, v in nova_quota.iteritems(): if k in src_nova_quota_keys and k != 'id': - qs[t.name]['nova_q'][k] = v + qs[t.name.lower()]['nova_q'][k] = v neutron_quota = client.neutronclient.show_quota(t.id)['quota'] for k, v in neutron_quota.iteritems(): if k in src_neutron_quota_keys: - qs[t.name]['neutron_q'][k] = v + qs[t.name.lower()]['neutron_q'][k] = v cinder_quota = getattr(client.cinderclient.quotas.get(t.id), '_info') for k, v in cinder_quota.iteritems(): if k in cinder_quota_keys and k != 'id': - qs[t.name]['cinder_q'][k] = v + qs[t.name.lower()]['cinder_q'][k] = v return qs src_nova_quota_keys = self.src_cloud.novaclient.quotas.get( diff --git a/cloudferry_devlab/cloudferry_devlab/tests/testcases/test_vm_migration.py b/cloudferry_devlab/cloudferry_devlab/tests/testcases/test_vm_migration.py index 621f0f6e..2b967440 100644 --- a/cloudferry_devlab/cloudferry_devlab/tests/testcases/test_vm_migration.py +++ b/cloudferry_devlab/cloudferry_devlab/tests/testcases/test_vm_migration.py @@ -18,6 +18,7 @@ from fabric.network import NetworkError from generator import generator, generate from nose.plugins.attrib import attr +import copy from cloudferry_devlab.tests import config from cloudferry_devlab.tests import functional_test @@ -41,14 +42,13 @@ def setUp(self): "file. 
Probably in the instances id list in the " "config file is not specified VMs for migration " "or is specified tenant id and VMs that not belong " - "to this tenant.") + "to this tenant or All VMS in SRC in error state") - src_vms = [vm for vm in src_vms if vm.status != 'ERROR' and - self.tenant_exists(self.src_cloud.keystoneclient, - vm.tenant_id)] + src_vms = [vm for vm in src_vms + if self.tenant_exists(self.src_cloud.keystoneclient, + vm.tenant_id)] if not src_vms: - self.fail("All VMs in SRC was in error state or " - "VM's tenant in SRC doesn't exist") + self.fail("VM's tenant in SRC doesn't exist") dst_vms = self.dst_cloud.novaclient.servers.list( search_opts={'all_tenants': 1}) @@ -66,6 +66,8 @@ def setUp(self): 'src_vm': s_vm, 'dst_vm': d_vm, }) + self.dst_vms = dst_vms + self.src_vms = src_vms @attr(migrated_tenant=['admin', 'tenant1', 'tenant2']) def test_vms_not_in_filter_stay_active_on_src(self): @@ -84,8 +86,7 @@ def test_vms_not_in_filter_stay_active_on_src(self): @attr(migrated_tenant=['admin', 'tenant1', 'tenant2']) def test_vm_not_in_filter_did_not_migrate(self): """Validate VMs not included in filter file weren't migrated.""" - dst_vms = [x.name for x in self.dst_cloud.novaclient.servers.list( - search_opts={'all_tenants': 1})] + dst_vms = [x.name for x in self.dst_vms] for vm in config.vms_not_in_filter: msg = 'VM migrated despite that it was '\ 'not included in filter, VM info: \n{vm}' @@ -169,16 +170,9 @@ def test_migrate_vms_parameters(self, param): :param key_name: :param security_groups: :param metadata:""" - src_vms = self.get_src_vm_objects_specified_in_config() - dst_vms = self.dst_cloud.novaclient.servers.list( - search_opts={'all_tenants': 1}) - - filtering_data = self.filtering_utils \ - .filter_vms_with_filter_config_file(src_vms) - src_vms = filtering_data[0] - src_vms = [vm for vm in src_vms if vm.status != 'ERROR'] def compare_vm_parameter(param, vm1, vm2): + msgs = [] vm1_param = getattr(vm1, param, None) vm2_param = getattr(vm2, param, None) if param == "config_drive" and vm1_param == u'1': @@ -186,31 +180,50 @@ def compare_vm_parameter(param, vm1, vm2): if vm1_param != vm2_param: error_msg = ('Parameter {param} for VM with name ' '{name} is different src: {vm1}, dst: {vm2}') - self.fail(error_msg.format(param=param, name=vm1.name, - vm1=getattr(vm1, param), - vm2=getattr(vm2, param))) + msgs.append(error_msg.format(param=param, name=vm1.name, + vm1=getattr(vm1, param), + vm2=getattr(vm2, param))) + return msgs + + fail_msg = [] + for src_vm in self.src_vms: + for dst_vm in self.dst_vms: + if src_vm.vm_hash != dst_vm.vm_hash: + continue + fail_msg.extend(compare_vm_parameter( + param, src_vm, dst_vm)) + break + else: + msg = 'VM with hash %s was not found on dst' + fail_msg.append(msg % str(src_vm.vm_hash)) + if fail_msg: + self.fail('\n'.join(fail_msg)) + + def test_migrate_vms_flavor_parameter(self): + """Validate VMs were migrated with correct flavor.""" + + fail_msg = [] + src_vms = copy.copy(self.src_vms) + src_vms = self.filter_vms_already_on_dst(src_vms) - self.set_hash_for_vms(src_vms) - self.set_hash_for_vms(dst_vms) - if not src_vms: - self.skipTest('Nothing to check - source resources list is empty') for src_vm in src_vms: - for dst_vm in dst_vms: + for dst_vm in self.dst_vms: if src_vm.vm_hash != dst_vm.vm_hash: continue - compare_vm_parameter(param, src_vm, dst_vm) + fail_msg.extend(self.compare_vm_flavor_parameter( + 'id', src_vm, dst_vm)) break else: msg = 'VM with hash %s was not found on dst' - self.fail(msg % str(src_vm.vm_hash)) + 
fail_msg.append(msg % str(src_vm.vm_hash)) + if fail_msg: + self.fail('\n'.join(fail_msg)) @attr(migrated_tenant=['admin', 'tenant1', 'tenant2']) def test_migrate_vms_with_floating(self): """Validate VMs were migrated with floating ip assigned.""" vm_names_with_fip = self.get_vms_with_fip_associated() - dst_vms = self.dst_cloud.novaclient.servers.list( - search_opts={'all_tenants': 1}) - for vm in dst_vms: + for vm in self.dst_vms: if vm.name not in vm_names_with_fip: continue for net in vm.addresses.values(): @@ -225,8 +238,8 @@ def test_not_valid_vms_did_not_migrate(self): """Validate VMs with invalid statuses weren't migrated. Invalid VMs have 'broken': True value in :mod:`config.py` """ - all_vms = self.migration_utils.get_all_vms_from_config() - vms = [vm['name'] for vm in all_vms if vm.get('broken')] + vms = [vm['name'] for vm in self.src_vms_from_config + if vm.get('broken')] migrated_vms = [] for vm in vms: try: @@ -240,9 +253,7 @@ def test_not_valid_vms_did_not_migrate(self): @attr(migrated_tenant='tenant2') def test_ssh_connectivity_by_keypair(self): """Validate migrated VMs ssh connectivity by keypairs.""" - vms = self.dst_cloud.novaclient.servers.list( - search_opts={'all_tenants': 1}) - for _vm in vms: + for _vm in self.dst_vms: if 'keypair_test' in _vm.name: vm = _vm break diff --git a/cloudferry_devlab/cloudferry_devlab/tests/utils.py b/cloudferry_devlab/cloudferry_devlab/tests/utils.py index 13f04e88..29615d5b 100644 --- a/cloudferry_devlab/cloudferry_devlab/tests/utils.py +++ b/cloudferry_devlab/cloudferry_devlab/tests/utils.py @@ -18,6 +18,7 @@ from fabric import network from neutronclient.common import exceptions import yaml +import copy import cloudferry_devlab.tests.config as config @@ -82,6 +83,14 @@ def filter_vms_with_filter_config_file(self, src_data_list): popped_vm_list.append(vm) current_data_list = filtered_vms_by_id + filtered_vms_by_state = [] + for vm in current_data_list: + if vm.status != 'ERROR': + filtered_vms_by_state.append(vm) + else: + popped_vm_list.append(vm) + current_data_list = filtered_vms_by_state + return [current_data_list, popped_vm_list] def filter_images(self, src_data_list): @@ -155,7 +164,7 @@ def execute_command_on_vm(self, ip_addr, cmd, username=None, % ip_addr) def get_all_vms_from_config(self): - vms = self.config.vms + vms = copy.deepcopy(self.config.vms) for tenant in self.config.tenants: if not tenant.get('vms') or tenant.get('deleted'): continue @@ -165,7 +174,7 @@ def get_all_vms_from_config(self): return vms def get_all_images_from_config(self): - images = self.config.images + images = copy.deepcopy(self.config.images) for tenant in self.config.tenants: if not tenant.get('images'): continue diff --git a/cloudferry_devlab/provision/configure_openstack.sh b/cloudferry_devlab/provision/configure_openstack.sh index e233f2ae..9d1d7b20 100644 --- a/cloudferry_devlab/provision/configure_openstack.sh +++ b/cloudferry_devlab/provision/configure_openstack.sh @@ -31,9 +31,11 @@ crudini --set /etc/nova/nova.conf DEFAULT osapi_compute_workers 1 crudini --set /etc/nova/nova.conf DEFAULT metadata_workers 1 crudini --set /etc/nova/nova.conf DEFAULT allow_resize_to_same_host True crudini --set /etc/nova/nova.conf DEFAULT allow_migrate_to_same_host True +crudini --set /etc/nova/nova.conf DEFAULT api_rate_limit False crudini --set /etc/nova/nova.conf conductor workers 1 service nova-api restart service nova-conductor restart service nova-compute restart +service nova-scheduler restart EOF diff --git a/cloudferry_devlab/requirements.txt 
b/cloudferry_devlab/requirements.txt index 1f31233b..17286d6c 100644 --- a/cloudferry_devlab/requirements.txt +++ b/cloudferry_devlab/requirements.txt @@ -1,4 +1,7 @@ fabric==1.10.2 +oslo.config==3.9.0 +oslo.i18n==3.7.0 +oslo.serialization==2.7.0 oslo.utils==3.5.0 python-cinderclient==1.3.1 python-glanceclient==1.1.0 diff --git a/configs/ext_net_map.yaml b/configs/ext_net_map.yaml index daac14c4..12489a05 100644 --- a/configs/ext_net_map.yaml +++ b/configs/ext_net_map.yaml @@ -1,3 +1,7 @@ +############################################################################### +### !!! THIS FILE IS DEPRECATED, USE resource_map.yaml INSTEAD !!! ### +############################################################################### +# # This file contains map of relationships between # external networks on source and destination clouds. # diff --git a/configs/resource_map.yaml b/configs/resource_map.yaml new file mode 100644 index 00000000..27402038 --- /dev/null +++ b/configs/resource_map.yaml @@ -0,0 +1,15 @@ +# This file contains map of relationships between objects on source and +# destination clouds. +# +# Use the following format: +# ext_network_map: +# : +# tenant_map: +# : +# +# F.e.: +#ext_network_map: +# cfd33270-8a4c-4fb9-8af9-7c83f82b8a5b: 7c325f53-b12b-4b07-b78a-0478d863f625 +# +#tenant_map: +# WTFaaS_BBQoD_Dev_Test: WTFBBQ-313373-V-PZD-05 diff --git a/tests/lib/os/actions/test_check_networks.py b/tests/lib/os/actions/test_check_networks.py index 492710e5..e0a6d1b7 100644 --- a/tests/lib/os/actions/test_check_networks.py +++ b/tests/lib/os/actions/test_check_networks.py @@ -353,6 +353,7 @@ def test_allocation_pools_overlap(self): 'provider:segmentation_id': None, 'router:external': True}], 'subnets': [{'cidr': '1.1.1.1/24', + 'name': 'snet1', 'res_hash': 2, 'network_id': 'id1', 'id': 'sub1', @@ -373,6 +374,7 @@ def test_allocation_pools_overlap(self): 'provider:segmentation_id': None, 'router:external': True}], 'subnets': [{'cidr': '1.1.1.1/25', + 'name': 'snet1', 'res_hash': 3, 'network_id': 'id2', 'id': 'sub2', diff --git a/tests/lib/os/network/test_neutron.py b/tests/lib/os/network/test_neutron.py index e03e1c05..f3ffd90d 100644 --- a/tests/lib/os/network/test_neutron.py +++ b/tests/lib/os/network/test_neutron.py @@ -949,8 +949,6 @@ def test_router_class(self): @mock.patch("cloudferry.lib.os.network.neutron.neutron_client.Client") -@mock.patch("cloudferry.lib.os.network.neutron.utl.read_yaml_file", - mock.MagicMock()) class NeutronClientTestCase(test.TestCase): def test_adds_region_if_set_in_config(self, n_client): cloud = mock.MagicMock() diff --git a/tests/model/test_model.py b/tests/model/test_model.py index 8c5ce260..8bf9c760 100644 --- a/tests/model/test_model.py +++ b/tests/model/test_model.py @@ -22,6 +22,11 @@ class ExampleReferenced(model.Model): object_id = model.PrimaryKey() qux = model.Integer(required=True) + def equals(self, other): + if super(ExampleReferenced, self).equals(other): + return True + return self.qux == other.qux + @classmethod def create_object(cls, cloud, cloud_obj_id): with model.Session() as session: @@ -104,6 +109,39 @@ def generate_data(cls, object_id=None, cloud='test_cloud'): } +class ExampleRef(model.Model): + object_id = model.PrimaryKey() + ref = model.Reference(ExampleReferenced, allow_none=True) + + def equals(self, other): + # pylint: disable=no-member + if super(ExampleRef, self).equals(other): + return True + if self.ref is None: + return other.ref is None + return self.ref.equals(other.ref) + + @classmethod + def create_object(cls, cloud, 
unique_id, ref_unique_id): + data = { + 'object_id': { + 'cloud': cloud, + 'id': unique_id, + 'type': cls.get_class_qualname(), + }, + } + if ref_unique_id is not None: + ref = { + 'cloud': cloud, + 'id': ref_unique_id, + 'type': ExampleReferenced.get_class_qualname(), + } + else: + ref = None + data['ref'] = ref + return cls.load(data) + + class ModelTestCase(test_local_db.DatabaseMockingTestCase): def setUp(self): super(ModelTestCase, self).setUp() @@ -329,3 +367,39 @@ def test_nested_sessions_save_updates_after_nested(self): bar_value='some other non-random string') self._validate_example_obj( object2_id, s2.retrieve(Example, object2_id)) + + def test_absent_reference_equals1(self): + object1 = ExampleRef.create_object( + 'test_cloud1', 'example_ref_id', 'example_referenced_id') + object2 = ExampleRef.create_object( + 'test_cloud2', 'example_ref_id', 'example_referenced_id') + self.assertTrue(object1.equals(object2)) + + def test_absent_reference_equals2(self): + object1 = ExampleRef.create_object( + 'test_cloud1', 'example_ref_id', 'example_referenced_id') + object2 = ExampleRef.create_object( + 'test_cloud2', 'example_ref_id', 'other_referenced_id') + self.assertFalse(object1.equals(object2)) + + def test_absent_reference_equals3(self): + object1 = ExampleRef.create_object( + 'test_cloud1', 'example_ref_id', None) + object2 = ExampleRef.create_object( + 'test_cloud2', 'example_ref_id', None) + self.assertTrue(object1.equals(object2)) + + def test_absent_reference_equals4(self): + with model.Session(): + ExampleReferenced.create_object( + 'test_cloud1', 'example_referenced_id') + ExampleReferenced.create_object( + 'test_cloud2', 'other_referenced_id') + + object1 = ExampleRef.create_object( + 'test_cloud1', 'example_ref_id', 'example_referenced_id') + object2 = ExampleRef.create_object( + 'test_cloud2', 'example_ref_id', 'other_referenced_id') + # We have equivalent objects referenced by example_referenced_id and + # other_referenced_id this time + self.assertTrue(object1.equals(object2))
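To make the intent of the new equals overrides and the test_absent_reference_equals* cases easier to follow, here is a framework-free sketch of the same semantics: a reference compares equal when both sides are None, or when the referenced objects are equivalent by content even though their identities differ between clouds. This is an illustrative approximation only, not CloudFerry code; 'Referenced' and 'Ref' are hypothetical stand-ins for ExampleReferenced and ExampleRef, and the content check stands in for the model-level super().equals() comparison.

# Illustrative sketch of the reference-equality rules exercised by
# test_absent_reference_equals1-4 (not part of the patch).
class Referenced(object):
    def __init__(self, object_id, qux):
        self.object_id = object_id  # cloud-local identity; may differ per cloud
        self.qux = qux              # payload that decides equivalence

    def equals(self, other):
        # Identity match or, failing that, content match (approximates the
        # model-level super().equals() followed by the qux comparison).
        return self.object_id == other.object_id or self.qux == other.qux


class Ref(object):
    def __init__(self, ref):
        self.ref = ref  # referenced object or None

    def equals(self, other):
        if self.ref is None:
            return other.ref is None
        return other.ref is not None and self.ref.equals(other.ref)


# Equivalent payloads stored under different IDs still compare equal, which
# mirrors what test_absent_reference_equals4 asserts for cross-cloud
# references; two absent references also compare equal, as in
# test_absent_reference_equals3.
assert Ref(Referenced('example_referenced_id', 42)).equals(
    Ref(Referenced('other_referenced_id', 42)))
assert Ref(None).equals(Ref(None))

The sketch deliberately omits the session/lazy-resolution behaviour that makes test_absent_reference_equals2 return False when the referenced objects were never stored; it only captures the comparison fallback order.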