From 6da1c21133a39525a260c6ba90ca2f931b11c8d2 Mon Sep 17 00:00:00 2001 From: Anurag Menon Date: Thu, 12 Apr 2018 17:14:18 -0400 Subject: [PATCH 01/10] Changes for snapshots --- .../ajax/openstackDeploymentStatus.html | 138 ++++-- ajax/templates/ajax/snapshot_list.html | 61 +++ ajax/urls.py | 8 + ajax/views.py | 223 ++++++++-- common/lib/openstackUtils.py | 421 +++++++++--------- common/lib/wistarUtils.py | 328 +++++++++----- common/static/js/topology_utils.js | 8 + topologies/templates/topologies/edit.html | 132 +++++- .../topologies/overlay/rebuild_instance.html | 62 +++ topologies/urls.py | 3 + topologies/views.py | 150 ++++++- 11 files changed, 1087 insertions(+), 447 deletions(-) create mode 100644 ajax/templates/ajax/snapshot_list.html create mode 100644 topologies/templates/topologies/overlay/rebuild_instance.html diff --git a/ajax/templates/ajax/openstackDeploymentStatus.html b/ajax/templates/ajax/openstackDeploymentStatus.html index 7c7ca87..dfa8d22 100644 --- a/ajax/templates/ajax/openstackDeploymentStatus.html +++ b/ajax/templates/ajax/openstackDeploymentStatus.html @@ -1,14 +1,22 @@ {% load staticfiles %} + + + + + + + - + {% if stack == None %} @@ -27,82 +35,142 @@ {% else %} - {% if 'COMPLETE' not in stack.stack_status %} + {% if stack.stack_status != "CREATE_COMPLETE" %} - + - + + + + + + {% else %} {% for resource in stack_resources.resources %} {% if resource.resource_type == "OS::Nova::Server" %} + - + + {% endif %} {% endfor %} {% endif %} + + + + + + + - + + + + - + + + + + + + + + + + + + + + + + + {% endif %}
Stack Status Stack Status
Status + {{ stack.stack_status }}
Status Detail + {{ stack.stack_status_reason }}
+ onclick="javascript: window.open('http://{{ openstack_host }}/dashboard/project/instances/{{ resource.physical_resource_id }}')"> {{ resource.resource_name }} - {% if 'COMPLETE' in resource.resource_status %} -
-   +
+ Status + +   + + {% if resource.resource_status == "CREATE_COMPLETE" %} +   {% else %} -
-   + {% endif %}
Options +
+ + View in Horizon + -
- 🔍 -
-   -
- -
-   -
- -
-   + +
+ + Delete Stack + + + + Debug HEAT + + + + Update HEAT +
+
+ HEAT Snapshots +
+ + Create Snapshots + + + + List Snapshots + +
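For reference, the Create Snapshots / List Snapshots actions added above sit on top of the Heat v1 stack-snapshot endpoints. The sketch below only illustrates the request shapes used later in this patch (create_stack_snapshot, get_snapshot_list, rollback_snapshot); it assumes the existing helpers in common/lib/openstackUtils.py (connect_to_openstack, get_stack_details, create_heat_url, do_get, do_post) and is not itself part of the diff.

    import json

    from common.lib import openstackUtils


    def heat_snapshot_roundtrip(tenant_id, stack_name, snapshot_name):
        # do_get/do_post need a cached token, so authenticate first
        if not openstackUtils.connect_to_openstack():
            return None
        stack = openstackUtils.get_stack_details(stack_name)
        if stack is None:
            return None
        base = openstackUtils.create_heat_url(
            '/%s/stacks/%s/%s/snapshots' % (tenant_id, stack_name, stack['id']))
        # create: POST .../snapshots with the snapshot name as the JSON body
        created = openstackUtils.do_post(base, json.dumps({'name': snapshot_name}))
        # list: GET .../snapshots returns {"snapshots": [...]}; the new list view
        # only surfaces entries whose status is COMPLETE
        listing = openstackUtils.do_get(base)
        snapshots = json.loads(listing).get('snapshots', []) if listing else []
        complete = [s for s in snapshots if s.get('status') == 'COMPLETE']
        # restore: POST .../snapshots/<id>/restore; delete: DELETE on the same URL
        return created, complete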
+ diff --git a/ajax/templates/ajax/snapshot_list.html b/ajax/templates/ajax/snapshot_list.html new file mode 100644 index 0000000..f25ca61 --- /dev/null +++ b/ajax/templates/ajax/snapshot_list.html @@ -0,0 +1,61 @@ +{% extends "base.html" %} +{% block title %}Wistar - Lab Rat - Snapshot List{% endblock %} +{% load staticfiles %} +{% block content %} +
+ +

Stack Snapshot List

+ + + + + + + + + {% for snapshot in snapshot_list %} + + + + + + + {% endfor %} +
Name   Snapshot Id   Stack Name   Options
+ {{snapshot.name }} + + {{ snapshot.id }} + + {{ snapshot.stack_name }} + + + +
+ +
+{% endblock %} diff --git a/ajax/urls.py b/ajax/urls.py index 59520b4..2398048 100644 --- a/ajax/urls.py +++ b/ajax/urls.py @@ -43,7 +43,15 @@ url(r'^deployTopology/$', views.deploy_topology, name='deployTopology'), url(r'^redeployTopology/$', views.redeploy_topology, name='redeployTopology'), url(r'^deployStack/(?P[^/]+)$', views.deploy_stack, name='deployStack'), + url(r'^updateStack/(?P[^/]+)$', views.update_stack, name='updateStack'), url(r'^deleteStack/(?P[^/]+)$', views.delete_stack, name='deleteStack'), + #url(r'^createSnapshot/(?P[^/]+)$', views.create_snapshot, name='createSnapshot'), + #url(r'^createSnapshot/$', views.create_snapshot, name='createSnapshot'), + url(r'^listSnapshot/(?P[^/]+)$', views.list_snapshot, name='listSnapshot'), + url(r'^deleteSnapshot/(?P[^/]+)/(?P[^/]+)/$', views.delete_snapshot, + name='deleteSnapshot'), + url(r'^rollbackSnapshot/(?P[^/]+)/(?P[^/]+)/$', views.rollback_snapshot, + name='rollbackSnapshot'), url(r'^startTopology/$', views.start_topology, name='startTopology'), url(r'^pauseTopology/$', views.pause_topology, name='pauseTopology'), url(r'^manageDomain/$', views.manage_domain, name='manageDomain'), diff --git a/ajax/views.py b/ajax/views.py index c3c09fe..7e5642d 100644 --- a/ajax/views.py +++ b/ajax/views.py @@ -35,6 +35,7 @@ from common.lib import linuxUtils from common.lib import openstackUtils from common.lib import osUtils +from common.lib import vboxUtils from common.lib import wistarUtils from common.lib.WistarException import WistarException from images.models import Image @@ -260,26 +261,23 @@ def get_junos_startup_state(request): return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"}) name = request.POST['name'] - - # always check network if possible regardless of deployment_backend - if "ip" in request.POST: - # this instance is auto-configured, so we can just check for IP here - response_data["network"] = osUtils.check_ip(request.POST["ip"]) - if configuration.deployment_backend == "kvm" and libvirtUtils.is_domain_running(name): # topologies/edit will fire multiple calls at once # let's just put a bit of a breather between each one response_data["power"] = True - if "ip" not in request.POST: + if "ip" in request.POST: + # this instance is auto-configured, so we can just check for IP here + response_data["network"] = osUtils.check_ip(request.POST["ip"]) + else: time.sleep(random.randint(0, 10) * .10) + response_data["console"] = consoleUtils.is_junos_device_at_prompt(name) elif configuration.deployment_backend == "openstack": time.sleep(random.randint(0, 20) * .10) response_data["power"] = True - # console no longer supported in openstack deployments - response_data["console"] = False + response_data["console"] = consoleUtils.is_junos_device_at_prompt(name) return HttpResponse(json.dumps(response_data), content_type="application/json") @@ -295,24 +293,20 @@ def get_linux_startup_state(request): return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"}) name = request.POST['name'] - # always check network if possible regardless of deployment_backend - if "ip" in request.POST: - # this instance is auto-configured, so we can just check for IP here - response_data["network"] = osUtils.check_ip(request.POST["ip"]) if configuration.deployment_backend == "openstack": if openstackUtils.connect_to_openstack(): time.sleep(random.randint(0, 10) * .10) response_data["power"] = True - # as of 2018-01-01 we no longer support openstack console, this is dead code - # 
response_data["console"] = consoleUtils.is_linux_device_at_prompt(name) - response_data['console'] = False + response_data["console"] = consoleUtils.is_linux_device_at_prompt(name) else: if libvirtUtils.is_domain_running(name): time.sleep(random.randint(0, 10) * .10) response_data["power"] = True - # let's check the console only if we do not have network available to check - if "ip" not in request.POST: + if "ip" in request.POST: + # this instance is auto-configured, so we can just check for IP here + response_data["network"] = osUtils.check_ip(request.POST["ip"]) + else: response_data["console"] = consoleUtils.is_linux_device_at_prompt(name) return HttpResponse(json.dumps(response_data), content_type="application/json") @@ -583,17 +577,11 @@ def refresh_openstack_deployment_status(request, topology_id): stack_details = openstackUtils.get_stack_details(stack_name) stack_resources = dict() logger.debug(stack_details) - if stack_details is not None and 'stack_status' in stack_details and 'COMPLETE' in stack_details["stack_status"]: + if stack_details is not None and stack_details["stack_status"] == "CREATE_COMPLETE": stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"]) - if hasattr(configuration, 'openstack_horizon_url'): - horizon_url = configuration.openstack_horizon_url - else: - horizon_url = 'http://' + configuration.openstack_host + '/dashboard' - context = {"stack": stack_details, "topology_id": topology.id, "openstack_host": configuration.openstack_host, - "openstack_horizon_url": horizon_url, "stack_resources": stack_resources } return render(request, 'ajax/openstackDeploymentStatus.html', context) @@ -640,7 +628,7 @@ def get_available_ip(request): # IP addresses. This makes the attempt to use 'old' ips that # are at least not still in use. logger.info("getting ips that are currently reserved via DHCP") - all_used_ips = wistarUtils.get_consumed_management_ips() + all_used_ips = wistarUtils.get_dhcp_reserved_ips() logger.debug(all_used_ips) next_ip = wistarUtils.get_next_ip(all_used_ips, 2) logger.debug(next_ip) @@ -1027,15 +1015,8 @@ def inline_deploy_topology(config): if device["cloudInitSupport"]: # grab the last interface management_interface = device["managementInterface"] - - # grab the prefix len from the management subnet which is in the form 192.168.122.0/24 - if '/' in configuration.management_subnet: - management_prefix_len = configuration.management_subnet.split('/')[1] - else: - management_prefix_len = '24' - - management_ip = device['ip'] + '/' + management_prefix_len - + # this will come back to haunt me one day. Assume /24 for mgmt network is sprinkled everywhere! 
+ management_ip = device["ip"] + "/24" # domain_name, host_name, mgmt_ip, mgmt_interface script_string = "" script_param = "" @@ -1438,15 +1419,11 @@ def deploy_stack(request, topology_id): return render(request, 'error.html', {'error': "Topology not found!"}) try: - # generate a stack name - # FIXME should add a check to verify this is a unique name - stack_name = topology.name.replace(' ', '_') - # let's parse the json and convert to simple lists and dicts logger.debug("loading config") config = wistarUtils.load_config_from_topology_json(topology.json, topology_id) logger.debug("Config is loaded") - heat_template = wistarUtils.get_heat_json_from_topology_config(config, stack_name) + heat_template = wistarUtils.get_heat_json_from_topology_config(config) logger.debug("heat template created") if not openstackUtils.connect_to_openstack(): return render(request, 'error.html', {'error': "Could not connect to Openstack"}) @@ -1458,7 +1435,7 @@ def deploy_stack(request, topology_id): raise Exception("No project found for %s" % configuration.openstack_project) # FIXME - verify all images are in glance before jumping off here! - + stack_name = topology.name.replace(' ', '_') logger.debug(openstackUtils.create_stack(stack_name, heat_template)) return HttpResponseRedirect('/topologies/' + topology_id + '/') @@ -1486,3 +1463,167 @@ def delete_stack(request, topology_id): logger.debug(openstackUtils.delete_stack(stack_name)) return HttpResponseRedirect('/topologies/' + topology_id + '/') + +def update_stack(request, topology_id): + """ + :param request: Django request + :param topology_id: id of the topology to export + :return: renders the updated heat template + """ + logger.debug("-----Inside update stack-----") + try: + topology = Topology.objects.get(pk=topology_id) + except ObjectDoesNotExist: + return render(request, 'error.html', {'error': "Topology not found!"}) + try: + # let's parse the json and convert to simple lists and dicts + logger.debug("loading config") + config = wistarUtils.load_config_from_topology_json(topology.json, topology_id) + logger.debug("Config is loaded") + + # get the tenant_id of the desired project + tenant_id = openstackUtils.get_project_id(configuration.openstack_project) + logger.debug("using tenant_id of: %s" % tenant_id) + if tenant_id is None: + raise Exception("No project found for %s" % configuration.openstack_project) + + # FIXME - verify all images are in glance before jumping off here! 
+ stack_name = topology.name.replace(' ', '_') + + port_list = openstackUtils.get_stack_ports(stack_name, tenant_id) + print(port_list) + heat_template = wistarUtils.get_heat_json_from_topology_config_for_update(config, port_list) + logger.debug("heat template created---test1") + logger.debug(heat_template) + + logger.debug(openstackUtils.update_stack_template(stack_name, heat_template)) + + return HttpResponseRedirect('/topologies/' + topology_id + '/') + + except Exception as e: + logger.debug("Caught Exception in deploy") + logger.debug(str(e)) + return render(request, 'error.html', {'error': str(e)}) + + + +def list_snapshot(request, topology_id): + """ + :param request: Django request + :param topology_id: id of the topology to export + :return: creates a snapshot of the heat template + """ + try: + logger.debug("Inside create Snapshot----------") + tenant_id = openstackUtils.get_project_id(configuration.openstack_project) + logger.debug("using tenant_id of: %s" % tenant_id) + if tenant_id is None: + raise Exception("No project found for %s" % configuration.openstack_project) + + logger.debug("Topology id -------------------: %s" % topology_id) + + + try: + topology = Topology.objects.get(pk=topology_id) + except ObjectDoesNotExist: + logger.error('topology id %s was not found!' % topology_id) + return render(request, 'error.html', {'error': "Topology not found!"}) + + # FIXME - verify all images are in glance before jumping off here! + stack_name = topology.name.replace(' ', '_') + snapshot_list = list() + logger.debug("-------------------stack_name--------------------: %s" % stack_name) + if openstackUtils.connect_to_openstack(): + snapshot_list = openstackUtils.get_snapshot_list(tenant_id, stack_name, topology_id) + + context = {'snapshot_list': snapshot_list} + logger.debug("Before rendering-----------") + return render(request, 'ajax/snapshot_list.html', context) + + except Exception as e: + logger.debug("Caught Exception in deploy") + logger.debug(str(e)) + return render(request, 'error.html', {'error': str(e)}) + + +def rollback_snapshot(request, snapshot_id, topology_id): + """ + :param request: Django request + :param topology_id: id of the topology to export + :return: creates a snapshot of the heat template + """ + + try: + logger.debug("Inside rollback Snapshot----------") + tenant_id = openstackUtils.get_project_id(configuration.openstack_project) + logger.debug("using tenant_id of: %s" % tenant_id) + if tenant_id is None: + raise Exception("No project found for %s" % configuration.openstack_project) + + logger.debug("Topology id -------------------: %s" % topology_id) + + try: + topology = Topology.objects.get(pk=topology_id) + except ObjectDoesNotExist: + logger.error('topology id %s was not found!' % topology_id) + return render(request, 'error.html', {'error': "Topology not found!"}) + + # FIXME - verify all images are in glance before jumping off here! 
+ stack_name = topology.name.replace(' ', '_') + logger.debug("Stack name: %s" % stack_name) + logger.debug("Snapshot id: %s" % snapshot_id) + + if openstackUtils.connect_to_openstack(): + logger.debug(openstackUtils.rollback_snapshot(tenant_id, stack_name, snapshot_id)) + + return HttpResponseRedirect('/topologies/' + topology_id + '/') + + except Exception as e: + logger.debug("Caught Exception in deploy") + logger.debug(str(e)) + return render(request, 'error.html', {'error': str(e)}) + + + + + + + + +def delete_snapshot(request, snapshot_id, topology_id): + """ + :param request: Django request + :param topology_id: id of the topology to export + :return: creates a snapshot of the heat template + """ + + try: + logger.debug("Inside rollback Snapshot----------") + tenant_id = openstackUtils.get_project_id(configuration.openstack_project) + logger.debug("using tenant_id of: %s" % tenant_id) + if tenant_id is None: + raise Exception("No project found for %s" % configuration.openstack_project) + + logger.debug("Topology id -------------------: %s" % topology_id) + + + try: + topology = Topology.objects.get(pk=topology_id) + except ObjectDoesNotExist: + logger.error('topology id %s was not found!' % topology_id) + return render(request, 'error.html', {'error': "Topology not found!"}) + + # FIXME - verify all images are in glance before jumping off here! + stack_name = topology.name.replace(' ', '_') + logger.debug("Stack name: %s" % stack_name) + logger.debug("Snapshot id: %s" % snapshot_id) + + if openstackUtils.connect_to_openstack(): + logger.debug(openstackUtils.delete_snapshot(tenant_id, stack_name, snapshot_id)) + + return HttpResponseRedirect('/topologies/' + topology_id + '/') + + except Exception as e: + logger.debug("Caught Exception in deploy") + logger.debug(str(e)) + return render(request, 'error.html', {'error': str(e)}) diff --git a/common/lib/openstackUtils.py b/common/lib/openstackUtils.py index fc48a29..cec3f65 100644 --- a/common/lib/openstackUtils.py +++ b/common/lib/openstackUtils.py @@ -27,7 +27,7 @@ from wistar import configuration # OpenStack component URLs -# _glance_url = ':9292/v1' +_glance_url = ':9292' _analytics_url = ':8081' _api_url = ':8082' _os_url = ':5000/v3' @@ -62,21 +62,6 @@ def connect_to_openstack(): """ logger.debug("--- connect_to_openstack ---") - - logger.debug('verify configuration') - - if not hasattr(configuration, 'openstack_host'): - logger.error('Openstack Host is not configured') - return False - - if not hasattr(configuration, 'openstack_user'): - logger.error('Openstack User is not configured') - return False - - if not hasattr(configuration, 'openstack_password'): - logger.error('Openstack Password is not configured') - return False - global _auth_token global _tenant_id global _token_cache_time @@ -218,38 +203,7 @@ def get_project_id(project_name): return None -def get_network_id(network_name): - """ - Gets the UUID of the network by network_name - :param network_name: Name of the network - :return: string UUID or None - """ - - logger.debug("--- get_network_id ---") - - networks_url = create_neutron_url('/networks?name=%s' % network_name) - logger.info(networks_url) - networks_string = do_get(networks_url) - logger.info(networks_string) - if networks_string is None: - logger.error('Did not find a network for that name!') - return None - - try: - networks = json.loads(networks_string) - except ValueError: - logger.error('Could not parse json response in get_network_id') - return None - - for network in networks["networks"]: - if 
network["name"] == network_name: - logger.info('Found id!') - return str(network["id"]) - - return None - - -def upload_image_to_glance_old(name, image_file_path): +def upload_image_to_glance(name, image_file_path): """ :param name: name of the image to be uploaded @@ -285,144 +239,18 @@ def upload_image_to_glance_old(name, image_file_path): return None -def upload_image_to_glance(name, image_file_path): - """ - - :param name: name of the image to be created - :param image_file_path: path of the file to upload - :return: json encoded results string from glance REST api - """ - logger.debug("--- create_image_in_glance ---") - - url = create_glance_url('/images') - - try: - - d = dict() - d['disk_format'] = 'qcow2' - d['container_format'] = 'bare' - d['name'] = name - - r_data = do_post(url, json.dumps(d)) - - except Exception as e: - logger.error("Could not upload image to glance") - logger.error("error was %s" % str(e)) - return None - - try: - r_json = json.loads(r_data) - if 'id' in r_json: - image_id = r_json['id'] - - logger.info('Preparing to push image data to glance!') - f = open(image_file_path, 'rb') - fio = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) - upload_url = create_glance_url('/images/%s/file' % image_id) - request = urllib2.Request(upload_url, fio) - request.add_header("Content-Type", "application/octet-stream") - request.add_header("X-Auth-Token", _auth_token) - request.get_method = lambda: 'PUT' - return urllib2.urlopen(request) - else: - logger.error('Could not find an ID key in returned json from glance image create') - logger.error(r_data) - logger.error('returning None') - return None - - except ValueError: - logger.error('Could not parse JSON return data from glance image create') - return None - - -def get_neutron_ports_for_network(network_name): - """ - :return: json response from /ports URL - """ - logger.debug("--- get_neutron_port_list ---") - - network_id = get_network_id(network_name) - if network_id is None: - logger.warn("couldn't find the correct network_id") - return None - - url = create_neutron_url("/ports.json?network_id=%s&fields=id&fields=fixed_ips" % network_id) - logger.debug(url) - port_list_string = do_get(url) - logger.debug(port_list_string) - - return port_list_string - - -def get_consumed_management_ips(): - """ - Return a list of dicts of the format - [ - { "ip-address": "xxx.xxx.xxx.xxx"} - ] - This mimics the libvirt dnsmasq format for dhcp reservations - This is used in the wistarUtils.get_dhcp_reserved_ips() as a single place to - get all reserved management ips - :return: list of dicts - """ - consumed_ips = list() - ports_string = get_neutron_ports_for_network(configuration.openstack_mgmt_network) - if ports_string is None: - return consumed_ips - try: - ports = json.loads(ports_string) - except ValueError: - logger.error('Could not parse json response in get_consumed_management_ips') - return consumed_ips - - if 'ports' not in ports: - logger.error('unexpected keys in json response!') - return consumed_ips - - for port in ports['ports']: - for fixed_ip in port['fixed_ips']: - if configuration.management_prefix in fixed_ip['ip_address']: - fip = dict() - fip['ip-address'] = fixed_ip['ip_address'] - consumed_ips.append(fip) - - return consumed_ips - - def get_glance_image_list(): """ - :return: list of json objects from glance /images URL filtered with only shared or public images + :return: json response from glance /images/ URL """ logger.debug("--- get_glance_image_list ---") url = create_glance_url("/images") 
image_list_string = do_get(url) - - image_list = list() - if image_list_string is None: - return image_list - - try: - glance_return = json.loads(image_list_string) - except ValueError: - logger.warn('Could not parse json response from glance /images') - return image_list - - if 'images' not in glance_return: - logger.warn('did not find images key in glance return data') - logger.debug(glance_return) - return image_list - - for im in glance_return['images']: - - if 'status' in im and im['status'] != 'active': - logger.debug('Skipping non-active image %s' % im['name']) - continue - - if 'visibility' in im and im['visibility'] in ['shared', 'public']: - image_list.append(im) + return None + image_list = json.loads(image_list_string) return image_list @@ -450,10 +278,10 @@ def get_image_id_for_name(image_name): logger.debug("--- get_image_id_for_name ---") image_list = get_glance_image_list() - if image_list is None or len(image_list) == 0: + if image_list is None: return None - for image in image_list: + for image in image_list["images"]: if image["name"] == image_name: return image["id"] @@ -526,32 +354,10 @@ def get_nova_flavors(project_name): def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): - """ - Query nova to get all flavors and return the flavor that best matches our desired constraints - :param project_name: name of the project to check for flavors - :param cpu: number of cores desired - :param ram: amount of ram desired in MB - :param disk: amount of disk required in GB - :return: flavor object {"name": "m1.xlarge"} - """ logger.debug("checking: " + str(cpu) + " " + str(ram) + " " + str(disk)) - - # create an emergency flavor so we have something to return in case we can't connect to openstack - # or some other issue prevents us from determining the right thing to do - emergency_flavor = dict() - emergency_flavor['name'] = "m1.xlarge" - - if not connect_to_openstack(): - return emergency_flavor - flavors = get_nova_flavors(project_name) - try: - flavors_object = json.loads(flavors) - except ValueError: - logger.error('Could not parse nova return data') - return emergency_flavor - + flavors_object = json.loads(flavors) cpu_candidates = list() ram_candidates = list() disk_candidates = list() @@ -589,7 +395,7 @@ def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): if len(disk_candidates) == 0: # uh-oh, just return the largest and hope for the best! - return emergency_flavor + return "m1.xlarge" elif len(disk_candidates) == 1: return disk_candidates[0] else: @@ -597,7 +403,7 @@ def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): # let's find the smallest flavor left! 
cpu_low = 99 disk_low = 999 - ram_low = 99999 + ram_low = 9999 for f in disk_candidates: if f["vcpus"] < cpu_low: cpu_low = f["vcpus"] @@ -648,6 +454,209 @@ def create_stack(stack_name, template_string): return do_post(url, data) + +def create_stack_snapshot(stack_name, tenant_id, snapshot_name): + """ + Creates a snapshot of the Stack via a HEAT REST call + :param stack_name: name of the stack to create + :param tenant_id: tenant id of the openstack project + :param snapshot_name: name of the snapshot + :return: JSON response from HEAT-API or None on failure + """ + logger.debug("In create stack snapshot--------------") + stack_details = get_stack_details(stack_name) + if stack_details is None: + return None + else: + stack_id = str(stack_details["id"]) + + #stack_id = get_stack_details(stack_name) + create_snapshot_url = create_heat_url("/" + str(tenant_id) + "/stacks/%s/%s/snapshots" % (stack_name, stack_id)) + data = """{ + "name": "%s" + }""" % snapshot_name + logger.debug("Data before posting-----------") + logger.debug(data) + + return do_post(create_snapshot_url, data) + + + +def get_snapshot_list(tenant_id, stack_name, topology_id): + + print("In snapshot list") + + stack_details = get_stack_details(stack_name) + if stack_details is None: + return None + else: + stack_id = str(stack_details["id"]) + snapshot_list_url = create_heat_url('/%s/stacks/%s/%s/snapshots' % (tenant_id, stack_name, stack_id)) + stack_snapshot_list = do_get(snapshot_list_url) + l1 = json.loads(stack_snapshot_list) + #l2 = l1['snapshots'] + snap_list = list() + + for snap in l1["snapshots"]: + if snap["status"] == "COMPLETE": + snap_detail = get_snap_detail(snap, stack_name, topology_id) + snap_list.append(snap_detail) + logger.debug(snap_list) + return snap_list + +def get_snap_detail(snap, stack_name, topology_id): + + logger.debug("Getting snapshot details-------------") + logger.debug(snap) + snap_list = dict() + snap_list["name"] = snap["name"] + snap_list["id"] = snap["id"] + snap_list["stack_name"] = stack_name + snap_list["topology_id"] = str(topology_id) + + return snap_list + +def get_server_details(search_string): + server_details_url = create_nova_url('/servers?name=%s' % search_string) + server_details_json = do_get(server_details_url) + server_details_dict = json.loads(server_details_json) + + if server_details_dict is None: + return None + else: + server_details_list = list() + server_details_list = server_details_dict['servers'] + for det in server_details_list: + server_details = det['id'] + logger.debug(server_details) + return server_details + + + + +def delete_snapshot(tenant_id, stack_name, snapshot_id): + """ + Deletes a stack from OpenStack + :param stack_name: name of the stack to be deleted + :return: JSON response fro HEAT API + """ + logger.debug("--- delete_stack_snapshot ---") + + stack_details = get_stack_details(stack_name) + if stack_details is None: + return None + else: + stack_id = stack_details["id"] + url = create_heat_url("/%s/stacks/%s/%s/snapshots/%s" % (tenant_id, stack_name, stack_id, snapshot_id)) + return do_delete(url) + + + +def rollback_snapshot(tenant_id, stack_name, snapshot_id): + """ + Deletes a stack from OpenStack + :param stack_name: name of the stack to be deleted + :return: JSON response fro HEAT API + """ + logger.debug("--- delete_stack_snapshot ---") + data = "" + stack_details = get_stack_details(stack_name) + if stack_details is None: + return None + else: + stack_id = stack_details["id"] + url = 
create_heat_url("/%s/stacks/%s/%s/snapshots/%s/restore" % (tenant_id, stack_name, stack_id, snapshot_id)) + return do_post(url, data) + + + +def rebuild_instance_openstack(server_id, image_id): + logger.debug("-------Rebuild server openstack") + url = create_nova_url("/servers/%s/action" % server_id) + data = '''{ + "rebuild" : { + "imageRef" : "%s" + } + }''' % str(image_id) + + return do_post(url, data) + + + + +def update_stack_template(stack_name, template_string): + + """ + Creates a Stack via a HEAT template + :param stack_name: name of the stack to create + :param template_string: HEAT template to be used + :return: JSON response from HEAT-API or None on failure + """ + logger.debug("--- update_stack ---") + stack_details = get_stack_details(stack_name) + if stack_details is None: + return None + else: + stack_id = stack_details["id"] + + url = create_heat_url("/" + str(_tenant_id) + "/stacks/%s/%s" % (stack_name, stack_id)) + logger.debug("URL to update stack") + logger.debug(url) + data = '''{ + "disable_rollback": true, + "parameters": {}, + "template": %s + }''' % (template_string) + logger.debug("updating CREATING stack with data:") + logger.debug(data) + try: + request = urllib2.Request(url) + request.add_header("Content-Type", "application/json") + request.add_header("charset", "UTF-8") + request.add_header("X-Auth-Token", _auth_token) + request.get_method = lambda: 'PATCH' + + if data == "": + result = urllib2.urlopen(request) + else: + result = urllib2.urlopen(request, data) + + return result.read() + except URLError as e: + logger.error("Could not perform PUT to url: %s" % url) + logger.error("error was %s" % str(e)) + return None + #return do_post(url, data) + + + + +def get_stack_ports(stack_name, tenant_id): + stack_details = get_stack_details(stack_name) + + if stack_details is None: + return None + else: + stack_id = str(stack_details["id"]) + + try: + get_port_url = create_heat_url( '/%s/stacks/%s/%s/resources?type=OS::Neutron::Port' % (tenant_id, stack_name, stack_id)) + resources = do_get(get_port_url) + resource_dict = json.loads(resources) + resource_list = resource_dict['resources'] + print(resource_list) + port_list = list() + for port in resource_list: + port_list.append(port['resource_name']) + print(port_list) + return port_list + + except URLError as e: + logger.error("Could not perform PUT to url: %s" % url) + logger.error("error was %s" % str(e)) + return None + + def get_nova_serial_console(instance_name): """ Get the websocket URL for the serial proxy for a given nova server (instance) @@ -705,10 +714,6 @@ def create_glance_url(url): return "http://" + configuration.openstack_host + _glance_url + url -def create_neutron_url(url): - return "http://" + configuration.openstack_host + _neutron_url + url - - def create_os_url(url): return "http://" + configuration.openstack_host + _os_url + url diff --git a/common/lib/wistarUtils.py b/common/lib/wistarUtils.py index cdaf800..d4cf564 100644 --- a/common/lib/wistarUtils.py +++ b/common/lib/wistarUtils.py @@ -19,7 +19,6 @@ import json import logging -import math import os import re import subprocess @@ -76,28 +75,26 @@ def _generate_mac(topology_id): """ silly attempt to keep mac addresses unique use the topology id to generate 2 octets, and the number of - macs used so far to generate the last two octets. 
- Uses the locally administered address ranges 52:54:00 through 52:54:FF - :param topology_id: string id of the topology we are building + macs used so far to generate the last one + :param topology_id: id of the topology we are building :return: mostly unique mac address that should be safe to deploy """ - tid = int(topology_id) global mac_counter global used_macs - base = '52:54:00:00:00:00' - ba = base.split(':') - ba[2] = '%02x' % int(tid / 256) - ba[3] = '%02x' % int(tid % 256) - ba[4] = '%02x' % int(len(used_macs[topology_id]) / 256) - ba[5] = '%02x' % int(mac_counter) + b1 = "52:54:" + b2 = '%02x' % int(len(used_macs[topology_id]) / 256) + base = b1 + str(b2) + ":" + tid = "%04x" % int(topology_id) + mac_base = base + str(tid[:2]) + ":" + str(tid[2:4]) + ":" + mac = mac_base + (str("%02x" % mac_counter)[:2]) mac_counter += 1 mac_counter = mac_counter % 256 - return ':'.join(ba) + return mac -def get_heat_json_from_topology_config(config, project_name='admin'): +def get_heat_json_from_topology_config(config): """ Generates heat template from the topology configuration object use load_config_from_topology_json to get the configuration from the Topology @@ -122,11 +119,10 @@ def get_heat_json_from_topology_config(config, project_name='admin'): nrs = dict() nrs["type"] = "OS::Neutron::Subnet" - # + p = dict() p["cidr"] = "1.1.1.0/24" p["enable_dhcp"] = False - p["gateway_ip"] = "" p["name"] = network["name"] + "_subnet" if network["name"] == "virbr0": p["network_id"] = configuration.openstack_mgmt_network @@ -153,21 +149,10 @@ def get_heat_json_from_topology_config(config, project_name='admin'): image_details_dict[device["imageId"]] = image_details image_name = image_details["name"] - - image_disk_size = 20 - - # set the size in GB, rounding up to the nearest int - if 'size' in image_details: - current_size = int(image_details['size']) - image_disk_size = int(math.ceil(current_size / 100000000)) - - # if the flavor asks for a minimum disk size, let's see if it's larger that what we have - if "min_disk" in image_details and image_details['min_disk'] > image_disk_size: - image_disk_size = image_details["min_disk"] - - # if the user has specified a desired disk size, grab it here so we get the correct flavor - if type(image_disk_size) is int and device["resizeImage"] > image_disk_size: - image_disk_size = device["resizeImage"] + if "disk" in image_details: + image_disk = image_details["disk"] + else: + image_disk = 20 # determine openstack flavor here device_ram = int(device["ram"]) @@ -176,7 +161,7 @@ def get_heat_json_from_topology_config(config, project_name='admin'): flavor_detail = openstackUtils.get_minimum_flavor_for_specs(configuration.openstack_project, device_cpu, device_ram, - image_disk_size + image_disk ) flavor = flavor_detail["name"] @@ -211,7 +196,6 @@ def get_heat_json_from_topology_config(config, project_name='admin'): for cfp in device["configDriveParams"]: if "destination" in cfp and cfp["destination"] == "/boot/loader.conf": - logger.debug("Creating loader.conf config-drive entry") template_name = cfp["template"] loader_string = osUtils.compile_config_drive_params_template(template_name, device["name"], @@ -220,17 +204,12 @@ def get_heat_json_from_topology_config(config, project_name='admin'): device["ip"], device["managementInterface"]) - logger.debug('----------') - logger.debug(loader_string) - logger.debug('----------') - for l in loader_string.split('\n'): - if '=' in l: - left, right = l.split('=') - if left not in metadata and left != '': - metadata[left] = 
right.replace('"', '') + for l in loader_string: + left, right = l.split('=') + if left not in metadata: + metadata[left] = right if "destination" in cfp and cfp["destination"] == "/juniper.conf": - logger.debug("Creating juniper.conf config-drive entry") template_name = cfp["template"] personality_string = osUtils.compile_config_drive_params_template(template_name, device["name"], @@ -241,43 +220,166 @@ def get_heat_json_from_topology_config(config, project_name='admin'): dr["properties"]["personality"] = dict() dr["properties"]["personality"] = {"/config/juniper.conf": personality_string} - else: - logger.debug('No juniper.conf found here ') - if device['cloudInitSupport']: - logger.debug('creating cloud-init script') + template["resources"][device["name"]] = dr + + for device in config["devices"]: + index = 0 + for port in device["interfaces"]: + pr = dict() + pr["type"] = "OS::Neutron::Port" + p = dict() + + if port["bridge"] == "virbr0": + p["network_id"] = configuration.openstack_mgmt_network + elif port["bridge"] == configuration.openstack_external_network: + p["network_id"] = configuration.openstack_external_network + else: + p["network_id"] = {"get_resource": port["bridge"]} + p["name"] = device["name"] + "_port" + str(index) + + pr["properties"] = p + template["resources"][device["name"] + "_port" + str(index)] = pr + index += 1 + + return json.dumps(template) + + + + + + + + +def get_heat_json_from_topology_config_for_update(config, port_list): + """ + Generates heat template from the topology configuration object + use load_config_from_topology_json to get the configuration from the Topology + :param config: configuration dict from load_config_from_topology_json + :return: json encoded heat template as String + """ + + template = dict() + template["heat_template_version"] = "2013-05-23" + template["resources"] = dict() + + for network in config["networks"]: + nr = dict() + nr["type"] = "OS::Neutron::Net" + + nrp = dict() + nrp["shared"] = False + nrp["name"] = network["name"] + nrp["admin_state_up"] = True + + nr["properties"] = nrp + + nrs = dict() + nrs["type"] = "OS::Neutron::Subnet" + + p = dict() + p["cidr"] = "1.1.1.0/24" + p["enable_dhcp"] = False + p["name"] = network["name"] + "_subnet" + if network["name"] == "virbr0": + p["network_id"] = configuration.openstack_mgmt_network + elif network["name"] == configuration.openstack_external_network: + p["network_id"] = configuration.openstack_external_network + else: + p["network_id"] = {"get_resource": network["name"]} + + nrs["properties"] = p + + template["resources"][network["name"]] = nr + template["resources"][network["name"] + "_subnet"] = nrs + + # cache the image_details here to avoid multiple REST calls for details about an image type + # as many topologies have lots of the same types of images around + image_details_dict = dict() + + for device in config["devices"]: + + if device["imageId"] in image_details_dict: + image_details = image_details_dict[device["imageId"]] + else: + image_details = imageUtils.get_image_detail(device["imageId"]) + image_details_dict[device["imageId"]] = image_details + + image_name = image_details["name"] + if "disk" in image_details: + image_disk = image_details["disk"] + else: + image_disk = 20 + + # determine openstack flavor here + device_ram = int(device["ram"]) + device_cpu = int(device["cpu"]) + + flavor_detail = openstackUtils.get_minimum_flavor_for_specs(configuration.openstack_project, + device_cpu, + device_ram, + image_disk + ) + + flavor = flavor_detail["name"] + + dr = 
dict() + dr["type"] = "OS::Nova::Server" + dr["properties"] = dict() + dr["properties"]["flavor"] = flavor + dr["properties"]["networks"] = [] + index = 0 + for p in device["interfaces"]: + port = dict() + port["port"] = dict() + if device["name"] + "_port" + str(index) in port_list: + port["port"]["get_resource"] = device["name"] + "_port" + str(index) + "_nora" + else: + port["port"]["get_resource"] = device["name"] + "_port" + str(index) + index += 1 + dr["properties"]["networks"].append(port) + + dr["properties"]["image"] = image_name + dr["properties"]["name"] = device["name"] + + if device["configDriveSupport"]: dr["properties"]["config_drive"] = True dr["properties"]["user_data_format"] = "RAW" metadata = dict() metadata["hostname"] = device["name"] + metadata["console"] = "vidconsole" dr["properties"]["metadata"] = metadata - # grab the prefix len from the management subnet which is in the form 192.168.122.0/24 - if '/' in configuration.management_subnet: - management_prefix_len = configuration.management_subnet.split('/')[1] - else: - management_prefix_len = '24' - management_ip = device['ip'] + '/' + management_prefix_len + # let's check all the configDriveParams and look for a junos config + # FIXME - this may need tweaked if we need to include config drive cloud-init support for other platforms + # right now we just need to ignore /boot/loader.conf + for cfp in device["configDriveParams"]: + + if "destination" in cfp and cfp["destination"] == "/boot/loader.conf": + template_name = cfp["template"] + loader_string = osUtils.compile_config_drive_params_template(template_name, + device["name"], + device["label"], + device["password"], + device["ip"], + device["managementInterface"]) - device_config = osUtils.get_cloud_init_config(device['name'], - device['label'], - management_ip, - device['managementInterface'], - device['password']) + for l in loader_string: + left, right = l.split('=') + if left not in metadata: + metadata[left] = right - script_string = "" - if "configScriptId" in device and device["configScriptId"] != 0: - logger.debug("Passing script data!") - try: - script = Script.objects.get(pk=int(device["configScriptId"])) - script_string = script.script - device_config["script_param"] = device.get("configScriptParam", '') - logger.debug(script_string) - except ObjectDoesNotExist: - logger.info('config script was specified but was not found!') + if "destination" in cfp and cfp["destination"] == "/juniper.conf": + template_name = cfp["template"] + personality_string = osUtils.compile_config_drive_params_template(template_name, + device["name"], + device["label"], + device["password"], + device["ip"], + device["managementInterface"]) - user_data_string = osUtils.render_cloud_init_user_data(device_config, script_string) - dr["properties"]["user_data"] = user_data_string + dr["properties"]["personality"] = dict() + dr["properties"]["personality"] = {"/config/juniper.conf": personality_string} template["resources"][device["name"]] = dr @@ -290,27 +392,28 @@ def get_heat_json_from_topology_config(config, project_name='admin'): if port["bridge"] == "virbr0": p["network_id"] = configuration.openstack_mgmt_network - - # specify our desired IP address on the management interface - p['fixed_ips'] = list() - fip = dict() - fip['ip_address'] = device['ip'] - p['fixed_ips'].append(fip) - elif port["bridge"] == configuration.openstack_external_network: p["network_id"] = configuration.openstack_external_network else: p["network_id"] = {"get_resource": port["bridge"]} - # disable port 
security on all other ports (in case this isn't set globally) - p['port_security_enabled'] = False + if device["name"] + "_port" + str(index) in port_list: + p["name"] = device["name"] + "_port" + str(index) + "_nora" + else: + p["name"] = device["name"] + "_port" + str(index) pr["properties"] = p - template["resources"][device["name"] + "_port" + str(index)] = pr + + if device["name"] + "_port" + str(index) in port_list: + template["resources"][device["name"] + "_port" + str(index) + "_nora"] = pr + else: + template["resources"][device["name"] + "_port" + str(index)] = pr index += 1 return json.dumps(template) + + def _get_management_macs_for_topology(topology_id): """ returns a list of all macs used for management interfaces for a topology @@ -344,10 +447,7 @@ def load_config_from_topology_json(topology_json, topology_id): # preload all the existing management mac addresses if any global used_macs - if configuration.deployment_backend == "kvm": - used_macs[topology_id] = _get_management_macs_for_topology(topology_id) - else: - used_macs[topology_id] = list() + used_macs[topology_id] = _get_management_macs_for_topology(topology_id) json_data = json.loads(topology_json) @@ -367,7 +467,8 @@ def load_config_from_topology_json(topology_json, topology_id): # has this topology already been deployed? is_deployed = False - if len(used_macs[topology_id]) > 0: + existing_macs = _get_management_macs_for_topology(topology_id) + if len(existing_macs) > 0: # yep, already been deployed is_deployed = True @@ -453,18 +554,15 @@ def load_config_from_topology_json(topology_json, topology_id): device["uuid"] = json_object.get('id', '') device["interfaces"] = [] - device['vncPort'] = 0 - if configuration.deployment_backend == "kvm": - # determine next available VNC port that has not currently been assigned - next_vnc_port = libvirtUtils.get_next_domain_vnc_port(device_index) - - # verify that this port is not actually in use by another process - while osUtils.check_port_in_use(next_vnc_port): - device_index += 1 - next_vnc_port = libvirtUtils.get_next_domain_vnc_port(device_index) + # determine next available VNC port that has not currently been assigned + next_vnc_port = libvirtUtils.get_next_domain_vnc_port(device_index) - device["vncPort"] = next_vnc_port + # verify that this port is not actually in use by another process + while osUtils.check_port_in_use(next_vnc_port): + device_index += 1 + next_vnc_port = libvirtUtils.get_next_domain_vnc_port(device_index) + device["vncPort"] = next_vnc_port # is this a child VM? 
# children will *always* have a parent attribute set in their userdata parent_id = user_data.get("parent", "") @@ -507,10 +605,7 @@ def load_config_from_topology_json(topology_json, topology_id): # management interface mi will always be connected to default management network (virbr0 on KVM) mi = dict() - # slight optimization for kvm backend, dont generate new mac - if configuration.deployment_backend == "kvm" and \ - is_deployed and \ - libvirtUtils.domain_exists(device['name']): + if is_deployed and libvirtUtils.domain_exists(device['name']): mi['mac'] = libvirtUtils.get_management_interface_mac_for_domain(device['name']) else: mi['mac'] = generate_next_mac(topology_id) @@ -689,9 +784,7 @@ def load_config_from_topology_json(topology_json, topology_id): if d["mgmtInterfaceIndex"] == -1: mi = dict() # if this has already been deployed, let's preserve the existing mac address that has been assigned - if configuration.deployment_backend == "kvm" and \ - is_deployed and \ - libvirtUtils.domain_exists(device['name']): + if is_deployed and libvirtUtils.domain_exists(device['name']): mi['mac'] = libvirtUtils.get_management_interface_mac_for_domain(device['name']) else: mi['mac'] = generate_next_mac(topology_id) @@ -900,7 +993,7 @@ def get_used_ips(): # logger.info(last_octet) all_ips.append(int(last_octet)) - dhcp_leases = get_consumed_management_ips() + dhcp_leases = get_dhcp_reserved_ips() all_ips.extend(dhcp_leases) logger.debug("sorting and returning all_ips") @@ -908,35 +1001,26 @@ def get_used_ips(): return all_ips -def get_consumed_management_ips(): - """ - Return a list of all ip addresses that are currently consumed on the wistar management network - THIS ASSUMES A /24 for THE MANAGEMENT NETWORK! - :return: a list of ints representing the last octet of the /24 management network - """ +def get_dhcp_reserved_ips(): + # pull current ips out of dhcp reservations and leases files + # return as a single list all_ips = list() - # let's also grab consumed management ips as well - if configuration.deployment_backend == "openstack": - if openstackUtils.connect_to_openstack(): - dhcp_leases = openstackUtils.get_consumed_management_ips() - else: - return all_ips - else: - dhcp_leases = osUtils.get_dhcp_leases() - # let's also grab current dhcp reservations - dhcp_reservations = osUtils.get_dhcp_reservations() - for dr in dhcp_reservations: - ip = str(dr["ip-address"]) - last_octet = ip.split('.')[-1] - all_ips.append(int(last_octet)) - + # let's also grab current dhcp leases as well + dhcp_leases = osUtils.get_dhcp_leases() for lease in dhcp_leases: ip = str(lease["ip-address"]) logger.debug("adding active lease %s" % ip) last_octet = ip.split('.')[-1] all_ips.append(int(last_octet)) + # let's also grab current dhcp reservations + dhcp_leases = osUtils.get_dhcp_reservations() + for lease in dhcp_leases: + ip = str(lease["ip-address"]) + last_octet = ip.split('.')[-1] + all_ips.append(int(last_octet)) + return all_ips diff --git a/common/static/js/topology_utils.js b/common/static/js/topology_utils.js index d74907d..d985923 100644 --- a/common/static/js/topology_utils.js +++ b/common/static/js/topology_utils.js @@ -67,6 +67,14 @@ function setImageType() { } } + + + + + + + + function addIconAndClose() { rv = addIcon(); if (rv == true) { diff --git a/topologies/templates/topologies/edit.html b/topologies/templates/topologies/edit.html index e7d823e..8949579 100644 --- a/topologies/templates/topologies/edit.html +++ b/topologies/templates/topologies/edit.html @@ -39,8 +39,6 @@ - - @@ -181,7 +179,7 
@@ } updateBootCounter++; console.log("updating boot up state for topology"); - for(var v=0;v
+
+ +
+
+ {% if global_config.deployment_backend == "openstack" and is_deployed == true %} + + + + {% endif %} @@ -2083,9 +2137,7 @@

- - - + @@ -2155,6 +2207,11 @@ + {% if global_config.deployment_backend == "openstack" and is_deployed == true %} + + + + {% endif %} @@ -2528,6 +2585,43 @@
+ + +
+
+ + + + + + + + + + + + + + + + +
+ +
+
+ {% if topo_id != None %}
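The Update HEAT action added earlier in this patch ends up in openstackUtils.update_stack_template(), which drops down to raw urllib2 because Heat expects a PATCH for an in-place template update and the module's do_post() helper only issues POSTs. A minimal standalone sketch of that request shape, with placeholder arguments rather than values taken from this diff:

    import urllib2


    def heat_patch_stack(stack_url, auth_token, template_json):
        # stack_url should point at /v1/<tenant_id>/stacks/<stack_name>/<stack_id>;
        # template_json is the already-serialized HEAT template string
        body = '{"disable_rollback": true, "parameters": {}, "template": %s}' % template_json
        request = urllib2.Request(stack_url, body)
        request.add_header("Content-Type", "application/json")
        request.add_header("X-Auth-Token", auth_token)
        # urllib2 has no PATCH verb of its own, so override the method resolver
        request.get_method = lambda: 'PATCH'
        return urllib2.urlopen(request).read()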
diff --git a/topologies/templates/topologies/overlay/rebuild_instance.html b/topologies/templates/topologies/overlay/rebuild_instance.html new file mode 100644 index 0000000..d31ce7d --- /dev/null +++ b/topologies/templates/topologies/overlay/rebuild_instance.html @@ -0,0 +1,62 @@ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ X +
+

Rebuild Instance

+
+ Instance Name + + {{instance_name}} +
+ Base Image + + +
+ + + + + +   + +
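The rebuild form above is handled by the new rebuild_instance/rebuild_server views further down: the instance is looked up in Nova by its 't<topology_id>_<instance_name>' name and then rebuilt against the chosen Glance image. A rough sketch of those two REST calls, reusing helpers that already exist in common/lib/openstackUtils.py (create_nova_url, get_image_id_for_name, do_get, do_post), purely for illustration:

    import json

    from common.lib import openstackUtils


    def rebuild_by_name(topology_id, instance_name, image_name):
        if not openstackUtils.connect_to_openstack():
            return None
        # wistar names nova instances 't<topology_id>_<instance_name>'
        search = 't%s_%s' % (topology_id, instance_name)
        reply = openstackUtils.do_get(
            openstackUtils.create_nova_url('/servers?name=%s' % search))
        servers = json.loads(reply).get('servers', []) if reply else []
        if not servers:
            return None
        image_id = openstackUtils.get_image_id_for_name(image_name)
        # POST /servers/<id>/action with a rebuild body pointing at the new image
        action_url = openstackUtils.create_nova_url('/servers/%s/action' % servers[0]['id'])
        return openstackUtils.do_post(action_url, json.dumps({'rebuild': {'imageRef': image_id}}))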
+ diff --git a/topologies/urls.py b/topologies/urls.py index ce47536..ac816ef 100644 --- a/topologies/urls.py +++ b/topologies/urls.py @@ -31,6 +31,9 @@ url(r'^error/$', views.error, name='error'), url(r'^clone/(?P\d+)/$', views.clone, name='clone'), url(r'^createConfigSet/$', views.create_config_set, name='createConfigSet'), + url(r'^createSnapshot/$', views.create_snapshot_topo, name='createSnapshot'), + url(r'^rebuildInstance/$', views.rebuild_instance, name='rebuildInstance'), + url(r'^rebuildServer/$', views.rebuild_server, name='rebuildServer'), url(r'^delete/(?P\d+)/$', views.delete, name='delete'), url(r'^(?P\d+)/$', views.detail, name='detail'), url(r'^launch/(?P\d+)$', views.launch, name='launch'), diff --git a/topologies/views.py b/topologies/views.py index e6c1f85..b10bec4 100644 --- a/topologies/views.py +++ b/topologies/views.py @@ -33,7 +33,6 @@ from common.lib import junosUtils from common.lib import libvirtUtils from common.lib import osUtils -from common.lib import ovsUtils from common.lib import wistarUtils from common.lib import openstackUtils @@ -68,22 +67,19 @@ def edit(request): def new(request): logger.debug('---- topology new ----') - + image_list = Image.objects.all().order_by('name') script_list = Script.objects.all().order_by('name') vm_types = configuration.vm_image_types vm_types_string = json.dumps(vm_types) + image_list_json = serializers.serialize('json', Image.objects.all(), fields=('name', 'type')) currently_allocated_ips = wistarUtils.get_used_ips() - dhcp_reservations = wistarUtils.get_consumed_management_ips() + dhcp_reservations = wistarUtils.get_dhcp_reserved_ips() if configuration.deployment_backend == "openstack": external_bridge = configuration.openstack_external_network - image_list = Image.objects.filter(filePath='').order_by('name') else: external_bridge = configuration.kvm_external_bridge - image_list = Image.objects.exclude(filePath='').order_by('name') - - image_list_json = serializers.serialize('json', image_list, fields=('name', 'type')) context = {'image_list': image_list, 'script_list': script_list, 'vm_types': vm_types_string, 'image_list_json': image_list_json, @@ -130,7 +126,7 @@ def import_topology(request): logger.debug("Iterating json objects in imported data") for json_object in json_data: if "userData" in json_object and "wistarVm" in json_object["userData"]: - # logger.debug("Found one") + logger.debug("Found one") ud = json_object["userData"] # check if we have this type of image image_list = Image.objects.filter(type=ud["type"]) @@ -141,7 +137,7 @@ def import_topology(request): '! 
Please upload an image of this type and try again') image = image_list[0] - # logger.debug(str(image.id)) + logger.debug(str(image.id)) json_object["userData"]["image"] = image.id valid_ip = wistarUtils.get_next_ip(currently_allocated_ips, next_ip_floor) @@ -161,14 +157,11 @@ def import_topology(request): vm_types = configuration.vm_image_types vm_types_string = json.dumps(vm_types) - dhcp_reservations = wistarUtils.get_consumed_management_ips() - context = {'image_list': image_list, 'image_list_json': image_list_json, 'allocated_ips': currently_allocated_ips, 'script_list': script_list, 'vm_types': vm_types_string, - 'dhcp_reservations': dhcp_reservations, 'topo': topology } @@ -202,7 +195,7 @@ def clone(request, topo_id): currently_allocated_ips = wistarUtils.get_used_ips() cloned_ips = wistarUtils.get_used_ips_from_topology_json(topology.json) - dhcp_reservations = wistarUtils.get_consumed_management_ips() + dhcp_reservations = wistarUtils.get_dhcp_reserved_ips() currently_allocated_ips += cloned_ips @@ -280,19 +273,11 @@ def delete(request, topology_id): if configuration.deployment_backend == "kvm": - if hasattr(configuration, "use_openvswitch") and configuration.use_openvswitch: - use_ovs = True - else: - use_ovs = False - network_list = libvirtUtils.get_networks_for_topology(topology_prefix) for network in network_list: logger.debug("undefine network: " + network["name"]) libvirtUtils.undefine_network(network["name"]) - if use_ovs: - ovsUtils.delete_bridge(network["name"]) - domain_list = libvirtUtils.get_domains_for_topology(topology_prefix) for domain in domain_list: @@ -486,7 +471,7 @@ def add_instance_form(request): image_list_json = serializers.serialize('json', Image.objects.all(), fields=('name', 'type')) currently_allocated_ips = wistarUtils.get_used_ips() - dhcp_reservations = wistarUtils.get_consumed_management_ips() + dhcp_reservations = wistarUtils.get_dhcp_reserved_ips() if configuration.deployment_backend == "openstack": external_bridge = configuration.openstack_external_network @@ -502,3 +487,124 @@ def add_instance_form(request): 'dhcp_reservations': dhcp_reservations, } return render(request, 'topologies/overlay/add_instance.html', context) + + +def rebuild_instance(request): + logger.info('---------rebuild instance--------') + required_fields = set(['instance_name']) + if not required_fields.issubset(request.POST): + return render(request, 'ajax/overlayError.html', {'error': "Invalid Parameters in POST"}) + + instance_name = request.POST['instance_name'] + topo_id = request.POST['topology_id'] + try: + image_list_linux = list() + image_list = Image.objects.all().order_by('name') + for i in image_list: + if not (i.type.startswith('junos')): + + image_list_linux.append(i) + + vm_types = configuration.vm_image_types + vm_types_string = json.dumps(vm_types) + logger.debug(vm_types_string) + + image_list_json = serializers.serialize('json', Image.objects.all(), fields=('name', 'type')) + logger.debug(image_list_json) + + search_string = 't%s_%s' % (topo_id, instance_name) + logger.debug(search_string) + if openstackUtils.connect_to_openstack(): + server_id = openstackUtils.get_server_details(search_string) + context = {'image_list': image_list_linux, + 'instance_name': instance_name, + 'vm_types': vm_types_string, + 'image_list_json': image_list_json, + 'server_id': server_id, + 'topo_id': topo_id + } + return render(request, 'topologies/overlay/rebuild_instance.html', context) + except Exception as e: + logger.debug("Caught Exception in deploy") + logger.debug(str(e)) + 
return render(request, 'error.html', {'error': str(e)}) + + +def rebuild_server(request): + + try: + logger.debug("Inside the rebuild method") + required_fields = set(['topoIconImageSelect', 'topo_id', 'server_id']) + if not required_fields.issubset(request.POST): + return render(request, 'ajax/overlayError.html', {'error': "Invalid Parameters in POST"}) + topology_id = request.POST["topo_id"] + server_id = request.POST["server_id"] + image_string = request.POST["topoIconImageSelect"].split(":")[2] + if openstackUtils.connect_to_openstack(): + image_id = openstackUtils.get_image_id_for_name(image_string) + + logger.debug("Parameters %s %s %s" % (server_id, topology_id, image_id)) + + if openstackUtils.connect_to_openstack(): + res = openstackUtils.rebuild_instance_openstack(server_id, image_id) + + logger.debug("----------------Response----------------") + + if res is None: + return render(request, 'error.html', {'error': "Not able to rebuild the server"}) + else: + return HttpResponseRedirect('/topologies/' + topology_id + '/') + + except Exception as e: + logger.debug("Caught Exception in deploy") + logger.debug(str(e)) + return render(request, 'error.html', {'error': str(e)}) + + + + +def create_snapshot_topo(request): + + + """ + :param request: Django request + :param topology_id: id of the topology to export + :param snap_name: id of the topology to export + :return: creates a snapshot of the heat template + """ + try: + logger.debug("Inside create Snapshot----------") + tenant_id = openstackUtils.get_project_id(configuration.openstack_project) + logger.debug("using tenant_id of: %s" % tenant_id) + if tenant_id is None: + raise Exception("No project found for %s" % configuration.openstack_project) + logger.debug(request.POST) + required_fields = set(['snap_name', 'snapshot_topo_id']) + if not required_fields.issubset(request.POST): + return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"}) + + topology_id = request.POST["snapshot_topo_id"] + snap_name = request.POST["snap_name"] + logger.debug("using tenant_id of: %s" % tenant_id) + logger.debug("Topology id -------------------: %s" % topology_id) + logger.debug("Snap name -------------------: %s" % snap_name) + + try: + topology = Topology.objects.get(pk=topology_id) + except ObjectDoesNotExist: + logger.error('topology id %s was not found!' % topology_id) + return render(request, 'error.html', {'error': "Topology not found!"}) + + # FIXME - verify all images are in glance before jumping off here! 
+ stack_name = topology.name.replace(' ', '_') + + logger.debug("-------------------stack_name--------------------: %s" % stack_name) + if openstackUtils.connect_to_openstack(): + logger.debug(openstackUtils.create_stack_snapshot(stack_name, tenant_id, snap_name)) + + return HttpResponseRedirect('/topologies/' + topology_id + '/') + + except Exception as e: + logger.debug("Caught Exception in deploy") + logger.debug(str(e)) + return render(request, 'error.html', {'error': str(e)}) From 1c42154ab1c1e7c944643696ecdda61ab4f47fb4 Mon Sep 17 00:00:00 2001 From: anuragmenon2011 Date: Thu, 12 Apr 2018 18:15:34 -0400 Subject: [PATCH 02/10] Revert "Changes for snapshots" --- .../ajax/openstackDeploymentStatus.html | 138 ++---- ajax/templates/ajax/snapshot_list.html | 61 --- ajax/urls.py | 8 - ajax/views.py | 223 ++-------- common/lib/openstackUtils.py | 421 +++++++++--------- common/lib/wistarUtils.py | 328 +++++--------- common/static/js/topology_utils.js | 8 - topologies/templates/topologies/edit.html | 132 +----- .../topologies/overlay/rebuild_instance.html | 62 --- topologies/urls.py | 3 - topologies/views.py | 150 +------ 11 files changed, 447 insertions(+), 1087 deletions(-) delete mode 100644 ajax/templates/ajax/snapshot_list.html delete mode 100644 topologies/templates/topologies/overlay/rebuild_instance.html diff --git a/ajax/templates/ajax/openstackDeploymentStatus.html b/ajax/templates/ajax/openstackDeploymentStatus.html index dfa8d22..7c7ca87 100644 --- a/ajax/templates/ajax/openstackDeploymentStatus.html +++ b/ajax/templates/ajax/openstackDeploymentStatus.html @@ -1,22 +1,14 @@ {% load staticfiles %} - - - - - - - - + {% if stack == None %} @@ -35,142 +27,82 @@ {% else %} - {% if stack.stack_status != "CREATE_COMPLETE" %} + {% if 'COMPLETE' not in stack.stack_status %} - - - - - - - - - {% else %} {% for resource in stack_resources.resources %} {% if resource.resource_type == "OS::Nova::Server" %} - - - - {% endif %} {% endfor %} {% endif %} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - {% endif %}
Stack Status Stack Status
Status + {{ stack.stack_status }}
Status Detail + {{ stack.stack_status_reason }}
+ onclick="javascript: window.open('{{ openstack_horizon_url }}/project/instances/{{ resource.physical_resource_id }}')"> {{ resource.resource_name }} - Status - -   - - {% if resource.resource_status == "CREATE_COMPLETE" %} -   + + {% if 'COMPLETE' in resource.resource_status %} +
+   {% else %} - +
+   {% endif %}
Options -
- - View in Horizon - - - - Delete Stack - - - - Debug HEAT - - - Update HEAT - + +
+ 🔍 +
+   +
+ +
+   +
+ +
+  
+
- HEAT Snapshots -
- - Create Snapshots - - - - List Snapshots - -
- diff --git a/ajax/templates/ajax/snapshot_list.html b/ajax/templates/ajax/snapshot_list.html deleted file mode 100644 index f25ca61..0000000 --- a/ajax/templates/ajax/snapshot_list.html +++ /dev/null @@ -1,61 +0,0 @@ -{% extends "base.html" %} -{% block title %}Wistar - Lab Rat - Snapshot List{% endblock %} -{% load staticfiles %} -{% block content %} -
- -

Stack Snapshot List

-
    - {% for message in messages %} -
  • {{ message }}
  • - {% endfor %} -
- - - - - - - - {% for snapshot in snapshot_list %} - - - - - - - {% endfor %} -
NameSnapshot IdStack NameOptions
- {{snapshot.name }} - - {{ snapshot.id }} - - {{ snapshot.stack_name }} - - - -
- -
-{% endblock %} diff --git a/ajax/urls.py b/ajax/urls.py index 2398048..59520b4 100644 --- a/ajax/urls.py +++ b/ajax/urls.py @@ -43,15 +43,7 @@ url(r'^deployTopology/$', views.deploy_topology, name='deployTopology'), url(r'^redeployTopology/$', views.redeploy_topology, name='redeployTopology'), url(r'^deployStack/(?P[^/]+)$', views.deploy_stack, name='deployStack'), - url(r'^updateStack/(?P[^/]+)$', views.update_stack, name='updateStack'), url(r'^deleteStack/(?P[^/]+)$', views.delete_stack, name='deleteStack'), - #url(r'^createSnapshot/(?P[^/]+)$', views.create_snapshot, name='createSnapshot'), - #url(r'^createSnapshot/$', views.create_snapshot, name='createSnapshot'), - url(r'^listSnapshot/(?P[^/]+)$', views.list_snapshot, name='listSnapshot'), - url(r'^deleteSnapshot/(?P[^/]+)/(?P[^/]+)/$', views.delete_snapshot, - name='deleteSnapshot'), - url(r'^rollbackSnapshot/(?P[^/]+)/(?P[^/]+)/$', views.rollback_snapshot, - name='rollbackSnapshot'), url(r'^startTopology/$', views.start_topology, name='startTopology'), url(r'^pauseTopology/$', views.pause_topology, name='pauseTopology'), url(r'^manageDomain/$', views.manage_domain, name='manageDomain'), diff --git a/ajax/views.py b/ajax/views.py index 7e5642d..c3c09fe 100644 --- a/ajax/views.py +++ b/ajax/views.py @@ -35,7 +35,6 @@ from common.lib import linuxUtils from common.lib import openstackUtils from common.lib import osUtils -from common.lib import vboxUtils from common.lib import wistarUtils from common.lib.WistarException import WistarException from images.models import Image @@ -261,23 +260,26 @@ def get_junos_startup_state(request): return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"}) name = request.POST['name'] + + # always check network if possible regardless of deployment_backend + if "ip" in request.POST: + # this instance is auto-configured, so we can just check for IP here + response_data["network"] = osUtils.check_ip(request.POST["ip"]) + if configuration.deployment_backend == "kvm" and libvirtUtils.is_domain_running(name): # topologies/edit will fire multiple calls at once # let's just put a bit of a breather between each one response_data["power"] = True - if "ip" in request.POST: - # this instance is auto-configured, so we can just check for IP here - response_data["network"] = osUtils.check_ip(request.POST["ip"]) - else: + if "ip" not in request.POST: time.sleep(random.randint(0, 10) * .10) - response_data["console"] = consoleUtils.is_junos_device_at_prompt(name) elif configuration.deployment_backend == "openstack": time.sleep(random.randint(0, 20) * .10) response_data["power"] = True - response_data["console"] = consoleUtils.is_junos_device_at_prompt(name) + # console no longer supported in openstack deployments + response_data["console"] = False return HttpResponse(json.dumps(response_data), content_type="application/json") @@ -293,20 +295,24 @@ def get_linux_startup_state(request): return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"}) name = request.POST['name'] + # always check network if possible regardless of deployment_backend + if "ip" in request.POST: + # this instance is auto-configured, so we can just check for IP here + response_data["network"] = osUtils.check_ip(request.POST["ip"]) if configuration.deployment_backend == "openstack": if openstackUtils.connect_to_openstack(): time.sleep(random.randint(0, 10) * .10) response_data["power"] = True - response_data["console"] = consoleUtils.is_linux_device_at_prompt(name) + # as of 2018-01-01 we no 
longer support openstack console, this is dead code + # response_data["console"] = consoleUtils.is_linux_device_at_prompt(name) + response_data['console'] = False else: if libvirtUtils.is_domain_running(name): time.sleep(random.randint(0, 10) * .10) response_data["power"] = True - if "ip" in request.POST: - # this instance is auto-configured, so we can just check for IP here - response_data["network"] = osUtils.check_ip(request.POST["ip"]) - else: + # let's check the console only if we do not have network available to check + if "ip" not in request.POST: response_data["console"] = consoleUtils.is_linux_device_at_prompt(name) return HttpResponse(json.dumps(response_data), content_type="application/json") @@ -577,11 +583,17 @@ def refresh_openstack_deployment_status(request, topology_id): stack_details = openstackUtils.get_stack_details(stack_name) stack_resources = dict() logger.debug(stack_details) - if stack_details is not None and stack_details["stack_status"] == "CREATE_COMPLETE": + if stack_details is not None and 'stack_status' in stack_details and 'COMPLETE' in stack_details["stack_status"]: stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"]) + if hasattr(configuration, 'openstack_horizon_url'): + horizon_url = configuration.openstack_horizon_url + else: + horizon_url = 'http://' + configuration.openstack_host + '/dashboard' + context = {"stack": stack_details, "topology_id": topology.id, "openstack_host": configuration.openstack_host, + "openstack_horizon_url": horizon_url, "stack_resources": stack_resources } return render(request, 'ajax/openstackDeploymentStatus.html', context) @@ -628,7 +640,7 @@ def get_available_ip(request): # IP addresses. This makes the attempt to use 'old' ips that # are at least not still in use. logger.info("getting ips that are currently reserved via DHCP") - all_used_ips = wistarUtils.get_dhcp_reserved_ips() + all_used_ips = wistarUtils.get_consumed_management_ips() logger.debug(all_used_ips) next_ip = wistarUtils.get_next_ip(all_used_ips, 2) logger.debug(next_ip) @@ -1015,8 +1027,15 @@ def inline_deploy_topology(config): if device["cloudInitSupport"]: # grab the last interface management_interface = device["managementInterface"] - # this will come back to haunt me one day. Assume /24 for mgmt network is sprinkled everywhere! 
- management_ip = device["ip"] + "/24" + + # grab the prefix len from the management subnet which is in the form 192.168.122.0/24 + if '/' in configuration.management_subnet: + management_prefix_len = configuration.management_subnet.split('/')[1] + else: + management_prefix_len = '24' + + management_ip = device['ip'] + '/' + management_prefix_len + # domain_name, host_name, mgmt_ip, mgmt_interface script_string = "" script_param = "" @@ -1419,11 +1438,15 @@ def deploy_stack(request, topology_id): return render(request, 'error.html', {'error': "Topology not found!"}) try: + # generate a stack name + # FIXME should add a check to verify this is a unique name + stack_name = topology.name.replace(' ', '_') + # let's parse the json and convert to simple lists and dicts logger.debug("loading config") config = wistarUtils.load_config_from_topology_json(topology.json, topology_id) logger.debug("Config is loaded") - heat_template = wistarUtils.get_heat_json_from_topology_config(config) + heat_template = wistarUtils.get_heat_json_from_topology_config(config, stack_name) logger.debug("heat template created") if not openstackUtils.connect_to_openstack(): return render(request, 'error.html', {'error': "Could not connect to Openstack"}) @@ -1435,7 +1458,7 @@ def deploy_stack(request, topology_id): raise Exception("No project found for %s" % configuration.openstack_project) # FIXME - verify all images are in glance before jumping off here! - stack_name = topology.name.replace(' ', '_') + logger.debug(openstackUtils.create_stack(stack_name, heat_template)) return HttpResponseRedirect('/topologies/' + topology_id + '/') @@ -1463,167 +1486,3 @@ def delete_stack(request, topology_id): logger.debug(openstackUtils.delete_stack(stack_name)) return HttpResponseRedirect('/topologies/' + topology_id + '/') - -def update_stack(request, topology_id): - """ - :param request: Django request - :param topology_id: id of the topology to export - :return: renders the updated heat template - """ - logger.debug("-----Inside update stack-----") - try: - topology = Topology.objects.get(pk=topology_id) - except ObjectDoesNotExist: - return render(request, 'error.html', {'error': "Topology not found!"}) - try: - # let's parse the json and convert to simple lists and dicts - logger.debug("loading config") - config = wistarUtils.load_config_from_topology_json(topology.json, topology_id) - logger.debug("Config is loaded") - - # get the tenant_id of the desired project - tenant_id = openstackUtils.get_project_id(configuration.openstack_project) - logger.debug("using tenant_id of: %s" % tenant_id) - if tenant_id is None: - raise Exception("No project found for %s" % configuration.openstack_project) - - # FIXME - verify all images are in glance before jumping off here! 
- stack_name = topology.name.replace(' ', '_') - - port_list = openstackUtils.get_stack_ports(stack_name, tenant_id) - print(port_list) - heat_template = wistarUtils.get_heat_json_from_topology_config_for_update(config, port_list) - logger.debug("heat template created---test1") - logger.debug(heat_template) - - logger.debug(openstackUtils.update_stack_template(stack_name, heat_template)) - - return HttpResponseRedirect('/topologies/' + topology_id + '/') - - except Exception as e: - logger.debug("Caught Exception in deploy") - logger.debug(str(e)) - return render(request, 'error.html', {'error': str(e)}) - - - -def list_snapshot(request, topology_id): - """ - :param request: Django request - :param topology_id: id of the topology to export - :return: creates a snapshot of the heat template - """ - try: - logger.debug("Inside create Snapshot----------") - tenant_id = openstackUtils.get_project_id(configuration.openstack_project) - logger.debug("using tenant_id of: %s" % tenant_id) - if tenant_id is None: - raise Exception("No project found for %s" % configuration.openstack_project) - - logger.debug("Topology id -------------------: %s" % topology_id) - - - try: - topology = Topology.objects.get(pk=topology_id) - except ObjectDoesNotExist: - logger.error('topology id %s was not found!' % topology_id) - return render(request, 'error.html', {'error': "Topology not found!"}) - - # FIXME - verify all images are in glance before jumping off here! - stack_name = topology.name.replace(' ', '_') - snapshot_list = list() - logger.debug("-------------------stack_name--------------------: %s" % stack_name) - if openstackUtils.connect_to_openstack(): - snapshot_list = openstackUtils.get_snapshot_list(tenant_id, stack_name, topology_id) - - context = {'snapshot_list': snapshot_list} - logger.debug("Before rendering-----------") - return render(request, 'ajax/snapshot_list.html', context) - - except Exception as e: - logger.debug("Caught Exception in deploy") - logger.debug(str(e)) - return render(request, 'error.html', {'error': str(e)}) - - -def rollback_snapshot(request, snapshot_id, topology_id): - """ - :param request: Django request - :param topology_id: id of the topology to export - :return: creates a snapshot of the heat template - """ - - try: - logger.debug("Inside rollback Snapshot----------") - tenant_id = openstackUtils.get_project_id(configuration.openstack_project) - logger.debug("using tenant_id of: %s" % tenant_id) - if tenant_id is None: - raise Exception("No project found for %s" % configuration.openstack_project) - - logger.debug("Topology id -------------------: %s" % topology_id) - - try: - topology = Topology.objects.get(pk=topology_id) - except ObjectDoesNotExist: - logger.error('topology id %s was not found!' % topology_id) - return render(request, 'error.html', {'error': "Topology not found!"}) - - # FIXME - verify all images are in glance before jumping off here! 
- stack_name = topology.name.replace(' ', '_') - logger.debug("Stack name: %s" % stack_name) - logger.debug("Snapshot id: %s" % snapshot_id) - - if openstackUtils.connect_to_openstack(): - logger.debug(openstackUtils.rollback_snapshot(tenant_id, stack_name, snapshot_id)) - - return HttpResponseRedirect('/topologies/' + topology_id + '/') - - except Exception as e: - logger.debug("Caught Exception in deploy") - logger.debug(str(e)) - return render(request, 'error.html', {'error': str(e)}) - - - - - - - - -def delete_snapshot(request, snapshot_id, topology_id): - """ - :param request: Django request - :param topology_id: id of the topology to export - :return: creates a snapshot of the heat template - """ - - try: - logger.debug("Inside rollback Snapshot----------") - tenant_id = openstackUtils.get_project_id(configuration.openstack_project) - logger.debug("using tenant_id of: %s" % tenant_id) - if tenant_id is None: - raise Exception("No project found for %s" % configuration.openstack_project) - - logger.debug("Topology id -------------------: %s" % topology_id) - - - try: - topology = Topology.objects.get(pk=topology_id) - except ObjectDoesNotExist: - logger.error('topology id %s was not found!' % topology_id) - return render(request, 'error.html', {'error': "Topology not found!"}) - - # FIXME - verify all images are in glance before jumping off here! - stack_name = topology.name.replace(' ', '_') - logger.debug("Stack name: %s" % stack_name) - logger.debug("Snapshot id: %s" % snapshot_id) - - if openstackUtils.connect_to_openstack(): - logger.debug(openstackUtils.delete_snapshot(tenant_id, stack_name, snapshot_id)) - - return HttpResponseRedirect('/topologies/' + topology_id + '/') - - except Exception as e: - logger.debug("Caught Exception in deploy") - logger.debug(str(e)) - return render(request, 'error.html', {'error': str(e)}) diff --git a/common/lib/openstackUtils.py b/common/lib/openstackUtils.py index cec3f65..fc48a29 100644 --- a/common/lib/openstackUtils.py +++ b/common/lib/openstackUtils.py @@ -27,7 +27,7 @@ from wistar import configuration # OpenStack component URLs -_glance_url = ':9292' +# _glance_url = ':9292/v1' _analytics_url = ':8081' _api_url = ':8082' _os_url = ':5000/v3' @@ -62,6 +62,21 @@ def connect_to_openstack(): """ logger.debug("--- connect_to_openstack ---") + + logger.debug('verify configuration') + + if not hasattr(configuration, 'openstack_host'): + logger.error('Openstack Host is not configured') + return False + + if not hasattr(configuration, 'openstack_user'): + logger.error('Openstack User is not configured') + return False + + if not hasattr(configuration, 'openstack_password'): + logger.error('Openstack Password is not configured') + return False + global _auth_token global _tenant_id global _token_cache_time @@ -203,7 +218,38 @@ def get_project_id(project_name): return None -def upload_image_to_glance(name, image_file_path): +def get_network_id(network_name): + """ + Gets the UUID of the network by network_name + :param network_name: Name of the network + :return: string UUID or None + """ + + logger.debug("--- get_network_id ---") + + networks_url = create_neutron_url('/networks?name=%s' % network_name) + logger.info(networks_url) + networks_string = do_get(networks_url) + logger.info(networks_string) + if networks_string is None: + logger.error('Did not find a network for that name!') + return None + + try: + networks = json.loads(networks_string) + except ValueError: + logger.error('Could not parse json response in get_network_id') + return None + 
+ for network in networks["networks"]: + if network["name"] == network_name: + logger.info('Found id!') + return str(network["id"]) + + return None + + +def upload_image_to_glance_old(name, image_file_path): """ :param name: name of the image to be uploaded @@ -239,18 +285,144 @@ def upload_image_to_glance(name, image_file_path): return None +def upload_image_to_glance(name, image_file_path): + """ + + :param name: name of the image to be created + :param image_file_path: path of the file to upload + :return: json encoded results string from glance REST api + """ + logger.debug("--- create_image_in_glance ---") + + url = create_glance_url('/images') + + try: + + d = dict() + d['disk_format'] = 'qcow2' + d['container_format'] = 'bare' + d['name'] = name + + r_data = do_post(url, json.dumps(d)) + + except Exception as e: + logger.error("Could not upload image to glance") + logger.error("error was %s" % str(e)) + return None + + try: + r_json = json.loads(r_data) + if 'id' in r_json: + image_id = r_json['id'] + + logger.info('Preparing to push image data to glance!') + f = open(image_file_path, 'rb') + fio = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) + upload_url = create_glance_url('/images/%s/file' % image_id) + request = urllib2.Request(upload_url, fio) + request.add_header("Content-Type", "application/octet-stream") + request.add_header("X-Auth-Token", _auth_token) + request.get_method = lambda: 'PUT' + return urllib2.urlopen(request) + else: + logger.error('Could not find an ID key in returned json from glance image create') + logger.error(r_data) + logger.error('returning None') + return None + + except ValueError: + logger.error('Could not parse JSON return data from glance image create') + return None + + +def get_neutron_ports_for_network(network_name): + """ + :return: json response from /ports URL + """ + logger.debug("--- get_neutron_port_list ---") + + network_id = get_network_id(network_name) + if network_id is None: + logger.warn("couldn't find the correct network_id") + return None + + url = create_neutron_url("/ports.json?network_id=%s&fields=id&fields=fixed_ips" % network_id) + logger.debug(url) + port_list_string = do_get(url) + logger.debug(port_list_string) + + return port_list_string + + +def get_consumed_management_ips(): + """ + Return a list of dicts of the format + [ + { "ip-address": "xxx.xxx.xxx.xxx"} + ] + This mimics the libvirt dnsmasq format for dhcp reservations + This is used in the wistarUtils.get_dhcp_reserved_ips() as a single place to + get all reserved management ips + :return: list of dicts + """ + consumed_ips = list() + ports_string = get_neutron_ports_for_network(configuration.openstack_mgmt_network) + if ports_string is None: + return consumed_ips + try: + ports = json.loads(ports_string) + except ValueError: + logger.error('Could not parse json response in get_consumed_management_ips') + return consumed_ips + + if 'ports' not in ports: + logger.error('unexpected keys in json response!') + return consumed_ips + + for port in ports['ports']: + for fixed_ip in port['fixed_ips']: + if configuration.management_prefix in fixed_ip['ip_address']: + fip = dict() + fip['ip-address'] = fixed_ip['ip_address'] + consumed_ips.append(fip) + + return consumed_ips + + def get_glance_image_list(): """ - :return: json response from glance /images/ URL + :return: list of json objects from glance /images URL filtered with only shared or public images """ logger.debug("--- get_glance_image_list ---") url = create_glance_url("/images") image_list_string = 
do_get(url) + + image_list = list() + if image_list_string is None: - return None + return image_list + + try: + glance_return = json.loads(image_list_string) + except ValueError: + logger.warn('Could not parse json response from glance /images') + return image_list + + if 'images' not in glance_return: + logger.warn('did not find images key in glance return data') + logger.debug(glance_return) + return image_list + + for im in glance_return['images']: + + if 'status' in im and im['status'] != 'active': + logger.debug('Skipping non-active image %s' % im['name']) + continue + + if 'visibility' in im and im['visibility'] in ['shared', 'public']: + image_list.append(im) - image_list = json.loads(image_list_string) return image_list @@ -278,10 +450,10 @@ def get_image_id_for_name(image_name): logger.debug("--- get_image_id_for_name ---") image_list = get_glance_image_list() - if image_list is None: + if image_list is None or len(image_list) == 0: return None - for image in image_list["images"]: + for image in image_list: if image["name"] == image_name: return image["id"] @@ -354,10 +526,32 @@ def get_nova_flavors(project_name): def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): + """ + Query nova to get all flavors and return the flavor that best matches our desired constraints + :param project_name: name of the project to check for flavors + :param cpu: number of cores desired + :param ram: amount of ram desired in MB + :param disk: amount of disk required in GB + :return: flavor object {"name": "m1.xlarge"} + """ logger.debug("checking: " + str(cpu) + " " + str(ram) + " " + str(disk)) + + # create an emergency flavor so we have something to return in case we can't connect to openstack + # or some other issue prevents us from determining the right thing to do + emergency_flavor = dict() + emergency_flavor['name'] = "m1.xlarge" + + if not connect_to_openstack(): + return emergency_flavor + flavors = get_nova_flavors(project_name) - flavors_object = json.loads(flavors) + try: + flavors_object = json.loads(flavors) + except ValueError: + logger.error('Could not parse nova return data') + return emergency_flavor + cpu_candidates = list() ram_candidates = list() disk_candidates = list() @@ -395,7 +589,7 @@ def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): if len(disk_candidates) == 0: # uh-oh, just return the largest and hope for the best! - return "m1.xlarge" + return emergency_flavor elif len(disk_candidates) == 1: return disk_candidates[0] else: @@ -403,7 +597,7 @@ def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): # let's find the smallest flavor left! 
cpu_low = 99 disk_low = 999 - ram_low = 9999 + ram_low = 99999 for f in disk_candidates: if f["vcpus"] < cpu_low: cpu_low = f["vcpus"] @@ -454,209 +648,6 @@ def create_stack(stack_name, template_string): return do_post(url, data) - -def create_stack_snapshot(stack_name, tenant_id, snapshot_name): - """ - Creates a snapshot of the Stack via a HEAT REST call - :param stack_name: name of the stack to create - :param tenant_id: tenant id of the openstack project - :param snapshot_name: name of the snapshot - :return: JSON response from HEAT-API or None on failure - """ - logger.debug("In create stack snapshot--------------") - stack_details = get_stack_details(stack_name) - if stack_details is None: - return None - else: - stack_id = str(stack_details["id"]) - - #stack_id = get_stack_details(stack_name) - create_snapshot_url = create_heat_url("/" + str(tenant_id) + "/stacks/%s/%s/snapshots" % (stack_name, stack_id)) - data = """{ - "name": "%s" - }""" % snapshot_name - logger.debug("Data before posting-----------") - logger.debug(data) - - return do_post(create_snapshot_url, data) - - - -def get_snapshot_list(tenant_id, stack_name, topology_id): - - print("In snapshot list") - - stack_details = get_stack_details(stack_name) - if stack_details is None: - return None - else: - stack_id = str(stack_details["id"]) - snapshot_list_url = create_heat_url('/%s/stacks/%s/%s/snapshots' % (tenant_id, stack_name, stack_id)) - stack_snapshot_list = do_get(snapshot_list_url) - l1 = json.loads(stack_snapshot_list) - #l2 = l1['snapshots'] - snap_list = list() - - for snap in l1["snapshots"]: - if snap["status"] == "COMPLETE": - snap_detail = get_snap_detail(snap, stack_name, topology_id) - snap_list.append(snap_detail) - logger.debug(snap_list) - return snap_list - -def get_snap_detail(snap, stack_name, topology_id): - - logger.debug("Getting snapshot details-------------") - logger.debug(snap) - snap_list = dict() - snap_list["name"] = snap["name"] - snap_list["id"] = snap["id"] - snap_list["stack_name"] = stack_name - snap_list["topology_id"] = str(topology_id) - - return snap_list - -def get_server_details(search_string): - server_details_url = create_nova_url('/servers?name=%s' % search_string) - server_details_json = do_get(server_details_url) - server_details_dict = json.loads(server_details_json) - - if server_details_dict is None: - return None - else: - server_details_list = list() - server_details_list = server_details_dict['servers'] - for det in server_details_list: - server_details = det['id'] - logger.debug(server_details) - return server_details - - - - -def delete_snapshot(tenant_id, stack_name, snapshot_id): - """ - Deletes a stack from OpenStack - :param stack_name: name of the stack to be deleted - :return: JSON response fro HEAT API - """ - logger.debug("--- delete_stack_snapshot ---") - - stack_details = get_stack_details(stack_name) - if stack_details is None: - return None - else: - stack_id = stack_details["id"] - url = create_heat_url("/%s/stacks/%s/%s/snapshots/%s" % (tenant_id, stack_name, stack_id, snapshot_id)) - return do_delete(url) - - - -def rollback_snapshot(tenant_id, stack_name, snapshot_id): - """ - Deletes a stack from OpenStack - :param stack_name: name of the stack to be deleted - :return: JSON response fro HEAT API - """ - logger.debug("--- delete_stack_snapshot ---") - data = "" - stack_details = get_stack_details(stack_name) - if stack_details is None: - return None - else: - stack_id = stack_details["id"] - url = 
create_heat_url("/%s/stacks/%s/%s/snapshots/%s/restore" % (tenant_id, stack_name, stack_id, snapshot_id)) - return do_post(url, data) - - - -def rebuild_instance_openstack(server_id, image_id): - logger.debug("-------Rebuild server openstack") - url = create_nova_url("/servers/%s/action" % server_id) - data = '''{ - "rebuild" : { - "imageRef" : "%s" - } - }''' % str(image_id) - - return do_post(url, data) - - - - -def update_stack_template(stack_name, template_string): - - """ - Creates a Stack via a HEAT template - :param stack_name: name of the stack to create - :param template_string: HEAT template to be used - :return: JSON response from HEAT-API or None on failure - """ - logger.debug("--- update_stack ---") - stack_details = get_stack_details(stack_name) - if stack_details is None: - return None - else: - stack_id = stack_details["id"] - - url = create_heat_url("/" + str(_tenant_id) + "/stacks/%s/%s" % (stack_name, stack_id)) - logger.debug("URL to update stack") - logger.debug(url) - data = '''{ - "disable_rollback": true, - "parameters": {}, - "template": %s - }''' % (template_string) - logger.debug("updating CREATING stack with data:") - logger.debug(data) - try: - request = urllib2.Request(url) - request.add_header("Content-Type", "application/json") - request.add_header("charset", "UTF-8") - request.add_header("X-Auth-Token", _auth_token) - request.get_method = lambda: 'PATCH' - - if data == "": - result = urllib2.urlopen(request) - else: - result = urllib2.urlopen(request, data) - - return result.read() - except URLError as e: - logger.error("Could not perform PUT to url: %s" % url) - logger.error("error was %s" % str(e)) - return None - #return do_post(url, data) - - - - -def get_stack_ports(stack_name, tenant_id): - stack_details = get_stack_details(stack_name) - - if stack_details is None: - return None - else: - stack_id = str(stack_details["id"]) - - try: - get_port_url = create_heat_url( '/%s/stacks/%s/%s/resources?type=OS::Neutron::Port' % (tenant_id, stack_name, stack_id)) - resources = do_get(get_port_url) - resource_dict = json.loads(resources) - resource_list = resource_dict['resources'] - print(resource_list) - port_list = list() - for port in resource_list: - port_list.append(port['resource_name']) - print(port_list) - return port_list - - except URLError as e: - logger.error("Could not perform PUT to url: %s" % url) - logger.error("error was %s" % str(e)) - return None - - def get_nova_serial_console(instance_name): """ Get the websocket URL for the serial proxy for a given nova server (instance) @@ -714,6 +705,10 @@ def create_glance_url(url): return "http://" + configuration.openstack_host + _glance_url + url +def create_neutron_url(url): + return "http://" + configuration.openstack_host + _neutron_url + url + + def create_os_url(url): return "http://" + configuration.openstack_host + _os_url + url diff --git a/common/lib/wistarUtils.py b/common/lib/wistarUtils.py index d4cf564..cdaf800 100644 --- a/common/lib/wistarUtils.py +++ b/common/lib/wistarUtils.py @@ -19,6 +19,7 @@ import json import logging +import math import os import re import subprocess @@ -75,26 +76,28 @@ def _generate_mac(topology_id): """ silly attempt to keep mac addresses unique use the topology id to generate 2 octets, and the number of - macs used so far to generate the last one - :param topology_id: id of the topology we are building + macs used so far to generate the last two octets. 
+ Uses the locally administered address ranges 52:54:00 through 52:54:FF + :param topology_id: string id of the topology we are building :return: mostly unique mac address that should be safe to deploy """ + tid = int(topology_id) global mac_counter global used_macs - b1 = "52:54:" - b2 = '%02x' % int(len(used_macs[topology_id]) / 256) - base = b1 + str(b2) + ":" - tid = "%04x" % int(topology_id) - mac_base = base + str(tid[:2]) + ":" + str(tid[2:4]) + ":" - mac = mac_base + (str("%02x" % mac_counter)[:2]) + base = '52:54:00:00:00:00' + ba = base.split(':') + ba[2] = '%02x' % int(tid / 256) + ba[3] = '%02x' % int(tid % 256) + ba[4] = '%02x' % int(len(used_macs[topology_id]) / 256) + ba[5] = '%02x' % int(mac_counter) mac_counter += 1 mac_counter = mac_counter % 256 - return mac + return ':'.join(ba) -def get_heat_json_from_topology_config(config): +def get_heat_json_from_topology_config(config, project_name='admin'): """ Generates heat template from the topology configuration object use load_config_from_topology_json to get the configuration from the Topology @@ -119,10 +122,11 @@ def get_heat_json_from_topology_config(config): nrs = dict() nrs["type"] = "OS::Neutron::Subnet" - + # p = dict() p["cidr"] = "1.1.1.0/24" p["enable_dhcp"] = False + p["gateway_ip"] = "" p["name"] = network["name"] + "_subnet" if network["name"] == "virbr0": p["network_id"] = configuration.openstack_mgmt_network @@ -149,10 +153,21 @@ def get_heat_json_from_topology_config(config): image_details_dict[device["imageId"]] = image_details image_name = image_details["name"] - if "disk" in image_details: - image_disk = image_details["disk"] - else: - image_disk = 20 + + image_disk_size = 20 + + # set the size in GB, rounding up to the nearest int + if 'size' in image_details: + current_size = int(image_details['size']) + image_disk_size = int(math.ceil(current_size / 100000000)) + + # if the flavor asks for a minimum disk size, let's see if it's larger that what we have + if "min_disk" in image_details and image_details['min_disk'] > image_disk_size: + image_disk_size = image_details["min_disk"] + + # if the user has specified a desired disk size, grab it here so we get the correct flavor + if type(image_disk_size) is int and device["resizeImage"] > image_disk_size: + image_disk_size = device["resizeImage"] # determine openstack flavor here device_ram = int(device["ram"]) @@ -161,7 +176,7 @@ def get_heat_json_from_topology_config(config): flavor_detail = openstackUtils.get_minimum_flavor_for_specs(configuration.openstack_project, device_cpu, device_ram, - image_disk + image_disk_size ) flavor = flavor_detail["name"] @@ -196,6 +211,7 @@ def get_heat_json_from_topology_config(config): for cfp in device["configDriveParams"]: if "destination" in cfp and cfp["destination"] == "/boot/loader.conf": + logger.debug("Creating loader.conf config-drive entry") template_name = cfp["template"] loader_string = osUtils.compile_config_drive_params_template(template_name, device["name"], @@ -204,12 +220,17 @@ def get_heat_json_from_topology_config(config): device["ip"], device["managementInterface"]) - for l in loader_string: - left, right = l.split('=') - if left not in metadata: - metadata[left] = right + logger.debug('----------') + logger.debug(loader_string) + logger.debug('----------') + for l in loader_string.split('\n'): + if '=' in l: + left, right = l.split('=') + if left not in metadata and left != '': + metadata[left] = right.replace('"', '') if "destination" in cfp and cfp["destination"] == "/juniper.conf": + 
logger.debug("Creating juniper.conf config-drive entry") template_name = cfp["template"] personality_string = osUtils.compile_config_drive_params_template(template_name, device["name"], @@ -220,166 +241,43 @@ def get_heat_json_from_topology_config(config): dr["properties"]["personality"] = dict() dr["properties"]["personality"] = {"/config/juniper.conf": personality_string} + else: + logger.debug('No juniper.conf found here ') - template["resources"][device["name"]] = dr - - for device in config["devices"]: - index = 0 - for port in device["interfaces"]: - pr = dict() - pr["type"] = "OS::Neutron::Port" - p = dict() - - if port["bridge"] == "virbr0": - p["network_id"] = configuration.openstack_mgmt_network - elif port["bridge"] == configuration.openstack_external_network: - p["network_id"] = configuration.openstack_external_network - else: - p["network_id"] = {"get_resource": port["bridge"]} - p["name"] = device["name"] + "_port" + str(index) - - pr["properties"] = p - template["resources"][device["name"] + "_port" + str(index)] = pr - index += 1 - - return json.dumps(template) - - - - - - - - -def get_heat_json_from_topology_config_for_update(config, port_list): - """ - Generates heat template from the topology configuration object - use load_config_from_topology_json to get the configuration from the Topology - :param config: configuration dict from load_config_from_topology_json - :return: json encoded heat template as String - """ - - template = dict() - template["heat_template_version"] = "2013-05-23" - template["resources"] = dict() - - for network in config["networks"]: - nr = dict() - nr["type"] = "OS::Neutron::Net" - - nrp = dict() - nrp["shared"] = False - nrp["name"] = network["name"] - nrp["admin_state_up"] = True - - nr["properties"] = nrp - - nrs = dict() - nrs["type"] = "OS::Neutron::Subnet" - - p = dict() - p["cidr"] = "1.1.1.0/24" - p["enable_dhcp"] = False - p["name"] = network["name"] + "_subnet" - if network["name"] == "virbr0": - p["network_id"] = configuration.openstack_mgmt_network - elif network["name"] == configuration.openstack_external_network: - p["network_id"] = configuration.openstack_external_network - else: - p["network_id"] = {"get_resource": network["name"]} - - nrs["properties"] = p - - template["resources"][network["name"]] = nr - template["resources"][network["name"] + "_subnet"] = nrs - - # cache the image_details here to avoid multiple REST calls for details about an image type - # as many topologies have lots of the same types of images around - image_details_dict = dict() - - for device in config["devices"]: - - if device["imageId"] in image_details_dict: - image_details = image_details_dict[device["imageId"]] - else: - image_details = imageUtils.get_image_detail(device["imageId"]) - image_details_dict[device["imageId"]] = image_details - - image_name = image_details["name"] - if "disk" in image_details: - image_disk = image_details["disk"] - else: - image_disk = 20 - - # determine openstack flavor here - device_ram = int(device["ram"]) - device_cpu = int(device["cpu"]) - - flavor_detail = openstackUtils.get_minimum_flavor_for_specs(configuration.openstack_project, - device_cpu, - device_ram, - image_disk - ) - - flavor = flavor_detail["name"] - - dr = dict() - dr["type"] = "OS::Nova::Server" - dr["properties"] = dict() - dr["properties"]["flavor"] = flavor - dr["properties"]["networks"] = [] - index = 0 - for p in device["interfaces"]: - port = dict() - port["port"] = dict() - if device["name"] + "_port" + str(index) in port_list: - 
port["port"]["get_resource"] = device["name"] + "_port" + str(index) + "_nora" - else: - port["port"]["get_resource"] = device["name"] + "_port" + str(index) - index += 1 - dr["properties"]["networks"].append(port) - - dr["properties"]["image"] = image_name - dr["properties"]["name"] = device["name"] - - if device["configDriveSupport"]: + if device['cloudInitSupport']: + logger.debug('creating cloud-init script') dr["properties"]["config_drive"] = True dr["properties"]["user_data_format"] = "RAW" metadata = dict() metadata["hostname"] = device["name"] - metadata["console"] = "vidconsole" dr["properties"]["metadata"] = metadata + # grab the prefix len from the management subnet which is in the form 192.168.122.0/24 + if '/' in configuration.management_subnet: + management_prefix_len = configuration.management_subnet.split('/')[1] + else: + management_prefix_len = '24' - # let's check all the configDriveParams and look for a junos config - # FIXME - this may need tweaked if we need to include config drive cloud-init support for other platforms - # right now we just need to ignore /boot/loader.conf - for cfp in device["configDriveParams"]: - - if "destination" in cfp and cfp["destination"] == "/boot/loader.conf": - template_name = cfp["template"] - loader_string = osUtils.compile_config_drive_params_template(template_name, - device["name"], - device["label"], - device["password"], - device["ip"], - device["managementInterface"]) + management_ip = device['ip'] + '/' + management_prefix_len - for l in loader_string: - left, right = l.split('=') - if left not in metadata: - metadata[left] = right + device_config = osUtils.get_cloud_init_config(device['name'], + device['label'], + management_ip, + device['managementInterface'], + device['password']) - if "destination" in cfp and cfp["destination"] == "/juniper.conf": - template_name = cfp["template"] - personality_string = osUtils.compile_config_drive_params_template(template_name, - device["name"], - device["label"], - device["password"], - device["ip"], - device["managementInterface"]) + script_string = "" + if "configScriptId" in device and device["configScriptId"] != 0: + logger.debug("Passing script data!") + try: + script = Script.objects.get(pk=int(device["configScriptId"])) + script_string = script.script + device_config["script_param"] = device.get("configScriptParam", '') + logger.debug(script_string) + except ObjectDoesNotExist: + logger.info('config script was specified but was not found!') - dr["properties"]["personality"] = dict() - dr["properties"]["personality"] = {"/config/juniper.conf": personality_string} + user_data_string = osUtils.render_cloud_init_user_data(device_config, script_string) + dr["properties"]["user_data"] = user_data_string template["resources"][device["name"]] = dr @@ -392,28 +290,27 @@ def get_heat_json_from_topology_config_for_update(config, port_list): if port["bridge"] == "virbr0": p["network_id"] = configuration.openstack_mgmt_network + + # specify our desired IP address on the management interface + p['fixed_ips'] = list() + fip = dict() + fip['ip_address'] = device['ip'] + p['fixed_ips'].append(fip) + elif port["bridge"] == configuration.openstack_external_network: p["network_id"] = configuration.openstack_external_network else: p["network_id"] = {"get_resource": port["bridge"]} - if device["name"] + "_port" + str(index) in port_list: - p["name"] = device["name"] + "_port" + str(index) + "_nora" - else: - p["name"] = device["name"] + "_port" + str(index) + # disable port security on all other ports (in 
case this isn't set globally) + p['port_security_enabled'] = False pr["properties"] = p - - if device["name"] + "_port" + str(index) in port_list: - template["resources"][device["name"] + "_port" + str(index) + "_nora"] = pr - else: - template["resources"][device["name"] + "_port" + str(index)] = pr + template["resources"][device["name"] + "_port" + str(index)] = pr index += 1 return json.dumps(template) - - def _get_management_macs_for_topology(topology_id): """ returns a list of all macs used for management interfaces for a topology @@ -447,7 +344,10 @@ def load_config_from_topology_json(topology_json, topology_id): # preload all the existing management mac addresses if any global used_macs - used_macs[topology_id] = _get_management_macs_for_topology(topology_id) + if configuration.deployment_backend == "kvm": + used_macs[topology_id] = _get_management_macs_for_topology(topology_id) + else: + used_macs[topology_id] = list() json_data = json.loads(topology_json) @@ -467,8 +367,7 @@ def load_config_from_topology_json(topology_json, topology_id): # has this topology already been deployed? is_deployed = False - existing_macs = _get_management_macs_for_topology(topology_id) - if len(existing_macs) > 0: + if len(used_macs[topology_id]) > 0: # yep, already been deployed is_deployed = True @@ -554,15 +453,18 @@ def load_config_from_topology_json(topology_json, topology_id): device["uuid"] = json_object.get('id', '') device["interfaces"] = [] - # determine next available VNC port that has not currently been assigned - next_vnc_port = libvirtUtils.get_next_domain_vnc_port(device_index) - - # verify that this port is not actually in use by another process - while osUtils.check_port_in_use(next_vnc_port): - device_index += 1 + device['vncPort'] = 0 + if configuration.deployment_backend == "kvm": + # determine next available VNC port that has not currently been assigned next_vnc_port = libvirtUtils.get_next_domain_vnc_port(device_index) - device["vncPort"] = next_vnc_port + # verify that this port is not actually in use by another process + while osUtils.check_port_in_use(next_vnc_port): + device_index += 1 + next_vnc_port = libvirtUtils.get_next_domain_vnc_port(device_index) + + device["vncPort"] = next_vnc_port + # is this a child VM? 
# children will *always* have a parent attribute set in their userdata parent_id = user_data.get("parent", "") @@ -605,7 +507,10 @@ def load_config_from_topology_json(topology_json, topology_id): # management interface mi will always be connected to default management network (virbr0 on KVM) mi = dict() - if is_deployed and libvirtUtils.domain_exists(device['name']): + # slight optimization for kvm backend, dont generate new mac + if configuration.deployment_backend == "kvm" and \ + is_deployed and \ + libvirtUtils.domain_exists(device['name']): mi['mac'] = libvirtUtils.get_management_interface_mac_for_domain(device['name']) else: mi['mac'] = generate_next_mac(topology_id) @@ -784,7 +689,9 @@ def load_config_from_topology_json(topology_json, topology_id): if d["mgmtInterfaceIndex"] == -1: mi = dict() # if this has already been deployed, let's preserve the existing mac address that has been assigned - if is_deployed and libvirtUtils.domain_exists(device['name']): + if configuration.deployment_backend == "kvm" and \ + is_deployed and \ + libvirtUtils.domain_exists(device['name']): mi['mac'] = libvirtUtils.get_management_interface_mac_for_domain(device['name']) else: mi['mac'] = generate_next_mac(topology_id) @@ -993,7 +900,7 @@ def get_used_ips(): # logger.info(last_octet) all_ips.append(int(last_octet)) - dhcp_leases = get_dhcp_reserved_ips() + dhcp_leases = get_consumed_management_ips() all_ips.extend(dhcp_leases) logger.debug("sorting and returning all_ips") @@ -1001,23 +908,32 @@ def get_used_ips(): return all_ips -def get_dhcp_reserved_ips(): - # pull current ips out of dhcp reservations and leases files - # return as a single list +def get_consumed_management_ips(): + """ + Return a list of all ip addresses that are currently consumed on the wistar management network + THIS ASSUMES A /24 for THE MANAGEMENT NETWORK! 
+ :return: a list of ints representing the last octet of the /24 management network + """ all_ips = list() - # let's also grab current dhcp leases as well - dhcp_leases = osUtils.get_dhcp_leases() - for lease in dhcp_leases: - ip = str(lease["ip-address"]) - logger.debug("adding active lease %s" % ip) - last_octet = ip.split('.')[-1] - all_ips.append(int(last_octet)) + # let's also grab consumed management ips as well + if configuration.deployment_backend == "openstack": + if openstackUtils.connect_to_openstack(): + dhcp_leases = openstackUtils.get_consumed_management_ips() + else: + return all_ips + else: + dhcp_leases = osUtils.get_dhcp_leases() + # let's also grab current dhcp reservations + dhcp_reservations = osUtils.get_dhcp_reservations() + for dr in dhcp_reservations: + ip = str(dr["ip-address"]) + last_octet = ip.split('.')[-1] + all_ips.append(int(last_octet)) - # let's also grab current dhcp reservations - dhcp_leases = osUtils.get_dhcp_reservations() for lease in dhcp_leases: ip = str(lease["ip-address"]) + logger.debug("adding active lease %s" % ip) last_octet = ip.split('.')[-1] all_ips.append(int(last_octet)) diff --git a/common/static/js/topology_utils.js b/common/static/js/topology_utils.js index d985923..d74907d 100644 --- a/common/static/js/topology_utils.js +++ b/common/static/js/topology_utils.js @@ -67,14 +67,6 @@ function setImageType() { } } - - - - - - - - function addIconAndClose() { rv = addIcon(); if (rv == true) { diff --git a/topologies/templates/topologies/edit.html b/topologies/templates/topologies/edit.html index 8949579..e7d823e 100644 --- a/topologies/templates/topologies/edit.html +++ b/topologies/templates/topologies/edit.html @@ -39,6 +39,8 @@ + + @@ -179,7 +181,7 @@ } updateBootCounter++; console.log("updating boot up state for topology"); - for(v=0;v
-
- -
-
- {% if global_config.deployment_backend == "openstack" and is_deployed == true %} - - - - {% endif %} @@ -2137,7 +2083,9 @@

- + + +
@@ -2207,11 +2155,6 @@ - {% if global_config.deployment_backend == "openstack" and is_deployed == true %} - - - - {% endif %} @@ -2585,43 +2528,6 @@
- - -
-
- - - - - - - - - - - - - - - - -
- -
-
- {% if topo_id != None %}
diff --git a/topologies/templates/topologies/overlay/rebuild_instance.html b/topologies/templates/topologies/overlay/rebuild_instance.html deleted file mode 100644 index d31ce7d..0000000 --- a/topologies/templates/topologies/overlay/rebuild_instance.html +++ /dev/null @@ -1,62 +0,0 @@ - - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - -
- X -
-

Rebuild Instance

-
- Instance Name - - {{instance_name}} -
- Base Image - - -
- - - - - -   - -
- diff --git a/topologies/urls.py b/topologies/urls.py index ac816ef..ce47536 100644 --- a/topologies/urls.py +++ b/topologies/urls.py @@ -31,9 +31,6 @@ url(r'^error/$', views.error, name='error'), url(r'^clone/(?P\d+)/$', views.clone, name='clone'), url(r'^createConfigSet/$', views.create_config_set, name='createConfigSet'), - url(r'^createSnapshot/$', views.create_snapshot_topo, name='createSnapshot'), - url(r'^rebuildInstance/$', views.rebuild_instance, name='rebuildInstance'), - url(r'^rebuildServer/$', views.rebuild_server, name='rebuildServer'), url(r'^delete/(?P\d+)/$', views.delete, name='delete'), url(r'^(?P\d+)/$', views.detail, name='detail'), url(r'^launch/(?P\d+)$', views.launch, name='launch'), diff --git a/topologies/views.py b/topologies/views.py index b10bec4..e6c1f85 100644 --- a/topologies/views.py +++ b/topologies/views.py @@ -33,6 +33,7 @@ from common.lib import junosUtils from common.lib import libvirtUtils from common.lib import osUtils +from common.lib import ovsUtils from common.lib import wistarUtils from common.lib import openstackUtils @@ -67,19 +68,22 @@ def edit(request): def new(request): logger.debug('---- topology new ----') - image_list = Image.objects.all().order_by('name') + script_list = Script.objects.all().order_by('name') vm_types = configuration.vm_image_types vm_types_string = json.dumps(vm_types) - image_list_json = serializers.serialize('json', Image.objects.all(), fields=('name', 'type')) currently_allocated_ips = wistarUtils.get_used_ips() - dhcp_reservations = wistarUtils.get_dhcp_reserved_ips() + dhcp_reservations = wistarUtils.get_consumed_management_ips() if configuration.deployment_backend == "openstack": external_bridge = configuration.openstack_external_network + image_list = Image.objects.filter(filePath='').order_by('name') else: external_bridge = configuration.kvm_external_bridge + image_list = Image.objects.exclude(filePath='').order_by('name') + + image_list_json = serializers.serialize('json', image_list, fields=('name', 'type')) context = {'image_list': image_list, 'script_list': script_list, 'vm_types': vm_types_string, 'image_list_json': image_list_json, @@ -126,7 +130,7 @@ def import_topology(request): logger.debug("Iterating json objects in imported data") for json_object in json_data: if "userData" in json_object and "wistarVm" in json_object["userData"]: - logger.debug("Found one") + # logger.debug("Found one") ud = json_object["userData"] # check if we have this type of image image_list = Image.objects.filter(type=ud["type"]) @@ -137,7 +141,7 @@ def import_topology(request): '! 
Please upload an image of this type and try again') image = image_list[0] - logger.debug(str(image.id)) + # logger.debug(str(image.id)) json_object["userData"]["image"] = image.id valid_ip = wistarUtils.get_next_ip(currently_allocated_ips, next_ip_floor) @@ -157,11 +161,14 @@ def import_topology(request): vm_types = configuration.vm_image_types vm_types_string = json.dumps(vm_types) + dhcp_reservations = wistarUtils.get_consumed_management_ips() + context = {'image_list': image_list, 'image_list_json': image_list_json, 'allocated_ips': currently_allocated_ips, 'script_list': script_list, 'vm_types': vm_types_string, + 'dhcp_reservations': dhcp_reservations, 'topo': topology } @@ -195,7 +202,7 @@ def clone(request, topo_id): currently_allocated_ips = wistarUtils.get_used_ips() cloned_ips = wistarUtils.get_used_ips_from_topology_json(topology.json) - dhcp_reservations = wistarUtils.get_dhcp_reserved_ips() + dhcp_reservations = wistarUtils.get_consumed_management_ips() currently_allocated_ips += cloned_ips @@ -273,11 +280,19 @@ def delete(request, topology_id): if configuration.deployment_backend == "kvm": + if hasattr(configuration, "use_openvswitch") and configuration.use_openvswitch: + use_ovs = True + else: + use_ovs = False + network_list = libvirtUtils.get_networks_for_topology(topology_prefix) for network in network_list: logger.debug("undefine network: " + network["name"]) libvirtUtils.undefine_network(network["name"]) + if use_ovs: + ovsUtils.delete_bridge(network["name"]) + domain_list = libvirtUtils.get_domains_for_topology(topology_prefix) for domain in domain_list: @@ -471,7 +486,7 @@ def add_instance_form(request): image_list_json = serializers.serialize('json', Image.objects.all(), fields=('name', 'type')) currently_allocated_ips = wistarUtils.get_used_ips() - dhcp_reservations = wistarUtils.get_dhcp_reserved_ips() + dhcp_reservations = wistarUtils.get_consumed_management_ips() if configuration.deployment_backend == "openstack": external_bridge = configuration.openstack_external_network @@ -487,124 +502,3 @@ def add_instance_form(request): 'dhcp_reservations': dhcp_reservations, } return render(request, 'topologies/overlay/add_instance.html', context) - - -def rebuild_instance(request): - logger.info('---------rebuild instance--------') - required_fields = set(['instance_name']) - if not required_fields.issubset(request.POST): - return render(request, 'ajax/overlayError.html', {'error': "Invalid Parameters in POST"}) - - instance_name = request.POST['instance_name'] - topo_id = request.POST['topology_id'] - try: - image_list_linux = list() - image_list = Image.objects.all().order_by('name') - for i in image_list: - if not (i.type.startswith('junos')): - - image_list_linux.append(i) - - vm_types = configuration.vm_image_types - vm_types_string = json.dumps(vm_types) - logger.debug(vm_types_string) - - image_list_json = serializers.serialize('json', Image.objects.all(), fields=('name', 'type')) - logger.debug(image_list_json) - - search_string = 't%s_%s' % (topo_id, instance_name) - logger.debug(search_string) - if openstackUtils.connect_to_openstack(): - server_id = openstackUtils.get_server_details(search_string) - context = {'image_list': image_list_linux, - 'instance_name': instance_name, - 'vm_types': vm_types_string, - 'image_list_json': image_list_json, - 'server_id': server_id, - 'topo_id': topo_id - } - return render(request, 'topologies/overlay/rebuild_instance.html', context) - except Exception as e: - logger.debug("Caught Exception in deploy") - logger.debug(str(e)) - 
return render(request, 'error.html', {'error': str(e)}) - - -def rebuild_server(request): - - try: - logger.debug("Inside the rebuild method") - required_fields = set(['topoIconImageSelect', 'topo_id', 'server_id']) - if not required_fields.issubset(request.POST): - return render(request, 'ajax/overlayError.html', {'error': "Invalid Parameters in POST"}) - topology_id = request.POST["topo_id"] - server_id = request.POST["server_id"] - image_string = request.POST["topoIconImageSelect"].split(":")[2] - if openstackUtils.connect_to_openstack(): - image_id = openstackUtils.get_image_id_for_name(image_string) - - logger.debug("Parameters %s %s %s" % (server_id, topology_id, image_id)) - - if openstackUtils.connect_to_openstack(): - res = openstackUtils.rebuild_instance_openstack(server_id, image_id) - - logger.debug("----------------Response----------------") - - if res is None: - return render(request, 'error.html', {'error': "Not able to rebuild the server"}) - else: - return HttpResponseRedirect('/topologies/' + topology_id + '/') - - except Exception as e: - logger.debug("Caught Exception in deploy") - logger.debug(str(e)) - return render(request, 'error.html', {'error': str(e)}) - - - - -def create_snapshot_topo(request): - - - """ - :param request: Django request - :param topology_id: id of the topology to export - :param snap_name: id of the topology to export - :return: creates a snapshot of the heat template - """ - try: - logger.debug("Inside create Snapshot----------") - tenant_id = openstackUtils.get_project_id(configuration.openstack_project) - logger.debug("using tenant_id of: %s" % tenant_id) - if tenant_id is None: - raise Exception("No project found for %s" % configuration.openstack_project) - logger.debug(request.POST) - required_fields = set(['snap_name', 'snapshot_topo_id']) - if not required_fields.issubset(request.POST): - return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"}) - - topology_id = request.POST["snapshot_topo_id"] - snap_name = request.POST["snap_name"] - logger.debug("using tenant_id of: %s" % tenant_id) - logger.debug("Topology id -------------------: %s" % topology_id) - logger.debug("Snap name -------------------: %s" % snap_name) - - try: - topology = Topology.objects.get(pk=topology_id) - except ObjectDoesNotExist: - logger.error('topology id %s was not found!' % topology_id) - return render(request, 'error.html', {'error': "Topology not found!"}) - - # FIXME - verify all images are in glance before jumping off here! 
- stack_name = topology.name.replace(' ', '_') - - logger.debug("-------------------stack_name--------------------: %s" % stack_name) - if openstackUtils.connect_to_openstack(): - logger.debug(openstackUtils.create_stack_snapshot(stack_name, tenant_id, snap_name)) - - return HttpResponseRedirect('/topologies/' + topology_id + '/') - - except Exception as e: - logger.debug("Caught Exception in deploy") - logger.debug(str(e)) - return render(request, 'error.html', {'error': str(e)}) From fa99401afc1c4c4aea35d98c0ffefb1ca151f538 Mon Sep 17 00:00:00 2001 From: Anurag Menon Date: Fri, 13 Apr 2018 00:06:00 -0400 Subject: [PATCH 03/10] Changes for snapshots latest branch --- .../ajax/openstackDeploymentStatus.html | 117 ++-- ajax/urls.py | 2 - ajax/views.py | 82 ++- common/lib/openstackUtils.py | 623 ++++++++++++------ common/lib/wistarUtils.py | 277 ++++++-- topologies/templates/topologies/edit.html | 134 ++-- topologies/views.py | 37 +- 7 files changed, 808 insertions(+), 464 deletions(-) diff --git a/ajax/templates/ajax/openstackDeploymentStatus.html b/ajax/templates/ajax/openstackDeploymentStatus.html index dfa8d22..9ae792c 100644 --- a/ajax/templates/ajax/openstackDeploymentStatus.html +++ b/ajax/templates/ajax/openstackDeploymentStatus.html @@ -1,19 +1,19 @@ {% load staticfiles %} - - - - - + @@ -35,105 +35,80 @@ {% else %} - {% if stack.stack_status != "CREATE_COMPLETE" %} + {% if 'COMPLETE' not in stack.stack_status %} - - - - - - - - - {% else %} {% for resource in stack_resources.resources %} {% if resource.resource_type == "OS::Nova::Server" %} - - - - {% endif %} {% endfor %} {% endif %} - - - - - - - - - - - - - @@ -143,6 +118,7 @@ onclick="javascript: refreshDeploymentStatus('{{ topology_id }}');"/> + - - - - - - - - - - {% endif %}
Stack Status
Status + {{ stack.stack_status }}
Status Detail + {{ stack.stack_status_reason }}
+ onclick="javascript: window.open('{{ openstack_horizon_url }}/project/instances/{{ resource.physical_resource_id }}')"> {{ resource.resource_name }} - Status - -   - - {% if resource.resource_status == "CREATE_COMPLETE" %} -   + + {% if 'COMPLETE' in resource.resource_status %} +
+   {% else %} - +
+   {% endif %}
Options -
- - View in Horizon - - - - Delete Stack - - - Debug HEAT - - - - Update HEAT - + +
+ 🔍 +
+   +
+ +
+   +
+ +
+   + +  
HEAT Snapshots @@ -161,16 +137,5 @@
- diff --git a/ajax/urls.py b/ajax/urls.py index 2398048..fd3b02d 100644 --- a/ajax/urls.py +++ b/ajax/urls.py @@ -45,8 +45,6 @@ url(r'^deployStack/(?P[^/]+)$', views.deploy_stack, name='deployStack'), url(r'^updateStack/(?P[^/]+)$', views.update_stack, name='updateStack'), url(r'^deleteStack/(?P[^/]+)$', views.delete_stack, name='deleteStack'), - #url(r'^createSnapshot/(?P[^/]+)$', views.create_snapshot, name='createSnapshot'), - #url(r'^createSnapshot/$', views.create_snapshot, name='createSnapshot'), url(r'^listSnapshot/(?P[^/]+)$', views.list_snapshot, name='listSnapshot'), url(r'^deleteSnapshot/(?P[^/]+)/(?P[^/]+)/$', views.delete_snapshot, name='deleteSnapshot'), diff --git a/ajax/views.py b/ajax/views.py index 7e5642d..2c356e8 100644 --- a/ajax/views.py +++ b/ajax/views.py @@ -35,7 +35,6 @@ from common.lib import linuxUtils from common.lib import openstackUtils from common.lib import osUtils -from common.lib import vboxUtils from common.lib import wistarUtils from common.lib.WistarException import WistarException from images.models import Image @@ -261,23 +260,26 @@ def get_junos_startup_state(request): return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"}) name = request.POST['name'] + + # always check network if possible regardless of deployment_backend + if "ip" in request.POST: + # this instance is auto-configured, so we can just check for IP here + response_data["network"] = osUtils.check_ip(request.POST["ip"]) + if configuration.deployment_backend == "kvm" and libvirtUtils.is_domain_running(name): # topologies/edit will fire multiple calls at once # let's just put a bit of a breather between each one response_data["power"] = True - if "ip" in request.POST: - # this instance is auto-configured, so we can just check for IP here - response_data["network"] = osUtils.check_ip(request.POST["ip"]) - else: + if "ip" not in request.POST: time.sleep(random.randint(0, 10) * .10) - response_data["console"] = consoleUtils.is_junos_device_at_prompt(name) elif configuration.deployment_backend == "openstack": time.sleep(random.randint(0, 20) * .10) response_data["power"] = True - response_data["console"] = consoleUtils.is_junos_device_at_prompt(name) + # console no longer supported in openstack deployments + response_data["console"] = False return HttpResponse(json.dumps(response_data), content_type="application/json") @@ -293,20 +295,24 @@ def get_linux_startup_state(request): return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"}) name = request.POST['name'] + # always check network if possible regardless of deployment_backend + if "ip" in request.POST: + # this instance is auto-configured, so we can just check for IP here + response_data["network"] = osUtils.check_ip(request.POST["ip"]) if configuration.deployment_backend == "openstack": if openstackUtils.connect_to_openstack(): time.sleep(random.randint(0, 10) * .10) response_data["power"] = True - response_data["console"] = consoleUtils.is_linux_device_at_prompt(name) + # as of 2018-01-01 we no longer support openstack console, this is dead code + # response_data["console"] = consoleUtils.is_linux_device_at_prompt(name) + response_data['console'] = False else: if libvirtUtils.is_domain_running(name): time.sleep(random.randint(0, 10) * .10) response_data["power"] = True - if "ip" in request.POST: - # this instance is auto-configured, so we can just check for IP here - response_data["network"] = osUtils.check_ip(request.POST["ip"]) - else: + # let's check the console only if we 
do not have network available to check + if "ip" not in request.POST: response_data["console"] = consoleUtils.is_linux_device_at_prompt(name) return HttpResponse(json.dumps(response_data), content_type="application/json") @@ -577,11 +583,17 @@ def refresh_openstack_deployment_status(request, topology_id): stack_details = openstackUtils.get_stack_details(stack_name) stack_resources = dict() logger.debug(stack_details) - if stack_details is not None and stack_details["stack_status"] == "CREATE_COMPLETE": + if stack_details is not None and 'stack_status' in stack_details and 'COMPLETE' in stack_details["stack_status"]: stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"]) + if hasattr(configuration, 'openstack_horizon_url'): + horizon_url = configuration.openstack_horizon_url + else: + horizon_url = 'http://' + configuration.openstack_host + '/dashboard' + context = {"stack": stack_details, "topology_id": topology.id, "openstack_host": configuration.openstack_host, + "openstack_horizon_url": horizon_url, "stack_resources": stack_resources } return render(request, 'ajax/openstackDeploymentStatus.html', context) @@ -628,7 +640,7 @@ def get_available_ip(request): # IP addresses. This makes the attempt to use 'old' ips that # are at least not still in use. logger.info("getting ips that are currently reserved via DHCP") - all_used_ips = wistarUtils.get_dhcp_reserved_ips() + all_used_ips = wistarUtils.get_consumed_management_ips() logger.debug(all_used_ips) next_ip = wistarUtils.get_next_ip(all_used_ips, 2) logger.debug(next_ip) @@ -1015,8 +1027,15 @@ def inline_deploy_topology(config): if device["cloudInitSupport"]: # grab the last interface management_interface = device["managementInterface"] - # this will come back to haunt me one day. Assume /24 for mgmt network is sprinkled everywhere! - management_ip = device["ip"] + "/24" + + # grab the prefix len from the management subnet which is in the form 192.168.122.0/24 + if '/' in configuration.management_subnet: + management_prefix_len = configuration.management_subnet.split('/')[1] + else: + management_prefix_len = '24' + + management_ip = device['ip'] + '/' + management_prefix_len + # domain_name, host_name, mgmt_ip, mgmt_interface script_string = "" script_param = "" @@ -1164,7 +1183,6 @@ def get_topology_config(request): This is useful to get a list of all objects on the topolgy, filter for objects of a specific type, and verify their boot up state. i.e. 
to run a command against all Junos devices for example - """ if 'topologyId' not in request.POST: return render(request, 'ajax/ajaxError.html', {'error': "No Topology Id in request"}) @@ -1189,7 +1207,6 @@ def get_topology_config(request): def execute_linux_automation(request): """ execute cli command on all linux instances in topology - """ if 'topologyId' not in request.POST: return render(request, 'ajax/ajaxError.html', {'error': "No Topology Id in request"}) @@ -1234,7 +1251,6 @@ def execute_linux_automation(request): def execute_junos_automation(request): """ execute cli command on all junos instances in topology - """ if 'topologyId' not in request.POST: return render(request, 'ajax/ajaxError.html', {'error': "No Topology Id in request"}) @@ -1419,11 +1435,15 @@ def deploy_stack(request, topology_id): return render(request, 'error.html', {'error': "Topology not found!"}) try: + # generate a stack name + # FIXME should add a check to verify this is a unique name + stack_name = topology.name.replace(' ', '_') + # let's parse the json and convert to simple lists and dicts logger.debug("loading config") config = wistarUtils.load_config_from_topology_json(topology.json, topology_id) logger.debug("Config is loaded") - heat_template = wistarUtils.get_heat_json_from_topology_config(config) + heat_template = wistarUtils.get_heat_json_from_topology_config(config, stack_name) logger.debug("heat template created") if not openstackUtils.connect_to_openstack(): return render(request, 'error.html', {'error': "Could not connect to Openstack"}) @@ -1435,7 +1455,7 @@ def deploy_stack(request, topology_id): raise Exception("No project found for %s" % configuration.openstack_project) # FIXME - verify all images are in glance before jumping off here! - stack_name = topology.name.replace(' ', '_') + logger.debug(openstackUtils.create_stack(stack_name, heat_template)) return HttpResponseRedirect('/topologies/' + topology_id + '/') @@ -1464,6 +1484,7 @@ def delete_stack(request, topology_id): return HttpResponseRedirect('/topologies/' + topology_id + '/') + def update_stack(request, topology_id): """ :param request: Django request @@ -1480,7 +1501,7 @@ def update_stack(request, topology_id): logger.debug("loading config") config = wistarUtils.load_config_from_topology_json(topology.json, topology_id) logger.debug("Config is loaded") - + # get the tenant_id of the desired project tenant_id = openstackUtils.get_project_id(configuration.openstack_project) logger.debug("using tenant_id of: %s" % tenant_id) @@ -1489,13 +1510,13 @@ def update_stack(request, topology_id): # FIXME - verify all images are in glance before jumping off here! 
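update_stack hands the regenerated template to openstackUtils.update_stack_template(), which patches the existing stack in place; a stripped-down sketch of that call, assuming the HEAT URL and token come from the existing helpers, using the same urllib2 verb-override trick the module relies on:

```python
# Sketch of the PATCH-based stack update performed by update_stack_template();
# heat_stack_url and token are placeholders for values the existing helpers build.
import urllib2


def patch_stack_sketch(heat_stack_url, token, template_json):
    body = '{"disable_rollback": true, "parameters": {}, "template": %s}' % template_json
    request = urllib2.Request(heat_stack_url, body)
    request.add_header("Content-Type", "application/json")
    request.add_header("X-Auth-Token", token)
    # urllib2 has no native PATCH support, so the HTTP verb is overridden
    request.get_method = lambda: 'PATCH'
    return urllib2.urlopen(request).read()
```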
stack_name = topology.name.replace(' ', '_') - + port_list = openstackUtils.get_stack_ports(stack_name, tenant_id) print(port_list) heat_template = wistarUtils.get_heat_json_from_topology_config_for_update(config, port_list) logger.debug("heat template created---test1") logger.debug(heat_template) - + logger.debug(openstackUtils.update_stack_template(stack_name, heat_template)) return HttpResponseRedirect('/topologies/' + topology_id + '/') @@ -1506,7 +1527,6 @@ def update_stack(request, topology_id): return render(request, 'error.html', {'error': str(e)}) - def list_snapshot(request, topology_id): """ :param request: Django request @@ -1522,7 +1542,6 @@ def list_snapshot(request, topology_id): logger.debug("Topology id -------------------: %s" % topology_id) - try: topology = Topology.objects.get(pk=topology_id) except ObjectDoesNotExist: @@ -1584,12 +1603,6 @@ def rollback_snapshot(request, snapshot_id, topology_id): return render(request, 'error.html', {'error': str(e)}) - - - - - - def delete_snapshot(request, snapshot_id, topology_id): """ :param request: Django request @@ -1606,7 +1619,6 @@ def delete_snapshot(request, snapshot_id, topology_id): logger.debug("Topology id -------------------: %s" % topology_id) - try: topology = Topology.objects.get(pk=topology_id) except ObjectDoesNotExist: @@ -1622,8 +1634,8 @@ def delete_snapshot(request, snapshot_id, topology_id): logger.debug(openstackUtils.delete_snapshot(tenant_id, stack_name, snapshot_id)) return HttpResponseRedirect('/topologies/' + topology_id + '/') - + except Exception as e: logger.debug("Caught Exception in deploy") logger.debug(str(e)) - return render(request, 'error.html', {'error': str(e)}) + return render(request, 'error.html', {'error': str(e)}) \ No newline at end of file diff --git a/common/lib/openstackUtils.py b/common/lib/openstackUtils.py index cec3f65..422dfd1 100644 --- a/common/lib/openstackUtils.py +++ b/common/lib/openstackUtils.py @@ -27,7 +27,7 @@ from wistar import configuration # OpenStack component URLs -_glance_url = ':9292' +# _glance_url = ':9292/v1' _analytics_url = ':8081' _api_url = ':8082' _os_url = ':5000/v3' @@ -62,6 +62,21 @@ def connect_to_openstack(): """ logger.debug("--- connect_to_openstack ---") + + logger.debug('verify configuration') + + if not hasattr(configuration, 'openstack_host'): + logger.error('Openstack Host is not configured') + return False + + if not hasattr(configuration, 'openstack_user'): + logger.error('Openstack User is not configured') + return False + + if not hasattr(configuration, 'openstack_password'): + logger.error('Openstack Password is not configured') + return False + global _auth_token global _tenant_id global _token_cache_time @@ -203,7 +218,38 @@ def get_project_id(project_name): return None -def upload_image_to_glance(name, image_file_path): +def get_network_id(network_name): + """ + Gets the UUID of the network by network_name + :param network_name: Name of the network + :return: string UUID or None + """ + + logger.debug("--- get_network_id ---") + + networks_url = create_neutron_url('/networks?name=%s' % network_name) + logger.info(networks_url) + networks_string = do_get(networks_url) + logger.info(networks_string) + if networks_string is None: + logger.error('Did not find a network for that name!') + return None + + try: + networks = json.loads(networks_string) + except ValueError: + logger.error('Could not parse json response in get_network_id') + return None + + for network in networks["networks"]: + if network["name"] == network_name: + 
logger.info('Found id!') + return str(network["id"]) + + return None + + +def upload_image_to_glance_old(name, image_file_path): """ :param name: name of the image to be uploaded @@ -239,18 +285,144 @@ def upload_image_to_glance(name, image_file_path): return None +def upload_image_to_glance(name, image_file_path): + """ + + :param name: name of the image to be created + :param image_file_path: path of the file to upload + :return: json encoded results string from glance REST api + """ + logger.debug("--- create_image_in_glance ---") + + url = create_glance_url('/images') + + try: + + d = dict() + d['disk_format'] = 'qcow2' + d['container_format'] = 'bare' + d['name'] = name + + r_data = do_post(url, json.dumps(d)) + + except Exception as e: + logger.error("Could not upload image to glance") + logger.error("error was %s" % str(e)) + return None + + try: + r_json = json.loads(r_data) + if 'id' in r_json: + image_id = r_json['id'] + + logger.info('Preparing to push image data to glance!') + f = open(image_file_path, 'rb') + fio = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) + upload_url = create_glance_url('/images/%s/file' % image_id) + request = urllib2.Request(upload_url, fio) + request.add_header("Content-Type", "application/octet-stream") + request.add_header("X-Auth-Token", _auth_token) + request.get_method = lambda: 'PUT' + return urllib2.urlopen(request) + else: + logger.error('Could not find an ID key in returned json from glance image create') + logger.error(r_data) + logger.error('returning None') + return None + + except ValueError: + logger.error('Could not parse JSON return data from glance image create') + return None + + +def get_neutron_ports_for_network(network_name): + """ + :return: json response from /ports URL + """ + logger.debug("--- get_neutron_port_list ---") + + network_id = get_network_id(network_name) + if network_id is None: + logger.warn("couldn't find the correct network_id") + return None + + url = create_neutron_url("/ports.json?network_id=%s&fields=id&fields=fixed_ips" % network_id) + logger.debug(url) + port_list_string = do_get(url) + logger.debug(port_list_string) + + return port_list_string + + +def get_consumed_management_ips(): + """ + Return a list of dicts of the format + [ + { "ip-address": "xxx.xxx.xxx.xxx"} + ] + This mimics the libvirt dnsmasq format for dhcp reservations + This is used in the wistarUtils.get_dhcp_reserved_ips() as a single place to + get all reserved management ips + :return: list of dicts + """ + consumed_ips = list() + ports_string = get_neutron_ports_for_network(configuration.openstack_mgmt_network) + if ports_string is None: + return consumed_ips + try: + ports = json.loads(ports_string) + except ValueError: + logger.error('Could not parse json response in get_consumed_management_ips') + return consumed_ips + + if 'ports' not in ports: + logger.error('unexpected keys in json response!') + return consumed_ips + + for port in ports['ports']: + for fixed_ip in port['fixed_ips']: + if configuration.management_prefix in fixed_ip['ip_address']: + fip = dict() + fip['ip-address'] = fixed_ip['ip_address'] + consumed_ips.append(fip) + + return consumed_ips + + def get_glance_image_list(): """ - :return: json response from glance /images/ URL + :return: list of json objects from glance /images URL filtered with only shared or public images """ logger.debug("--- get_glance_image_list ---") url = create_glance_url("/images") image_list_string = do_get(url) + + image_list = list() + if image_list_string is None: - return None + 
return image_list + + try: + glance_return = json.loads(image_list_string) + except ValueError: + logger.warn('Could not parse json response from glance /images') + return image_list + + if 'images' not in glance_return: + logger.warn('did not find images key in glance return data') + logger.debug(glance_return) + return image_list + + for im in glance_return['images']: + + if 'status' in im and im['status'] != 'active': + logger.debug('Skipping non-active image %s' % im['name']) + continue + + if 'visibility' in im and im['visibility'] in ['shared', 'public']: + image_list.append(im) - image_list = json.loads(image_list_string) return image_list @@ -278,10 +450,10 @@ def get_image_id_for_name(image_name): logger.debug("--- get_image_id_for_name ---") image_list = get_glance_image_list() - if image_list is None: + if image_list is None or len(image_list) == 0: return None - for image in image_list["images"]: + for image in image_list: if image["name"] == image_name: return image["id"] @@ -354,10 +526,32 @@ def get_nova_flavors(project_name): def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): + """ + Query nova to get all flavors and return the flavor that best matches our desired constraints + :param project_name: name of the project to check for flavors + :param cpu: number of cores desired + :param ram: amount of ram desired in MB + :param disk: amount of disk required in GB + :return: flavor object {"name": "m1.xlarge"} + """ logger.debug("checking: " + str(cpu) + " " + str(ram) + " " + str(disk)) + + # create an emergency flavor so we have something to return in case we can't connect to openstack + # or some other issue prevents us from determining the right thing to do + emergency_flavor = dict() + emergency_flavor['name'] = "m1.xlarge" + + if not connect_to_openstack(): + return emergency_flavor + flavors = get_nova_flavors(project_name) - flavors_object = json.loads(flavors) + try: + flavors_object = json.loads(flavors) + except ValueError: + logger.error('Could not parse nova return data') + return emergency_flavor + cpu_candidates = list() ram_candidates = list() disk_candidates = list() @@ -395,7 +589,7 @@ def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): if len(disk_candidates) == 0: # uh-oh, just return the largest and hope for the best! - return "m1.xlarge" + return emergency_flavor elif len(disk_candidates) == 1: return disk_candidates[0] else: @@ -403,7 +597,7 @@ def get_minimum_flavor_for_specs(project_name, cpu, ram, disk): # let's find the smallest flavor left! 
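The candidate filtering in get_minimum_flavor_for_specs() amounts to "smallest flavor that still satisfies every constraint"; an equivalent one-pass formulation (illustrative only, assuming each flavor dict exposes vcpus, ram and disk as Nova reports them):

```python
# Illustrative alternative to the candidate-list filtering: pick the smallest
# flavor meeting the requested vcpus / ram (MB) / disk (GB) in one pass.
def pick_min_flavor(flavors, cpu, ram, disk):
    candidates = [f for f in flavors
                  if f["vcpus"] >= cpu and f["ram"] >= ram and f["disk"] >= disk]
    if not candidates:
        # same emergency fallback the patch falls back to when nothing fits
        return {"name": "m1.xlarge"}
    return min(candidates, key=lambda f: (f["vcpus"], f["ram"], f["disk"]))
```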
cpu_low = 99 disk_low = 999 - ram_low = 9999 + ram_low = 99999 for f in disk_candidates: if f["vcpus"] < cpu_low: cpu_low = f["vcpus"] @@ -454,209 +648,6 @@ def create_stack(stack_name, template_string): return do_post(url, data) - -def create_stack_snapshot(stack_name, tenant_id, snapshot_name): - """ - Creates a snapshot of the Stack via a HEAT REST call - :param stack_name: name of the stack to create - :param tenant_id: tenant id of the openstack project - :param snapshot_name: name of the snapshot - :return: JSON response from HEAT-API or None on failure - """ - logger.debug("In create stack snapshot--------------") - stack_details = get_stack_details(stack_name) - if stack_details is None: - return None - else: - stack_id = str(stack_details["id"]) - - #stack_id = get_stack_details(stack_name) - create_snapshot_url = create_heat_url("/" + str(tenant_id) + "/stacks/%s/%s/snapshots" % (stack_name, stack_id)) - data = """{ - "name": "%s" - }""" % snapshot_name - logger.debug("Data before posting-----------") - logger.debug(data) - - return do_post(create_snapshot_url, data) - - - -def get_snapshot_list(tenant_id, stack_name, topology_id): - - print("In snapshot list") - - stack_details = get_stack_details(stack_name) - if stack_details is None: - return None - else: - stack_id = str(stack_details["id"]) - snapshot_list_url = create_heat_url('/%s/stacks/%s/%s/snapshots' % (tenant_id, stack_name, stack_id)) - stack_snapshot_list = do_get(snapshot_list_url) - l1 = json.loads(stack_snapshot_list) - #l2 = l1['snapshots'] - snap_list = list() - - for snap in l1["snapshots"]: - if snap["status"] == "COMPLETE": - snap_detail = get_snap_detail(snap, stack_name, topology_id) - snap_list.append(snap_detail) - logger.debug(snap_list) - return snap_list - -def get_snap_detail(snap, stack_name, topology_id): - - logger.debug("Getting snapshot details-------------") - logger.debug(snap) - snap_list = dict() - snap_list["name"] = snap["name"] - snap_list["id"] = snap["id"] - snap_list["stack_name"] = stack_name - snap_list["topology_id"] = str(topology_id) - - return snap_list - -def get_server_details(search_string): - server_details_url = create_nova_url('/servers?name=%s' % search_string) - server_details_json = do_get(server_details_url) - server_details_dict = json.loads(server_details_json) - - if server_details_dict is None: - return None - else: - server_details_list = list() - server_details_list = server_details_dict['servers'] - for det in server_details_list: - server_details = det['id'] - logger.debug(server_details) - return server_details - - - - -def delete_snapshot(tenant_id, stack_name, snapshot_id): - """ - Deletes a stack from OpenStack - :param stack_name: name of the stack to be deleted - :return: JSON response fro HEAT API - """ - logger.debug("--- delete_stack_snapshot ---") - - stack_details = get_stack_details(stack_name) - if stack_details is None: - return None - else: - stack_id = stack_details["id"] - url = create_heat_url("/%s/stacks/%s/%s/snapshots/%s" % (tenant_id, stack_name, stack_id, snapshot_id)) - return do_delete(url) - - - -def rollback_snapshot(tenant_id, stack_name, snapshot_id): - """ - Deletes a stack from OpenStack - :param stack_name: name of the stack to be deleted - :return: JSON response fro HEAT API - """ - logger.debug("--- delete_stack_snapshot ---") - data = "" - stack_details = get_stack_details(stack_name) - if stack_details is None: - return None - else: - stack_id = stack_details["id"] - url = 
create_heat_url("/%s/stacks/%s/%s/snapshots/%s/restore" % (tenant_id, stack_name, stack_id, snapshot_id)) - return do_post(url, data) - - - -def rebuild_instance_openstack(server_id, image_id): - logger.debug("-------Rebuild server openstack") - url = create_nova_url("/servers/%s/action" % server_id) - data = '''{ - "rebuild" : { - "imageRef" : "%s" - } - }''' % str(image_id) - - return do_post(url, data) - - - - -def update_stack_template(stack_name, template_string): - - """ - Creates a Stack via a HEAT template - :param stack_name: name of the stack to create - :param template_string: HEAT template to be used - :return: JSON response from HEAT-API or None on failure - """ - logger.debug("--- update_stack ---") - stack_details = get_stack_details(stack_name) - if stack_details is None: - return None - else: - stack_id = stack_details["id"] - - url = create_heat_url("/" + str(_tenant_id) + "/stacks/%s/%s" % (stack_name, stack_id)) - logger.debug("URL to update stack") - logger.debug(url) - data = '''{ - "disable_rollback": true, - "parameters": {}, - "template": %s - }''' % (template_string) - logger.debug("updating CREATING stack with data:") - logger.debug(data) - try: - request = urllib2.Request(url) - request.add_header("Content-Type", "application/json") - request.add_header("charset", "UTF-8") - request.add_header("X-Auth-Token", _auth_token) - request.get_method = lambda: 'PATCH' - - if data == "": - result = urllib2.urlopen(request) - else: - result = urllib2.urlopen(request, data) - - return result.read() - except URLError as e: - logger.error("Could not perform PUT to url: %s" % url) - logger.error("error was %s" % str(e)) - return None - #return do_post(url, data) - - - - -def get_stack_ports(stack_name, tenant_id): - stack_details = get_stack_details(stack_name) - - if stack_details is None: - return None - else: - stack_id = str(stack_details["id"]) - - try: - get_port_url = create_heat_url( '/%s/stacks/%s/%s/resources?type=OS::Neutron::Port' % (tenant_id, stack_name, stack_id)) - resources = do_get(get_port_url) - resource_dict = json.loads(resources) - resource_list = resource_dict['resources'] - print(resource_list) - port_list = list() - for port in resource_list: - port_list.append(port['resource_name']) - print(port_list) - return port_list - - except URLError as e: - logger.error("Could not perform PUT to url: %s" % url) - logger.error("error was %s" % str(e)) - return None - - def get_nova_serial_console(instance_name): """ Get the websocket URL for the serial proxy for a given nova server (instance) @@ -714,6 +705,10 @@ def create_glance_url(url): return "http://" + configuration.openstack_host + _glance_url + url +def create_neutron_url(url): + return "http://" + configuration.openstack_host + _neutron_url + url + + def create_os_url(url): return "http://" + configuration.openstack_host + _os_url + url @@ -868,3 +863,205 @@ def do_delete(url, data=""): logger.error("Could not perform DELETE to url: %s" % url) logger.error("error was %s" % str(e)) return None + + + + +def create_stack_snapshot(stack_name, tenant_id, snapshot_name): + """ + Creates a snapshot of the Stack via a HEAT REST call + :param stack_name: name of the stack to create + :param tenant_id: tenant id of the openstack project + :param snapshot_name: name of the snapshot + :return: JSON response from HEAT-API or None on failure + """ + logger.debug("In create stack snapshot--------------") + stack_details = get_stack_details(stack_name) + if stack_details is None: + return None + else: + stack_id = 
str(stack_details["id"]) + + #stack_id = get_stack_details(stack_name) + create_snapshot_url = create_heat_url("/" + str(tenant_id) + "/stacks/%s/%s/snapshots" % (stack_name, stack_id)) + data = """{ + "name": "%s" + }""" % snapshot_name + logger.debug("Data before posting-----------") + logger.debug(data) + + return do_post(create_snapshot_url, data) + + + +def get_snapshot_list(tenant_id, stack_name, topology_id): + + print("In snapshot list") + + stack_details = get_stack_details(stack_name) + if stack_details is None: + return None + else: + stack_id = str(stack_details["id"]) + snapshot_list_url = create_heat_url('/%s/stacks/%s/%s/snapshots' % (tenant_id, stack_name, stack_id)) + stack_snapshot_list = do_get(snapshot_list_url) + l1 = json.loads(stack_snapshot_list) + #l2 = l1['snapshots'] + snap_list = list() + + for snap in l1["snapshots"]: + if snap["status"] == "COMPLETE": + snap_detail = get_snap_detail(snap, stack_name, topology_id) + snap_list.append(snap_detail) + logger.debug(snap_list) + return snap_list + +def get_snap_detail(snap, stack_name, topology_id): + + logger.debug("Getting snapshot details-------------") + logger.debug(snap) + snap_list = dict() + snap_list["name"] = snap["name"] + snap_list["id"] = snap["id"] + snap_list["stack_name"] = stack_name + snap_list["topology_id"] = str(topology_id) + + return snap_list + +def get_server_details(search_string): + server_details_url = create_nova_url('/servers?name=%s' % search_string) + server_details_json = do_get(server_details_url) + server_details_dict = json.loads(server_details_json) + + if server_details_dict is None: + return None + else: + server_details_list = list() + server_details_list = server_details_dict['servers'] + for det in server_details_list: + server_details = det['id'] + logger.debug(server_details) + return server_details + + + + +def delete_snapshot(tenant_id, stack_name, snapshot_id): + """ + Deletes a stack from OpenStack + :param stack_name: name of the stack to be deleted + :return: JSON response fro HEAT API + """ + logger.debug("--- delete_stack_snapshot ---") + + stack_details = get_stack_details(stack_name) + if stack_details is None: + return None + else: + stack_id = stack_details["id"] + url = create_heat_url("/%s/stacks/%s/%s/snapshots/%s" % (tenant_id, stack_name, stack_id, snapshot_id)) + return do_delete(url) + + + +def rollback_snapshot(tenant_id, stack_name, snapshot_id): + """ + Deletes a stack from OpenStack + :param stack_name: name of the stack to be deleted + :return: JSON response fro HEAT API + """ + logger.debug("--- delete_stack_snapshot ---") + data = "" + stack_details = get_stack_details(stack_name) + if stack_details is None: + return None + else: + stack_id = stack_details["id"] + url = create_heat_url("/%s/stacks/%s/%s/snapshots/%s/restore" % (tenant_id, stack_name, stack_id, snapshot_id)) + return do_post(url, data) + + + +def rebuild_instance_openstack(server_id, image_id): + logger.debug("-------Rebuild server openstack") + url = create_nova_url("/servers/%s/action" % server_id) + data = '''{ + "rebuild" : { + "imageRef" : "%s" + } + }''' % str(image_id) + + return do_post(url, data) + + + + +def update_stack_template(stack_name, template_string): + + """ + Creates a Stack via a HEAT template + :param stack_name: name of the stack to create + :param template_string: HEAT template to be used + :return: JSON response from HEAT-API or None on failure + """ + logger.debug("--- update_stack ---") + stack_details = get_stack_details(stack_name) + if stack_details is 
None: + return None + else: + stack_id = stack_details["id"] + + url = create_heat_url("/" + str(_tenant_id) + "/stacks/%s/%s" % (stack_name, stack_id)) + logger.debug("URL to update stack") + logger.debug(url) + data = '''{ + "disable_rollback": true, + "parameters": {}, + "template": %s + }''' % (template_string) + logger.debug("updating CREATING stack with data:") + logger.debug(data) + try: + request = urllib2.Request(url) + request.add_header("Content-Type", "application/json") + request.add_header("charset", "UTF-8") + request.add_header("X-Auth-Token", _auth_token) + request.get_method = lambda: 'PATCH' + + if data == "": + result = urllib2.urlopen(request) + else: + result = urllib2.urlopen(request, data) + + return result.read() + except URLError as e: + logger.error("Could not perform PUT to url: %s" % url) + logger.error("error was %s" % str(e)) + return None + #return do_post(url, data) + + +def get_stack_ports(stack_name, tenant_id): + stack_details = get_stack_details(stack_name) + + if stack_details is None: + return None + else: + stack_id = str(stack_details["id"]) + + try: + get_port_url = create_heat_url( '/%s/stacks/%s/%s/resources?type=OS::Neutron::Port' % (tenant_id, stack_name, stack_id)) + resources = do_get(get_port_url) + resource_dict = json.loads(resources) + resource_list = resource_dict['resources'] + print(resource_list) + port_list = list() + for port in resource_list: + port_list.append(port['resource_name']) + print(port_list) + return port_list + + except URLError as e: + logger.error("Could not perform PUT to url: %s" % url) + logger.error("error was %s" % str(e)) + return None diff --git a/common/lib/wistarUtils.py b/common/lib/wistarUtils.py index d4cf564..ed2fa51 100644 --- a/common/lib/wistarUtils.py +++ b/common/lib/wistarUtils.py @@ -19,6 +19,7 @@ import json import logging +import math import os import re import subprocess @@ -75,26 +76,28 @@ def _generate_mac(topology_id): """ silly attempt to keep mac addresses unique use the topology id to generate 2 octets, and the number of - macs used so far to generate the last one - :param topology_id: id of the topology we are building + macs used so far to generate the last two octets. 
+ Uses the locally administered address ranges 52:54:00 through 52:54:FF + :param topology_id: string id of the topology we are building :return: mostly unique mac address that should be safe to deploy """ + tid = int(topology_id) global mac_counter global used_macs - b1 = "52:54:" - b2 = '%02x' % int(len(used_macs[topology_id]) / 256) - base = b1 + str(b2) + ":" - tid = "%04x" % int(topology_id) - mac_base = base + str(tid[:2]) + ":" + str(tid[2:4]) + ":" - mac = mac_base + (str("%02x" % mac_counter)[:2]) + base = '52:54:00:00:00:00' + ba = base.split(':') + ba[2] = '%02x' % int(tid / 256) + ba[3] = '%02x' % int(tid % 256) + ba[4] = '%02x' % int(len(used_macs[topology_id]) / 256) + ba[5] = '%02x' % int(mac_counter) mac_counter += 1 mac_counter = mac_counter % 256 - return mac + return ':'.join(ba) -def get_heat_json_from_topology_config(config): +def get_heat_json_from_topology_config(config, project_name='admin'): """ Generates heat template from the topology configuration object use load_config_from_topology_json to get the configuration from the Topology @@ -119,10 +122,11 @@ def get_heat_json_from_topology_config(config): nrs = dict() nrs["type"] = "OS::Neutron::Subnet" - + # p = dict() p["cidr"] = "1.1.1.0/24" p["enable_dhcp"] = False + p["gateway_ip"] = "" p["name"] = network["name"] + "_subnet" if network["name"] == "virbr0": p["network_id"] = configuration.openstack_mgmt_network @@ -149,10 +153,21 @@ def get_heat_json_from_topology_config(config): image_details_dict[device["imageId"]] = image_details image_name = image_details["name"] - if "disk" in image_details: - image_disk = image_details["disk"] - else: - image_disk = 20 + + image_disk_size = 20 + + # set the size in GB, rounding up to the nearest int + if 'size' in image_details: + current_size = int(image_details['size']) + image_disk_size = int(math.ceil(current_size / 100000000)) + + # if the flavor asks for a minimum disk size, let's see if it's larger that what we have + if "min_disk" in image_details and image_details['min_disk'] > image_disk_size: + image_disk_size = image_details["min_disk"] + + # if the user has specified a desired disk size, grab it here so we get the correct flavor + if type(image_disk_size) is int and device["resizeImage"] > image_disk_size: + image_disk_size = device["resizeImage"] # determine openstack flavor here device_ram = int(device["ram"]) @@ -161,7 +176,7 @@ def get_heat_json_from_topology_config(config): flavor_detail = openstackUtils.get_minimum_flavor_for_specs(configuration.openstack_project, device_cpu, device_ram, - image_disk + image_disk_size ) flavor = flavor_detail["name"] @@ -196,6 +211,7 @@ def get_heat_json_from_topology_config(config): for cfp in device["configDriveParams"]: if "destination" in cfp and cfp["destination"] == "/boot/loader.conf": + logger.debug("Creating loader.conf config-drive entry") template_name = cfp["template"] loader_string = osUtils.compile_config_drive_params_template(template_name, device["name"], @@ -204,12 +220,17 @@ def get_heat_json_from_topology_config(config): device["ip"], device["managementInterface"]) - for l in loader_string: - left, right = l.split('=') - if left not in metadata: - metadata[left] = right + logger.debug('----------') + logger.debug(loader_string) + logger.debug('----------') + for l in loader_string.split('\n'): + if '=' in l: + left, right = l.split('=') + if left not in metadata and left != '': + metadata[left] = right.replace('"', '') if "destination" in cfp and cfp["destination"] == "/juniper.conf": + 
logger.debug("Creating juniper.conf config-drive entry") template_name = cfp["template"] personality_string = osUtils.compile_config_drive_params_template(template_name, device["name"], @@ -220,6 +241,43 @@ def get_heat_json_from_topology_config(config): dr["properties"]["personality"] = dict() dr["properties"]["personality"] = {"/config/juniper.conf": personality_string} + else: + logger.debug('No juniper.conf found here ') + + if device['cloudInitSupport']: + logger.debug('creating cloud-init script') + dr["properties"]["config_drive"] = True + dr["properties"]["user_data_format"] = "RAW" + metadata = dict() + metadata["hostname"] = device["name"] + dr["properties"]["metadata"] = metadata + # grab the prefix len from the management subnet which is in the form 192.168.122.0/24 + if '/' in configuration.management_subnet: + management_prefix_len = configuration.management_subnet.split('/')[1] + else: + management_prefix_len = '24' + + management_ip = device['ip'] + '/' + management_prefix_len + + device_config = osUtils.get_cloud_init_config(device['name'], + device['label'], + management_ip, + device['managementInterface'], + device['password']) + + script_string = "" + if "configScriptId" in device and device["configScriptId"] != 0: + logger.debug("Passing script data!") + try: + script = Script.objects.get(pk=int(device["configScriptId"])) + script_string = script.script + device_config["script_param"] = device.get("configScriptParam", '') + logger.debug(script_string) + except ObjectDoesNotExist: + logger.info('config script was specified but was not found!') + + user_data_string = osUtils.render_cloud_init_user_data(device_config, script_string) + dr["properties"]["user_data"] = user_data_string template["resources"][device["name"]] = dr @@ -232,12 +290,21 @@ def get_heat_json_from_topology_config(config): if port["bridge"] == "virbr0": p["network_id"] = configuration.openstack_mgmt_network + + # specify our desired IP address on the management interface + p['fixed_ips'] = list() + fip = dict() + fip['ip_address'] = device['ip'] + p['fixed_ips'].append(fip) + elif port["bridge"] == configuration.openstack_external_network: p["network_id"] = configuration.openstack_external_network else: p["network_id"] = {"get_resource": port["bridge"]} - p["name"] = device["name"] + "_port" + str(index) + # disable port security on all other ports (in case this isn't set globally) + p['port_security_enabled'] = False + p["name"] = device["name"] + "_port" + str(index) pr["properties"] = p template["resources"][device["name"] + "_port" + str(index)] = pr index += 1 @@ -246,11 +313,6 @@ def get_heat_json_from_topology_config(config): - - - - - def get_heat_json_from_topology_config_for_update(config, port_list): """ Generates heat template from the topology configuration object @@ -276,10 +338,11 @@ def get_heat_json_from_topology_config_for_update(config, port_list): nrs = dict() nrs["type"] = "OS::Neutron::Subnet" - + # p = dict() p["cidr"] = "1.1.1.0/24" p["enable_dhcp"] = False + p["gateway_ip"] = "" p["name"] = network["name"] + "_subnet" if network["name"] == "virbr0": p["network_id"] = configuration.openstack_mgmt_network @@ -306,10 +369,21 @@ def get_heat_json_from_topology_config_for_update(config, port_list): image_details_dict[device["imageId"]] = image_details image_name = image_details["name"] - if "disk" in image_details: - image_disk = image_details["disk"] - else: - image_disk = 20 + + image_disk_size = 20 + + # set the size in GB, rounding up to the nearest int + if 'size' in 
image_details: + current_size = int(image_details['size']) + image_disk_size = int(math.ceil(current_size / 100000000)) + + # if the flavor asks for a minimum disk size, let's see if it's larger that what we have + if "min_disk" in image_details and image_details['min_disk'] > image_disk_size: + image_disk_size = image_details["min_disk"] + + # if the user has specified a desired disk size, grab it here so we get the correct flavor + if type(image_disk_size) is int and device["resizeImage"] > image_disk_size: + image_disk_size = device["resizeImage"] # determine openstack flavor here device_ram = int(device["ram"]) @@ -318,7 +392,7 @@ def get_heat_json_from_topology_config_for_update(config, port_list): flavor_detail = openstackUtils.get_minimum_flavor_for_specs(configuration.openstack_project, device_cpu, device_ram, - image_disk + image_disk_size ) flavor = flavor_detail["name"] @@ -356,6 +430,7 @@ def get_heat_json_from_topology_config_for_update(config, port_list): for cfp in device["configDriveParams"]: if "destination" in cfp and cfp["destination"] == "/boot/loader.conf": + logger.debug("Creating loader.conf config-drive entry") template_name = cfp["template"] loader_string = osUtils.compile_config_drive_params_template(template_name, device["name"], @@ -364,12 +439,17 @@ def get_heat_json_from_topology_config_for_update(config, port_list): device["ip"], device["managementInterface"]) - for l in loader_string: - left, right = l.split('=') - if left not in metadata: - metadata[left] = right + logger.debug('----------') + logger.debug(loader_string) + logger.debug('----------') + for l in loader_string.split('\n'): + if '=' in l: + left, right = l.split('=') + if left not in metadata and left != '': + metadata[left] = right.replace('"', '') if "destination" in cfp and cfp["destination"] == "/juniper.conf": + logger.debug("Creating juniper.conf config-drive entry") template_name = cfp["template"] personality_string = osUtils.compile_config_drive_params_template(template_name, device["name"], @@ -380,6 +460,43 @@ def get_heat_json_from_topology_config_for_update(config, port_list): dr["properties"]["personality"] = dict() dr["properties"]["personality"] = {"/config/juniper.conf": personality_string} + else: + logger.debug('No juniper.conf found here ') + + if device['cloudInitSupport']: + logger.debug('creating cloud-init script') + dr["properties"]["config_drive"] = True + dr["properties"]["user_data_format"] = "RAW" + metadata = dict() + metadata["hostname"] = device["name"] + dr["properties"]["metadata"] = metadata + # grab the prefix len from the management subnet which is in the form 192.168.122.0/24 + if '/' in configuration.management_subnet: + management_prefix_len = configuration.management_subnet.split('/')[1] + else: + management_prefix_len = '24' + + management_ip = device['ip'] + '/' + management_prefix_len + + device_config = osUtils.get_cloud_init_config(device['name'], + device['label'], + management_ip, + device['managementInterface'], + device['password']) + + script_string = "" + if "configScriptId" in device and device["configScriptId"] != 0: + logger.debug("Passing script data!") + try: + script = Script.objects.get(pk=int(device["configScriptId"])) + script_string = script.script + device_config["script_param"] = device.get("configScriptParam", '') + logger.debug(script_string) + except ObjectDoesNotExist: + logger.info('config script was specified but was not found!') + + user_data_string = osUtils.render_cloud_init_user_data(device_config, script_string) + 
dr["properties"]["user_data"] = user_data_string template["resources"][device["name"]] = dr @@ -392,21 +509,33 @@ def get_heat_json_from_topology_config_for_update(config, port_list): if port["bridge"] == "virbr0": p["network_id"] = configuration.openstack_mgmt_network + + # specify our desired IP address on the management interface + p['fixed_ips'] = list() + fip = dict() + fip['ip_address'] = device['ip'] + p['fixed_ips'].append(fip) + elif port["bridge"] == configuration.openstack_external_network: p["network_id"] = configuration.openstack_external_network else: p["network_id"] = {"get_resource": port["bridge"]} + # disable port security on all other ports (in case this isn't set globally) + p['port_security_enabled'] = False + if device["name"] + "_port" + str(index) in port_list: p["name"] = device["name"] + "_port" + str(index) + "_nora" else: p["name"] = device["name"] + "_port" + str(index) - pr["properties"] = p + pr["properties"] = p if device["name"] + "_port" + str(index) in port_list: template["resources"][device["name"] + "_port" + str(index) + "_nora"] = pr else: template["resources"][device["name"] + "_port" + str(index)] = pr + + index += 1 return json.dumps(template) @@ -414,6 +543,11 @@ def get_heat_json_from_topology_config_for_update(config, port_list): + + + + + def _get_management_macs_for_topology(topology_id): """ returns a list of all macs used for management interfaces for a topology @@ -447,7 +581,10 @@ def load_config_from_topology_json(topology_json, topology_id): # preload all the existing management mac addresses if any global used_macs - used_macs[topology_id] = _get_management_macs_for_topology(topology_id) + if configuration.deployment_backend == "kvm": + used_macs[topology_id] = _get_management_macs_for_topology(topology_id) + else: + used_macs[topology_id] = list() json_data = json.loads(topology_json) @@ -467,8 +604,7 @@ def load_config_from_topology_json(topology_json, topology_id): # has this topology already been deployed? is_deployed = False - existing_macs = _get_management_macs_for_topology(topology_id) - if len(existing_macs) > 0: + if len(used_macs[topology_id]) > 0: # yep, already been deployed is_deployed = True @@ -554,15 +690,18 @@ def load_config_from_topology_json(topology_json, topology_id): device["uuid"] = json_object.get('id', '') device["interfaces"] = [] - # determine next available VNC port that has not currently been assigned - next_vnc_port = libvirtUtils.get_next_domain_vnc_port(device_index) - - # verify that this port is not actually in use by another process - while osUtils.check_port_in_use(next_vnc_port): - device_index += 1 + device['vncPort'] = 0 + if configuration.deployment_backend == "kvm": + # determine next available VNC port that has not currently been assigned next_vnc_port = libvirtUtils.get_next_domain_vnc_port(device_index) - device["vncPort"] = next_vnc_port + # verify that this port is not actually in use by another process + while osUtils.check_port_in_use(next_vnc_port): + device_index += 1 + next_vnc_port = libvirtUtils.get_next_domain_vnc_port(device_index) + + device["vncPort"] = next_vnc_port + # is this a child VM? 
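The management-interface MAC addresses assigned further down come from the reworked _generate_mac() above, which packs the topology id and per-topology usage counters into a 52:54:xx:xx:xx:xx address; a standalone sketch of that encoding:

```python
# Standalone illustration of the 52:54:xx:xx:xx:xx scheme in _generate_mac():
# topology id in octets three and four, per-topology MAC count and a rolling
# counter in the last two.
def sketch_mac(topology_id, macs_used_so_far, counter):
    tid = int(topology_id)
    octets = [0x52, 0x54,
              (tid // 256) % 256, tid % 256,
              (macs_used_so_far // 256) % 256,
              counter % 256]
    return ':'.join('%02x' % o for o in octets)


# sketch_mac(12, 3, 7) -> '52:54:00:0c:00:07'
```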
# children will *always* have a parent attribute set in their userdata parent_id = user_data.get("parent", "") @@ -605,7 +744,10 @@ def load_config_from_topology_json(topology_json, topology_id): # management interface mi will always be connected to default management network (virbr0 on KVM) mi = dict() - if is_deployed and libvirtUtils.domain_exists(device['name']): + # slight optimization for kvm backend, dont generate new mac + if configuration.deployment_backend == "kvm" and \ + is_deployed and \ + libvirtUtils.domain_exists(device['name']): mi['mac'] = libvirtUtils.get_management_interface_mac_for_domain(device['name']) else: mi['mac'] = generate_next_mac(topology_id) @@ -784,7 +926,9 @@ def load_config_from_topology_json(topology_json, topology_id): if d["mgmtInterfaceIndex"] == -1: mi = dict() # if this has already been deployed, let's preserve the existing mac address that has been assigned - if is_deployed and libvirtUtils.domain_exists(device['name']): + if configuration.deployment_backend == "kvm" and \ + is_deployed and \ + libvirtUtils.domain_exists(device['name']): mi['mac'] = libvirtUtils.get_management_interface_mac_for_domain(device['name']) else: mi['mac'] = generate_next_mac(topology_id) @@ -993,7 +1137,7 @@ def get_used_ips(): # logger.info(last_octet) all_ips.append(int(last_octet)) - dhcp_leases = get_dhcp_reserved_ips() + dhcp_leases = get_consumed_management_ips() all_ips.extend(dhcp_leases) logger.debug("sorting and returning all_ips") @@ -1001,23 +1145,32 @@ def get_used_ips(): return all_ips -def get_dhcp_reserved_ips(): - # pull current ips out of dhcp reservations and leases files - # return as a single list +def get_consumed_management_ips(): + """ + Return a list of all ip addresses that are currently consumed on the wistar management network + THIS ASSUMES A /24 for THE MANAGEMENT NETWORK! + :return: a list of ints representing the last octet of the /24 management network + """ all_ips = list() - # let's also grab current dhcp leases as well - dhcp_leases = osUtils.get_dhcp_leases() - for lease in dhcp_leases: - ip = str(lease["ip-address"]) - logger.debug("adding active lease %s" % ip) - last_octet = ip.split('.')[-1] - all_ips.append(int(last_octet)) + # let's also grab consumed management ips as well + if configuration.deployment_backend == "openstack": + if openstackUtils.connect_to_openstack(): + dhcp_leases = openstackUtils.get_consumed_management_ips() + else: + return all_ips + else: + dhcp_leases = osUtils.get_dhcp_leases() + # let's also grab current dhcp reservations + dhcp_reservations = osUtils.get_dhcp_reservations() + for dr in dhcp_reservations: + ip = str(dr["ip-address"]) + last_octet = ip.split('.')[-1] + all_ips.append(int(last_octet)) - # let's also grab current dhcp reservations - dhcp_leases = osUtils.get_dhcp_reservations() for lease in dhcp_leases: ip = str(lease["ip-address"]) + logger.debug("adding active lease %s" % ip) last_octet = ip.split('.')[-1] all_ips.append(int(last_octet)) diff --git a/topologies/templates/topologies/edit.html b/topologies/templates/topologies/edit.html index 8949579..2be1577 100644 --- a/topologies/templates/topologies/edit.html +++ b/topologies/templates/topologies/edit.html @@ -39,6 +39,8 @@ + + @@ -179,7 +181,7 @@ } updateBootCounter++; console.log("updating boot up state for topology"); - for(v=0;v
-
- -
-
- - {% if global_config.deployment_backend == "openstack" and is_deployed == true %} - - - {% endif %} @@ -2137,7 +2137,14 @@

- + + + + {% if global_config.deployment_backend == "openstack" and is_deployed == true %} + + + + {% endif %}
@@ -2586,7 +2593,6 @@
-
@@ -2622,6 +2628,10 @@
+ + + + {% if topo_id != None %}
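Both deployment backends now feed wistarUtils.get_consumed_management_ips() the same shape of data, a list of {"ip-address": "x.x.x.x"} dicts in the libvirt dnsmasq lease format, and only the last octet of the /24 management network is kept; a small sketch of that reduction:

```python
# Sketch of the last-octet reduction applied to the dnsmasq-style lease dicts
# returned by both the libvirt and the neutron code paths.
def last_octets(leases):
    return sorted(int(lease["ip-address"].split('.')[-1]) for lease in leases)


print(last_octets([{"ip-address": "192.168.122.5"},
                   {"ip-address": "192.168.122.101"}]))  # -> [5, 101]
```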
diff --git a/topologies/views.py b/topologies/views.py index b10bec4..443d820 100644 --- a/topologies/views.py +++ b/topologies/views.py @@ -33,6 +33,7 @@ from common.lib import junosUtils from common.lib import libvirtUtils from common.lib import osUtils +from common.lib import ovsUtils from common.lib import wistarUtils from common.lib import openstackUtils @@ -67,19 +68,22 @@ def edit(request): def new(request): logger.debug('---- topology new ----') - image_list = Image.objects.all().order_by('name') + script_list = Script.objects.all().order_by('name') vm_types = configuration.vm_image_types vm_types_string = json.dumps(vm_types) - image_list_json = serializers.serialize('json', Image.objects.all(), fields=('name', 'type')) currently_allocated_ips = wistarUtils.get_used_ips() - dhcp_reservations = wistarUtils.get_dhcp_reserved_ips() + dhcp_reservations = wistarUtils.get_consumed_management_ips() if configuration.deployment_backend == "openstack": external_bridge = configuration.openstack_external_network + image_list = Image.objects.filter(filePath='').order_by('name') else: external_bridge = configuration.kvm_external_bridge + image_list = Image.objects.exclude(filePath='').order_by('name') + + image_list_json = serializers.serialize('json', image_list, fields=('name', 'type')) context = {'image_list': image_list, 'script_list': script_list, 'vm_types': vm_types_string, 'image_list_json': image_list_json, @@ -126,7 +130,7 @@ def import_topology(request): logger.debug("Iterating json objects in imported data") for json_object in json_data: if "userData" in json_object and "wistarVm" in json_object["userData"]: - logger.debug("Found one") + # logger.debug("Found one") ud = json_object["userData"] # check if we have this type of image image_list = Image.objects.filter(type=ud["type"]) @@ -137,7 +141,7 @@ def import_topology(request): '! 
Please upload an image of this type and try again') image = image_list[0] - logger.debug(str(image.id)) + # logger.debug(str(image.id)) json_object["userData"]["image"] = image.id valid_ip = wistarUtils.get_next_ip(currently_allocated_ips, next_ip_floor) @@ -157,11 +161,14 @@ def import_topology(request): vm_types = configuration.vm_image_types vm_types_string = json.dumps(vm_types) + dhcp_reservations = wistarUtils.get_consumed_management_ips() + context = {'image_list': image_list, 'image_list_json': image_list_json, 'allocated_ips': currently_allocated_ips, 'script_list': script_list, 'vm_types': vm_types_string, + 'dhcp_reservations': dhcp_reservations, 'topo': topology } @@ -195,7 +202,7 @@ def clone(request, topo_id): currently_allocated_ips = wistarUtils.get_used_ips() cloned_ips = wistarUtils.get_used_ips_from_topology_json(topology.json) - dhcp_reservations = wistarUtils.get_dhcp_reserved_ips() + dhcp_reservations = wistarUtils.get_consumed_management_ips() currently_allocated_ips += cloned_ips @@ -273,11 +280,19 @@ def delete(request, topology_id): if configuration.deployment_backend == "kvm": + if hasattr(configuration, "use_openvswitch") and configuration.use_openvswitch: + use_ovs = True + else: + use_ovs = False + network_list = libvirtUtils.get_networks_for_topology(topology_prefix) for network in network_list: logger.debug("undefine network: " + network["name"]) libvirtUtils.undefine_network(network["name"]) + if use_ovs: + ovsUtils.delete_bridge(network["name"]) + domain_list = libvirtUtils.get_domains_for_topology(topology_prefix) for domain in domain_list: @@ -471,7 +486,7 @@ def add_instance_form(request): image_list_json = serializers.serialize('json', Image.objects.all(), fields=('name', 'type')) currently_allocated_ips = wistarUtils.get_used_ips() - dhcp_reservations = wistarUtils.get_dhcp_reserved_ips() + dhcp_reservations = wistarUtils.get_consumed_management_ips() if configuration.deployment_backend == "openstack": external_bridge = configuration.openstack_external_network @@ -502,7 +517,6 @@ def rebuild_instance(request): image_list = Image.objects.all().order_by('name') for i in image_list: if not (i.type.startswith('junos')): - image_list_linux.append(i) vm_types = configuration.vm_image_types @@ -531,7 +545,6 @@ def rebuild_instance(request): def rebuild_server(request): - try: logger.debug("Inside the rebuild method") required_fields = set(['topoIconImageSelect', 'topo_id', 'server_id']) @@ -561,11 +574,7 @@ def rebuild_server(request): return render(request, 'error.html', {'error': str(e)}) - - def create_snapshot_topo(request): - - """ :param request: Django request :param topology_id: id of the topology to export @@ -597,7 +606,7 @@ def create_snapshot_topo(request): # FIXME - verify all images are in glance before jumping off here! 
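create_snapshot_topo() ultimately lands on the HEAT snapshot endpoints wrapped by create_stack_snapshot() and rollback_snapshot(); a compact sketch of the two URL and payload shapes involved, with the endpoint prefix assumed to come from the existing create_heat_url() helper:

```python
# URL/payload shapes for the HEAT snapshot create and restore calls; every
# argument is a placeholder supplied elsewhere in openstackUtils.
import json


def snapshot_create_request(heat_base, tenant_id, stack_name, stack_id, snap_name):
    url = "%s/%s/stacks/%s/%s/snapshots" % (heat_base, tenant_id, stack_name, stack_id)
    return url, json.dumps({"name": snap_name})


def snapshot_restore_request(heat_base, tenant_id, stack_name, stack_id, snapshot_id):
    url = "%s/%s/stacks/%s/%s/snapshots/%s/restore" % (
        heat_base, tenant_id, stack_name, stack_id, snapshot_id)
    return url, ""  # restore is a POST with an empty body, as in rollback_snapshot()
```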
stack_name = topology.name.replace(' ', '_') - + logger.debug("-------------------stack_name--------------------: %s" % stack_name) if openstackUtils.connect_to_openstack(): logger.debug(openstackUtils.create_stack_snapshot(stack_name, tenant_id, snap_name)) From 42d9b224ab40af5711326453bb66c2de3b9921b0 Mon Sep 17 00:00:00 2001 From: anuragmenon2011 Date: Fri, 13 Apr 2018 10:36:46 -0400 Subject: [PATCH 04/10] Add files via upload --- ajax/templates/ajax/snapshot_list.html | 61 ++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 ajax/templates/ajax/snapshot_list.html diff --git a/ajax/templates/ajax/snapshot_list.html b/ajax/templates/ajax/snapshot_list.html new file mode 100644 index 0000000..f25ca61 --- /dev/null +++ b/ajax/templates/ajax/snapshot_list.html @@ -0,0 +1,61 @@ +{% extends "base.html" %} +{% block title %}Wistar - Lab Rat - Snapshot List{% endblock %} +{% load staticfiles %} +{% block content %} +
+ +

Stack Snapshot List

+
    + {% for message in messages %} +
  • {{ message }}
  • + {% endfor %} +
+
+ + + + + + + {% for snapshot in snapshot_list %} + + + + + + + {% endfor %} +
Name   Snapshot Id   Stack Name   Options
+ {{snapshot.name }} + + {{ snapshot.id }} + + {{ snapshot.stack_name }} + + + +
+ +
+{% endblock %} From c0d6debc185cb98f6ce6096243273e1be2bbd704 Mon Sep 17 00:00:00 2001 From: anuragmenon2011 Date: Fri, 13 Apr 2018 10:37:38 -0400 Subject: [PATCH 05/10] Add files via upload --- .../topologies/overlay/rebuild_instance.html | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 topologies/templates/topologies/overlay/rebuild_instance.html diff --git a/topologies/templates/topologies/overlay/rebuild_instance.html b/topologies/templates/topologies/overlay/rebuild_instance.html new file mode 100644 index 0000000..d31ce7d --- /dev/null +++ b/topologies/templates/topologies/overlay/rebuild_instance.html @@ -0,0 +1,62 @@ + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ X +
+

Rebuild Instance

+
+ Instance Name + + {{ instance_name }} +
+ Base Image + + +
+ + + + + +   + +
+
From a6b063ccdd75479b4201d27987546edb68f892df Mon Sep 17 00:00:00 2001 From: root Date: Thu, 3 May 2018 16:38:55 +0000 Subject: [PATCH 06/10] Pushing urls.py --- ajax/urls.py | 1 + topologies/urls.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/ajax/urls.py b/ajax/urls.py index 4683488..c85f00f 100644 --- a/ajax/urls.py +++ b/ajax/urls.py @@ -43,6 +43,7 @@ url(r'^deployTopology/$', views.deploy_topology, name='deployTopology'), url(r'^redeployTopology/$', views.redeploy_topology, name='redeployTopology'), url(r'^deployStack/(?P[^/]+)$', views.deploy_stack, name='deployStack'), + url(r'^updateStack/(?P[^/]+)$', views.update_stack, name='updateStack'), url(r'^deleteStack/(?P[^/]+)$', views.delete_stack, name='deleteStack'), url(r'^listSnapshot/(?P[^/]+)$', views.list_snapshot, name='listSnapshot'), diff --git a/topologies/urls.py b/topologies/urls.py index ce47536..ac816ef 100644 --- a/topologies/urls.py +++ b/topologies/urls.py @@ -31,6 +31,9 @@ url(r'^error/$', views.error, name='error'), url(r'^clone/(?P\d+)/$', views.clone, name='clone'), url(r'^createConfigSet/$', views.create_config_set, name='createConfigSet'), + url(r'^createSnapshot/$', views.create_snapshot_topo, name='createSnapshot'), + url(r'^rebuildInstance/$', views.rebuild_instance, name='rebuildInstance'), + url(r'^rebuildServer/$', views.rebuild_server, name='rebuildServer'), url(r'^delete/(?P\d+)/$', views.delete, name='delete'), url(r'^(?P\d+)/$', views.detail, name='detail'), url(r'^launch/(?P\d+)$', views.launch, name='launch'), From b59c3b5df070b4613c6a9b4a43e7adc43f51d8db Mon Sep 17 00:00:00 2001 From: nathan Date: Thu, 3 May 2018 18:09:54 +0000 Subject: [PATCH 07/10] add redirect from redeploy --- ajax/views.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/ajax/views.py b/ajax/views.py index 673a73a..5234308 100644 --- a/ajax/views.py +++ b/ajax/views.py @@ -861,11 +861,26 @@ def multi_clone_topology(request): def redeploy_topology(request): + logger.debug('=============================================================================================================================') + logger.debug('=============================================================================================================================') + logger.debug('=============================================================================================================================') + logger.debug('=============================================================================================================================') + logger.debug('=============================================================================================================================') + logger.debug('=============================================================================================================================') + logger.debug('=============================================================================================================================') + logger.debug('=============================================================================================================================') + logger.debug('=============================================================================================================================') required_fields = set(['json', 'topologyId']) if not required_fields.issubset(request.POST): return render(request, 'ajax/ajaxError.html', {'error': "No Topology Id in request"}) topology_id = request.POST['topologyId'] + logger.debug('HERE') + if configuration.deployment_backend 
== "openstack": + logger.info('Redirecting to update stack') + return HttpResponseRedirect('/ajax/updateStack/' + topology_id) + + logger.debug('STILL HERE') j = request.POST['json'] try: topo = Topology.objects.get(pk=topology_id) From 3b64c35d0051816f8b91e2dc0dca593ee053f3ba Mon Sep 17 00:00:00 2001 From: nathan Date: Thu, 3 May 2018 19:09:14 +0000 Subject: [PATCH 08/10] fix broken links in executeScript window --- ajax/templates/ajax/scriptOutput.html | 2 +- ajax/templates/ajax/scripts.html | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ajax/templates/ajax/scriptOutput.html b/ajax/templates/ajax/scriptOutput.html index 3d456ea..60f9c3d 100644 --- a/ajax/templates/ajax/scriptOutput.html +++ b/ajax/templates/ajax/scriptOutput.html @@ -16,7 +16,7 @@ - Manage Scripts + Manage Scripts diff --git a/ajax/templates/ajax/scripts.html b/ajax/templates/ajax/scripts.html index 804de90..388fa50 100644 --- a/ajax/templates/ajax/scripts.html +++ b/ajax/templates/ajax/scripts.html @@ -31,7 +31,7 @@ {% endfor %} - Manage Scripts + Manage Scripts From fbbdb2967d07cee2ce9f51b11b695e7c2cc3e225 Mon Sep 17 00:00:00 2001 From: nathan Date: Thu, 3 May 2018 19:10:10 +0000 Subject: [PATCH 09/10] trigger update_stack from redeploy_topology button --- .../ajax/openstackDeploymentStatus.html | 21 ++++--------- ajax/urls.py | 2 +- ajax/views.py | 31 ++++++++----------- topologies/templates/topologies/edit.html | 26 ++++++---------- 4 files changed, 29 insertions(+), 51 deletions(-) diff --git a/ajax/templates/ajax/openstackDeploymentStatus.html b/ajax/templates/ajax/openstackDeploymentStatus.html index 40fe605..312462c 100644 --- a/ajax/templates/ajax/openstackDeploymentStatus.html +++ b/ajax/templates/ajax/openstackDeploymentStatus.html @@ -17,7 +17,7 @@ {% if stack == None %} - + Not yet deployed to OpenStack! 
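
Taken together, patches 07 and 09 change what the Redeploy button does on the OpenStack backend: the updateStack URL no longer takes the topology id in the path but reads `topologyId` from the POST body, and `redeploy_topology` hands its own request object straight to `update_stack()` instead of issuing an HTTP redirect, so the in-place HEAT stack update happens in the same request cycle. A minimal sketch of exercising the new endpoint is below (Django test client; the topology primary key `1`, the already-deployed stack, and the expected 200 response are illustrative assumptions, not taken from the patch):

```python
# Hypothetical smoke test for the POST-based updateStack endpoint.
# Assumes Django settings are configured and a Topology with pk=1 exists
# and has already been deployed to OpenStack as a HEAT stack.
from django.test import Client

client = Client()

# After patch 09 the topology id travels in the POST body, not in the URL.
response = client.post('/ajax/updateStack/', {'topologyId': '1'})

# update_stack() regenerates the HEAT template, calls
# openstackUtils.update_stack_template(), and then renders the refreshed
# deployment status, so a plain 200 is the expected outcome here.
assert response.status_code == 200
```
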
@@ -37,7 +37,7 @@ Status - + {{ stack.stack_status }} @@ -45,7 +45,7 @@ Status Detail - + {{ stack.stack_status_reason }} @@ -100,13 +100,6 @@ title="Show HEAT">⚙   - -   - @@ -119,18 +112,16 @@ - + HEAT Snapshots - + Create Snapshots - - - +   List Snapshots diff --git a/ajax/urls.py b/ajax/urls.py index c85f00f..89eebff 100644 --- a/ajax/urls.py +++ b/ajax/urls.py @@ -43,7 +43,7 @@ url(r'^deployTopology/$', views.deploy_topology, name='deployTopology'), url(r'^redeployTopology/$', views.redeploy_topology, name='redeployTopology'), url(r'^deployStack/(?P[^/]+)$', views.deploy_stack, name='deployStack'), - url(r'^updateStack/(?P[^/]+)$', views.update_stack, name='updateStack'), + url(r'^updateStack/$', views.update_stack, name='updateStack'), url(r'^deleteStack/(?P[^/]+)$', views.delete_stack, name='deleteStack'), url(r'^listSnapshot/(?P[^/]+)$', views.list_snapshot, name='listSnapshot'), diff --git a/ajax/views.py b/ajax/views.py index 5234308..cdebcd6 100644 --- a/ajax/views.py +++ b/ajax/views.py @@ -861,26 +861,11 @@ def multi_clone_topology(request): def redeploy_topology(request): - logger.debug('=============================================================================================================================') - logger.debug('=============================================================================================================================') - logger.debug('=============================================================================================================================') - logger.debug('=============================================================================================================================') - logger.debug('=============================================================================================================================') - logger.debug('=============================================================================================================================') - logger.debug('=============================================================================================================================') - logger.debug('=============================================================================================================================') - logger.debug('=============================================================================================================================') required_fields = set(['json', 'topologyId']) if not required_fields.issubset(request.POST): return render(request, 'ajax/ajaxError.html', {'error': "No Topology Id in request"}) topology_id = request.POST['topologyId'] - logger.debug('HERE') - if configuration.deployment_backend == "openstack": - logger.info('Redirecting to update stack') - return HttpResponseRedirect('/ajax/updateStack/' + topology_id) - - logger.debug('STILL HERE') j = request.POST['json'] try: topo = Topology.objects.get(pk=topology_id) @@ -889,6 +874,10 @@ def redeploy_topology(request): except ObjectDoesNotExist: return render(request, 'ajax/ajaxError.html', {'error': "Topology doesn't exist"}) + if configuration.deployment_backend == "openstack": + logger.info('Redirecting to update stack') + return update_stack(request) + try: domains = libvirtUtils.get_domains_for_topology(topology_id) config = wistarUtils.load_config_from_topology_json(topo.json, topology_id) @@ -1499,12 +1488,19 @@ def delete_stack(request, topology_id): return HttpResponseRedirect('/topologies/' + topology_id + '/') -def update_stack(request, topology_id): + +def update_stack(request): """ 
:param request: Django request; the topology id is read from POST['topologyId'] :return: renders the refreshed deployment status """ + required_fields = set(['topologyId']) + if not required_fields.issubset(request.POST): + return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"}) + + topology_id = request.POST['topologyId'] + logger.debug("-----Inside update stack-----") try: topology = Topology.objects.get(pk=topology_id) @@ -1532,8 +1528,7 @@ def update_stack(request, topology_id): logger.debug(heat_template) logger.debug(openstackUtils.update_stack_template(stack_name, heat_template)) - - return HttpResponseRedirect('/topologies/' + topology_id + '/') + return refresh_deployment_status(request) except Exception as e: logger.debug("Caught Exception in deploy") diff --git a/topologies/templates/topologies/edit.html b/topologies/templates/topologies/edit.html index 2be1577..f4aecaa 100644 --- a/topologies/templates/topologies/edit.html +++ b/topologies/templates/topologies/edit.html @@ -2117,7 +2117,7 @@ - @@ -2125,26 +2125,18 @@ -
- - - +   + +   {% if global_config.deployment_backend == "openstack" and is_deployed == true %} - + {% endif %} +
+ Instance Tools
- -
-
+   -
-
-
- - - -
From e568b5c31d96f42eca427a5b401ca9f6ed0ed050 Mon Sep 17 00:00:00 2001 From: anuragmenon2011 Date: Fri, 4 May 2018 12:51:41 -0400 Subject: [PATCH 10/10] Update wistarUtils.py changes for fixed ips --- common/lib/wistarUtils.py | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/common/lib/wistarUtils.py b/common/lib/wistarUtils.py index ed2fa51..2bd0a04 100644 --- a/common/lib/wistarUtils.py +++ b/common/lib/wistarUtils.py @@ -406,10 +406,14 @@ def get_heat_json_from_topology_config_for_update(config, port_list): for p in device["interfaces"]: port = dict() port["port"] = dict() - if device["name"] + "_port" + str(index) in port_list: - port["port"]["get_resource"] = device["name"] + "_port" + str(index) + "_nora" - else: + + if index == 0: port["port"]["get_resource"] = device["name"] + "_port" + str(index) + else: + if device["name"] + "_port" + str(index) in port_list: + port["port"]["get_resource"] = device["name"] + "_port" + str(index) + "_nora" + else: + port["port"]["get_resource"] = device["name"] + "_port" + str(index) index += 1 dr["properties"]["networks"].append(port) @@ -523,19 +527,22 @@ def get_heat_json_from_topology_config_for_update(config, port_list): # disable port security on all other ports (in case this isn't set globally) p['port_security_enabled'] = False - if device["name"] + "_port" + str(index) in port_list: - p["name"] = device["name"] + "_port" + str(index) + "_nora" - else: + if index == 0: p["name"] = device["name"] + "_port" + str(index) - + else: + if device["name"] + "_port" + str(index) in port_list: + p["name"] = device["name"] + "_port" + str(index) + "_nora" + else: + p["name"] = device["name"] + "_port" + str(index) pr["properties"] = p - if device["name"] + "_port" + str(index) in port_list: - template["resources"][device["name"] + "_port" + str(index) + "_nora"] = pr - else: + if index == 0: template["resources"][device["name"] + "_port" + str(index)] = pr - - + else: + if device["name"] + "_port" + str(index) in port_list: + template["resources"][device["name"] + "_port" + str(index) + "_nora"] = pr + else: + template["resources"][device["name"] + "_port" + str(index)] = pr index += 1 return json.dumps(template)
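
The port handling added in this last patch applies the same three-way naming rule in three places inside `get_heat_json_from_topology_config_for_update(config, port_list)`: the server's `networks` entries, the port resource's `name` property, and the keys under `template["resources"]`. The rule itself is compact, and the sketch below isolates it using the same inputs the diff works with (a device name, an interface index, and the `port_list` passed into the function). Treat the interpretation — that port 0 keeps its name so its fixed IP survives a stack update, and that `_nora` marks ports that must be re-created rather than reused — as an inference from the "changes for fixed ips" commit message, not something the patch states.

```python
def heat_port_resource_name(device_name, index, port_list):
    """Resource name for the Nth port of a device in the updated HEAT template.

    Port 0 always keeps its original name (presumably so its fixed IP is left
    untouched on stack update); any later port whose original name already
    appears in ``port_list`` is renamed with a ``_nora`` suffix, while ports
    not in ``port_list`` keep their original name.
    """
    base_name = device_name + "_port" + str(index)
    if index == 0:
        return base_name
    if base_name in port_list:
        return base_name + "_nora"
    return base_name


# Example with a hypothetical device and two ports already in the stack:
existing = ["vmx01_port0", "vmx01_port1"]
for idx in range(3):
    print(idx, heat_port_resource_name("vmx01", idx, existing))
# 0 vmx01_port0
# 1 vmx01_port1_nora
# 2 vmx01_port2
```

Factoring the rule out into a helper like this would also let the three call sites in `get_heat_json_from_topology_config_for_update` share one code path instead of repeating the nested `if index == 0` / `if ... in port_list` branch.
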