diff --git a/playbooks/files/dynamic_inventory.py b/playbooks/files/dynamic_inventory.py new file mode 100755 index 000000000..6f63bffc4 --- /dev/null +++ b/playbooks/files/dynamic_inventory.py @@ -0,0 +1,282 @@ +#!/usr/bin/env python +# +# Copyright (c) 2024 Cisco and/or its affiliates. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type +__copyright__ = "Copyright (c) 2024 Cisco and/or its affiliates." +__author__ = "Allen Robel" + +import json +from os import environ + +""" +# Summary + +Optional dynamic inventory for the ansible-dcnm repository +integration tests. This inventory is built from environment +variables. + +## Usage + +### Mandatory general variables + +The following general environment variables are related +to credentials, controller reachability, and role/testcase +assignment. These should be considered mandatory, though +the NXOS_* variables are not strictly needed unless called +for by the specific role/testcase. + +Values below are examples, and should be modified for your +setup and the roles/testcases you are running. + +```bash +export ND_ROLE=dcnm_vrf # The role to run +export ND_TESTCASE=query # The testcase to run +export ND_IP4=10.1.1.1 # Controller IPv4 address +export ND_PASSWORD=MyPassword # Controller password +export ND_USERNAME=admin # Controller username +export NXOS_PASSWORD=MyPassword # Switch password +export NXOS_USERNAME=admin # Switch username +``` + +### Fabrics + +We can add more fabrics later as the need arises... + +```bash +export ND_FABRIC_1=MyFabric1 # Assigned to var fabric_1 +export ND_FABRIC_2=MyFabric2 # Assigned to var fabric_2 +export ND_FABRIC_3=MyFabric3 # Assigned to var fabric_3 + +``` + +### Interfaces + +Interface usage varies by testcase. See individual +testcase YAML files for details regarding each test's +usage. + +#### Interface naming convention + +##### Environment variables + +ND_INTERFACE_[A][b] + +Where: + +A - The number of the switch to which the interface belongs +b - An incrementing lower-case letter in range a-z + +###### Examples: + +```bash +export ND_INTERFACE_1a=Ethernet1/1 +export ND_INTERFACE_2a=Ethernet1/1 +export ND_INTERFACE_2b=Ethernet1/2 +export ND_INTERFACE_3a=Ethernet2/4 +``` + +Above: + +- switch_1 has one interface; Ethernet1/1 +- switch_2 has two interfaces; Ethernet1/1 and Ethernet1/2 +- switch_3 has one interface; Ethernet2/4 + +##### Test case variables + +Interface variables within test cases follow the same convention +as above, but are lowercase and omit the leading ND_. + +###### Examples + +interface_1a - 1st interface on switch_1 +interface_1b - 2nd interface on switch_1 +etc...
+ +""" +nd_role = environ.get("ND_ROLE", "dcnm_vrf") +nd_testcase = environ.get("ND_TESTCASE", "query") + +fabric_1 = environ.get("ND_FABRIC_1") +fabric_2 = environ.get("ND_FABRIC_2") +fabric_3 = environ.get("ND_FABRIC_3") + +nd_ip4 = environ.get("ND_IP4") +nd_password = environ.get("ND_PASSWORD") +nd_username = environ.get("ND_USERNAME", "admin") +nxos_password = environ.get("NXOS_PASSWORD") +nxos_username = environ.get("NXOS_USERNAME", "admin") + +# Base set of switches +bgw_1 = environ.get("ND_BGW_1_IP4", "10.1.1.211") +bgw_2 = environ.get("ND_BGW_2_IP4", "10.1.1.212") +leaf_1 = environ.get("ND_LEAF_1_IP4", "10.1.1.106") +leaf_2 = environ.get("ND_LEAF_2_IP4", "10.1.1.107") +leaf_3 = environ.get("ND_LEAF_3_IP4", "10.1.1.108") +leaf_4 = environ.get("ND_LEAF_4_IP4", "10.1.1.109") +spine_1 = environ.get("ND_SPINE_1_IP4", "10.1.1.112") +spine_2 = environ.get("ND_SPINE_2_IP4", "10.1.1.113") + +# Placeholders if you'd rather directly set each of +# the switch vars instead of setting the switch vars +# from the switch roles above. +switch_1 = environ.get("ND_SWITCH_1_IP4", "10.1.1.112") +switch_2 = environ.get("ND_SWITCH_2_IP4", "10.1.1.113") +switch_3 = environ.get("ND_SWITCH_3_IP4", "10.1.1.108") + +# Base set of interfaces +interface_1a = environ.get("ND_INTERFACE_1a", "Ethernet1/1") +interface_2a = environ.get("ND_INTERFACE_2a", "Ethernet1/1") +interface_2b = environ.get("ND_INTERFACE_2b", "Ethernet1/2") +interface_3a = environ.get("ND_INTERFACE_3a", "Ethernet1/3") + +if nd_role == "dcnm_vrf": + pass + # VXLAN/EVPN Fabric Name + # fabric_1 + # - all tests + # switch_1 + # - all tests + # - vrf capable + # switch_2 + # - all tests + # - vrf-lite capable + # switch_3 + # - merged + # - NOT vrf-lite capable + # interface_1a + # - no tests + # interface_2a + # - [deleted, merged, overridden, query, replaced, vrf_lite] + # - switch_2 VRF LITE extensions + # interface_2b + # - [vrf_lite] + # - switch_2 VRF LITE extensions + # interface_3a + # - [merged, vrf_lite] + # - switch_3 non-vrf-lite capable switch + # +elif nd_role == "vrf_lite": + # VXLAN/EVPN Fabric Name + # Uses fabric_1 + # switch_1: vrf-lite capable + switch_1 = spine_1 + # switch_2: vrf-lite capable + switch_2 = spine_2 + # switch_3: vrf capable + switch_3 = bgw_1 +elif nd_role == "scale": + pass +else: + switch_1 = leaf_1 + switch_2 = spine_1 + switch_3 = bgw_1 + +# output is printed to STDOUT, where ansible-playbook -i reads it. +# If you change any vars above, be sure to add them below. +# We'll clean this up as the integration test vars are standardized.
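The inventory above is consumed by `ansible-playbook -i` directly from the script's STDOUT, so it can be worth sanity-checking the emitted JSON before starting a test run. The sketch below is illustrative only and is not part of this diff; the relative script path and the particular groups/vars being checked are assumptions based on the structure shown in this file.

```python
# Hypothetical sanity check for the dynamic inventory (not part of this diff).
# Run the script the same way ansible-playbook -i would, parse its STDOUT,
# and confirm the expected top-level groups and a few vars are present.
import json
import subprocess

result = subprocess.run(
    ["python", "playbooks/files/dynamic_inventory.py"],  # assumed path
    capture_output=True,
    text=True,
    check=True,
)
inventory = json.loads(result.stdout)

for group in ("_meta", "all", "dcnm", "ndfc", "nxos"):
    assert group in inventory, f"missing group: {group}"

print(inventory["all"]["vars"]["fabric_1"])
print(inventory["dcnm"]["hosts"])
```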
+ +output = { + "_meta": {"hostvars": {}}, + "all": { + "children": ["ungrouped", "dcnm", "ndfc", "nxos"], + "vars": { + "ansible_httpapi_use_ssl": "true", + "ansible_httpapi_validate_certs": "false", + "ansible_password": nd_password, + "ansible_python_interpreter": "python", + "ansible_user": nd_username, + "testcase": nd_testcase, + "fabric_1": fabric_1, + "fabric_2": fabric_2, + "fabric_3": fabric_3, + "bgw1": bgw_1, + "bgw2": bgw_2, + "leaf1": leaf_1, + "leaf2": leaf_2, + "leaf_1": leaf_1, + "leaf_2": leaf_2, + "leaf3": leaf_3, + "leaf4": leaf_4, + "nxos_username": nxos_username, + "nxos_password": nxos_password, + "switch_password": nxos_password, + "switch_username": nxos_username, + "spine1": spine_1, + "spine2": spine_2, + "switch1": switch_1, + "switch2": switch_2, + "switch_1": switch_1, + "switch_2": switch_2, + "switch_3": switch_3, + "interface_1a": interface_1a, + "interface_2a": interface_2a, + "interface_2b": interface_2b, + "interface_3a": interface_3a, + }, + }, + "dcnm": { + "hosts": [nd_ip4], + "vars": { + "ansible_connection": "ansible.netcommon.httpapi", + "ansible_network_os": "cisco.dcnm.dcnm", + }, + }, + "ndfc": { + "hosts": [nd_ip4], + "vars": { + "ansible_connection": "ansible.netcommon.httpapi", + "ansible_network_os": "cisco.dcnm.dcnm", + }, + }, + "nxos": { + "children": [ + "bgw1", + "bgw2", + "leaf_1", + "leaf_2", + "leaf1", + "leaf2", + "leaf3", + "leaf4", + "spine1", + "spine2", + "switch1", + "switch2", + ], + "vars": { + "ansible_become": "true", + "ansible_become_method": "enable", + "ansible_connection": "ansible.netcommon.httpapi", + "ansible_network_os": "cisco.nxos.nxos", + }, + }, + "bgw1": {"hosts": [bgw_1]}, + "bgw2": {"hosts": [bgw_2]}, + "leaf_1": {"hosts": [leaf_1]}, + "leaf_2": {"hosts": [leaf_2]}, + "leaf1": {"hosts": [leaf_1]}, + "leaf2": {"hosts": [leaf_2]}, + "leaf3": {"hosts": [leaf_3]}, + "leaf4": {"hosts": [leaf_4]}, + "spine1": {"hosts": [spine_1]}, + "spine2": {"hosts": [spine_2]}, + "switch1": {"hosts": [switch_1]}, + "switch2": {"hosts": [switch_2]}, +} + +print(json.dumps(output)) diff --git a/playbooks/roles/dcnm_vrf/dcnm_hosts.yaml b/playbooks/roles/dcnm_vrf/dcnm_hosts.yaml new file mode 100644 index 000000000..109612797 --- /dev/null +++ b/playbooks/roles/dcnm_vrf/dcnm_hosts.yaml @@ -0,0 +1,15 @@ +all: + vars: + ansible_user: "admin" + ansible_password: "password-ndfc" + switch_password: "password-switch" + ansible_python_interpreter: python + ansible_httpapi_validate_certs: False + ansible_httpapi_use_ssl: True + children: + ndfc: + vars: + ansible_connection: ansible.netcommon.httpapi + ansible_network_os: cisco.dcnm.dcnm + hosts: + 192.168.1.1: diff --git a/playbooks/roles/dcnm_vrf/dcnm_tests.yaml b/playbooks/roles/dcnm_vrf/dcnm_tests.yaml new file mode 100644 index 000000000..e4e9c90aa --- /dev/null +++ b/playbooks/roles/dcnm_vrf/dcnm_tests.yaml @@ -0,0 +1,51 @@ +--- +# This playbook can be used to execute integration tests for +# the roles located in: +# +# REPO_ROOT/tests/integration/targets/dcnm_vrf/tests/dcnm/*.yaml +# +# Either: +# +# 1. Modify the following: +# +# - The vars section below with details for your testing setup. +# - dcnm_hosts.yaml in this directory +# +# 2. Run the tests +# +# ansible-playbook dcnm_tests.yaml -i dcnm_hosts.yaml +# +# OR: +# +# 1. Modify ../../files/dynamic_inventory.py to align with your setup +# +# This must contain the vars mentioned below and controller +# info from dcnm_hosts.yaml (modified for your setup) +# +# 2. 
Run the tests +# +# ansible-playbook dcnm_tests.yaml -i ../../files/dynamic_inventory.py +# +# +- hosts: dcnm + gather_facts: no + connection: ansible.netcommon.httpapi +# Uncomment and modify if not using dynamic_inventory.py +# See the individual test yaml files for a description of +# how each var below is used in each test. Some tests, +# for example, do not use interface_1. +# vars: + # fabric_1: f1 + # switch_1: 10.1.1.2 + # switch_2: 10.1.1.3 + # switch_3: 10.1.1.4 + # interface_1: Ethernet1/1 + # interface_2: Ethernet1/2 + # interface_3: Ethernet1/3 + ## Uncomment ONE of the following testcases + # testcase: deleted + # testcase: merged + # testcase: query + + roles: + - dcnm_vrf diff --git a/plugins/modules/dcnm_vrf.py b/plugins/modules/dcnm_vrf.py index f8c5e4645..f6a637c5a 100644 --- a/plugins/modules/dcnm_vrf.py +++ b/plugins/modules/dcnm_vrf.py @@ -16,9 +16,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -__author__ = ( - "Shrishail Kariyappanavar, Karthik Babu Harichandra Babu, Praveen Ramoorthy" -) +__author__ = "Shrishail Kariyappanavar, Karthik Babu Harichandra Babu, Praveen Ramoorthy, Allen Robel" DOCUMENTATION = """ --- @@ -562,70 +560,86 @@ - vrf_name: ansible-vrf-r1 - vrf_name: ansible-vrf-r2 """ - -import json -import time -import copy import ast +import copy +import inspect +import json +import logging import re -from ansible_collections.cisco.dcnm.plugins.module_utils.network.dcnm.dcnm import ( - get_fabric_inventory_details, - dcnm_send, - validate_list_of_dicts, - dcnm_get_ip_addr_info, - get_ip_sn_dict, - get_fabric_details, - get_ip_sn_fabric_dict, - dcnm_version_supported, - dcnm_get_url, -) +import time + from ansible.module_utils.basic import AnsibleModule +from ansible_collections.cisco.dcnm.plugins.module_utils.network.dcnm.dcnm import ( + dcnm_get_url, dcnm_send, dcnm_version_supported, get_fabric_details, + get_fabric_inventory_details, get_ip_sn_dict, get_ip_sn_fabric_dict, + validate_list_of_dicts) + +from ..module_utils.common.log_v2 import Log + +dcnm_vrf_paths = { + 11: { + "GET_VRF": "/rest/top-down/fabrics/{}/vrfs", + "GET_VRF_ATTACH": "/rest/top-down/fabrics/{}/vrfs/attachments?vrf-names={}", + "GET_VRF_SWITCH": "/rest/top-down/fabrics/{}/vrfs/switches?vrf-names={}&serial-numbers={}", + "GET_VRF_ID": "/rest/managed-pool/fabrics/{}/partitions/ids", + "GET_VLAN": "/rest/resource-manager/vlan/{}?vlanUsageType=TOP_DOWN_VRF_VLAN", + }, + 12: { + "GET_VRF": "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/fabrics/{}/vrfs", + "GET_VRF_ATTACH": "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/fabrics/{}/vrfs/attachments?vrf-names={}", + "GET_VRF_SWITCH": "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/fabrics/{}/vrfs/switches?vrf-names={}&serial-numbers={}", + "GET_VRF_ID": "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/fabrics/{}/vrfinfo", + "GET_VLAN": "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/resource-manager/vlan/{}?vlanUsageType=TOP_DOWN_VRF_VLAN", + }, +} class DcnmVrf: - dcnm_vrf_paths = { - 11: { - "GET_VRF": "/rest/top-down/fabrics/{}/vrfs", - "GET_VRF_ATTACH": "/rest/top-down/fabrics/{}/vrfs/attachments?vrf-names={}", - "GET_VRF_SWITCH": "/rest/top-down/fabrics/{}/vrfs/switches?vrf-names={}&serial-numbers={}", - "GET_VRF_ID": "/rest/managed-pool/fabrics/{}/partitions/ids", - "GET_VLAN": "/rest/resource-manager/vlan/{}?vlanUsageType=TOP_DOWN_VRF_VLAN", - }, - 12: { - "GET_VRF": "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/fabrics/{}/vrfs", - "GET_VRF_ATTACH": 
"/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/fabrics/{}/vrfs/attachments?vrf-names={}", - "GET_VRF_SWITCH": "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/fabrics/{}/vrfs/switches?vrf-names={}&serial-numbers={}", - "GET_VRF_ID": "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/top-down/fabrics/{}/vrfinfo", - "GET_VLAN": "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/resource-manager/vlan/{}?vlanUsageType=TOP_DOWN_VRF_VLAN", - }, - } - def __init__(self, module): + self.class_name = self.__class__.__name__ + + self.log = logging.getLogger(f"dcnm.{self.class_name}") self.module = module self.params = module.params + self.state = self.params.get("state") + + msg = f"self.state: {self.state}, " + msg += "self.params: " + msg += f"{json.dumps(self.params, indent=4, sort_keys=True)}" + self.log.debug(msg) + self.fabric = module.params["fabric"] self.config = copy.deepcopy(module.params.get("config")) + + msg = f"self.state: {self.state}, " + msg += "self.config: " + msg += f"{json.dumps(self.config, indent=4, sort_keys=True)}" + self.log.debug(msg) + self.check_mode = False self.have_create = [] self.want_create = [] self.diff_create = [] self.diff_create_update = [] - # This variable is created specifically to hold all the create payloads which are missing a - # vrfId. These payloads are sent to DCNM out of band (basically in the get_diff_merge()) - # We lose diffs for these without this variable. The content stored here will be helpful for - # cases like "check_mode" and to print diffs[] in the output of each task. + # self.diff_create_quick holds all the create payloads which are + # missing a vrfId. These payloads are sent to DCNM out of band + # (in the get_diff_merge()). We lose diffs for these without this + # variable. The content stored here will be helpful for cases like + # "check_mode" and to print diffs[] in the output of each task. self.diff_create_quick = [] self.have_attach = [] self.want_attach = [] self.diff_attach = [] self.validated = [] - # diff_detach is to list all attachments of a vrf being deleted, especially for state: OVERRIDDEN - # The diff_detach and delete operations have to happen before create+attach+deploy for vrfs being created. - # This is specifically to address cases where VLAN from a vrf which is being deleted is used for another - # vrf. Without this additional logic, the create+attach+deploy go out first and complain the VLAN is already - # in use. + # diff_detach contains all attachments of a vrf being deleted, + # especially for state: OVERRIDDEN + # The diff_detach and delete operations have to happen before + # create+attach+deploy for vrfs being created. This is to address + # cases where VLAN from a vrf which is being deleted is used for + # another vrf. Without this additional logic, the create+attach+deploy + # go out first and complain the VLAN is already in use. 
self.diff_detach = [] + self.have_deploy = {} + self.want_deploy = {} @@ -637,27 +651,139 @@ def __init__(self, module): self.dcnm_version = dcnm_version_supported(self.module) self.inventory_data = get_fabric_inventory_details(self.module, self.fabric) self.ip_sn, self.hn_sn = get_ip_sn_dict(self.inventory_data) + self.sn_ip = {value: key for (key, value) in self.ip_sn.items()} self.fabric_data = get_fabric_details(self.module, self.fabric) self.fabric_type = self.fabric_data.get("fabricType") self.ip_fab, self.sn_fab = get_ip_sn_fabric_dict(self.inventory_data) if self.dcnm_version > 12: - self.paths = self.dcnm_vrf_paths[12] + self.paths = dcnm_vrf_paths[12] else: - self.paths = self.dcnm_vrf_paths[self.dcnm_version] + self.paths = dcnm_vrf_paths[self.dcnm_version] - self.result = dict(changed=False, diff=[], response=[]) + self.result = {"changed": False, "diff": [], "response": []} self.failed_to_rollback = False self.WAIT_TIME_FOR_DELETE_LOOP = 5 # in seconds + self.vrf_lite_properties = [ + "DOT1Q_ID", + "IF_NAME", + "IP_MASK", + "IPV6_MASK", + "IPV6_NEIGHBOR", + "NEIGHBOR_IP", + "PEER_VRF_NAME", + ] + + msg = "DONE" + self.log.debug(msg) + + @staticmethod + def find_dict_in_list_by_key_value(search: list, key: str, value: str): + """ + # Summary + + Find a dictionary in a list of dictionaries. + + + ## Raises + + None + + ## Parameters + + - search: A list of dict + - key: The key to lookup in each dict + - value: The desired matching value for key + + ## Returns + + Either the first matching dict or None + + ## Usage + + ```python + content = [{"foo": "bar"}, {"foo": "baz"}] + + match = find_dict_in_list_by_key_value(search=content, key="foo", value="baz") + print(f"{match}") + # -> {"foo": "baz"} + + match = find_dict_in_list_by_key_value(search=content, key="foo", value="bingo") + print(f"{match}") + # -> None + ``` + """ + match = (d for d in search if d[key] == value) + return next(match, None) + + # pylint: disable=inconsistent-return-statements + def to_bool(self, key, dict_with_key): + """ + # Summary + + Given a dictionary and key, access dictionary[key] and + try to convert the value therein to a boolean. + + - If the value is a boolean, return it as-is. + - If the value is a boolean-like string (e.g. "false", + "True", etc.), return the value converted to boolean. + + ## Raises + + - Calls fail_json() if the value is not convertible to boolean. + """ + value = dict_with_key.get(key) + if value in ["false", "False", False]: + return False + if value in ["true", "True", True]: + return True + + caller = inspect.stack()[1][3] + + msg = f"caller: {caller}: " + msg += f"{str(value)} with type {type(value)} " + msg += "is not convertible to boolean" + self.module.fail_json(msg=msg) + + # pylint: enable=inconsistent-return-statements + @staticmethod + def compare_properties(dict1, dict2, property_list): + """ + Given two dictionaries and a list of keys: + + - Return True if all property values match.
+ - Return False otherwise + """ + for prop in property_list: + if dict1.get(prop) != dict2.get(prop): + return False + return True + def diff_for_attach_deploy(self, want_a, have_a, replace=False): + """ + # Summary + + Return attach_list, deploy_vrf + + Where: + + - attach list is a list of attachment differences + - deploy_vrf is a boolean + + ## Raises + + None + """ + msg = f"ENTERED with replace == {replace}" + self.log.debug(msg) attach_list = [] if not want_a: return attach_list - dep_vrf = False + deploy_vrf = False for want in want_a: found = False interface_match = False @@ -665,7 +791,9 @@ def diff_for_attach_deploy(self, want_a, have_a, replace=False): for have in have_a: if want["serialNumber"] == have["serialNumber"]: # handle instanceValues first - want.update({"freeformConfig": have["freeformConfig"]}) # copy freeformConfig from have as module is not managing it + want.update( + {"freeformConfig": have.get("freeformConfig", "")} + ) # copy freeformConfig from have as module is not managing it want_inst_values = {} have_inst_values = {} if ( @@ -675,19 +803,40 @@ def diff_for_attach_deploy(self, want_a, have_a, replace=False): want_inst_values = ast.literal_eval(want["instanceValues"]) have_inst_values = ast.literal_eval(have["instanceValues"]) - # update unsupported paramters using using have + # update unsupported parameters using have # Only need ipv4 or ipv6. Don't require both, but both can be supplied (as per the GUI) - want_inst_values.update({"loopbackId": have_inst_values["loopbackId"]}) + if "loopbackId" in have_inst_values: + want_inst_values.update( + {"loopbackId": have_inst_values["loopbackId"]} + ) if "loopbackIpAddress" in have_inst_values: - want_inst_values.update({"loopbackIpAddress": have_inst_values["loopbackIpAddress"]}) + want_inst_values.update( + { + "loopbackIpAddress": have_inst_values[ + "loopbackIpAddress" + ] + } + ) if "loopbackIpV6Address" in have_inst_values: - want_inst_values.update({"loopbackIpV6Address": have_inst_values["loopbackIpV6Address"]}) + want_inst_values.update( + { + "loopbackIpV6Address": have_inst_values[ + "loopbackIpV6Address" + ] + } + ) - want.update({"instanceValues": json.dumps(want_inst_values)}) + want.update( + {"instanceValues": json.dumps(want_inst_values)} + ) if ( want["extensionValues"] != "" and have["extensionValues"] != "" ): + + msg = "want[extensionValues] != '' and have[extensionValues] != ''" + self.log.debug(msg) + want_ext_values = want["extensionValues"] want_ext_values = ast.literal_eval(want_ext_values) have_ext_values = have["extensionValues"] @@ -696,7 +845,10 @@ def diff_for_attach_deploy(self, want_a, have_a, replace=False): want_e = ast.literal_eval(want_ext_values["VRF_LITE_CONN"]) have_e = ast.literal_eval(have_ext_values["VRF_LITE_CONN"]) - if replace and (len(want_e["VRF_LITE_CONN"]) != len(have_e["VRF_LITE_CONN"])): + if replace and ( + len(want_e["VRF_LITE_CONN"]) + != len(have_e["VRF_LITE_CONN"]) + ): # In case of replace/override if the length of want and have lite attach of a switch # is not same then we have to push the want to NDFC. 
No further check is required for # this switch @@ -706,59 +858,18 @@ def diff_for_attach_deploy(self, want_a, have_a, replace=False): for hlite in have_e["VRF_LITE_CONN"]: found = False interface_match = False - if wlite["IF_NAME"] == hlite["IF_NAME"]: - found = True - interface_match = True - if wlite["DOT1Q_ID"]: - if ( - wlite["DOT1Q_ID"] - != hlite["DOT1Q_ID"] - ): - found = False - break - - if wlite["IP_MASK"]: - if ( - wlite["IP_MASK"] - != hlite["IP_MASK"] - ): - found = False - break - - if wlite["NEIGHBOR_IP"]: - if ( - wlite["NEIGHBOR_IP"] - != hlite["NEIGHBOR_IP"] - ): - found = False - break - - if wlite["IPV6_MASK"]: - if ( - wlite["IPV6_MASK"] - != hlite["IPV6_MASK"] - ): - found = False - break - - if wlite["IPV6_NEIGHBOR"]: - if ( - wlite["IPV6_NEIGHBOR"] - != hlite["IPV6_NEIGHBOR"] - ): - found = False - break - - if wlite["PEER_VRF_NAME"]: - if ( - wlite["PEER_VRF_NAME"] - != hlite["PEER_VRF_NAME"] - ): - found = False - break - - if found: - break + if wlite["IF_NAME"] != hlite["IF_NAME"]: + continue + found = True + interface_match = True + if not self.compare_properties( + wlite, hlite, self.vrf_lite_properties + ): + found = False + break + + if found: + break if interface_match and not found: break @@ -781,26 +892,56 @@ def diff_for_attach_deploy(self, want_a, have_a, replace=False): found = True else: found = True + msg = f"want_is_deploy: {str(want.get('is_deploy'))}, " + msg += f"have_is_deploy: {str(have.get('is_deploy'))}" + self.log.debug(msg) - if want.get("isAttached") is not None: - if bool(have["isAttached"]) is not bool( - want["isAttached"] - ): + want_is_deploy = self.to_bool("is_deploy", want) + have_is_deploy = self.to_bool("is_deploy", have) + + msg = f"want_is_attached: {str(want.get('isAttached'))}, " + msg += ( + f"have_is_attached: {str(have.get('isAttached'))}" + ) + self.log.debug(msg) + + want_is_attached = self.to_bool("isAttached", want) + have_is_attached = self.to_bool("isAttached", have) + + if have_is_attached != want_is_attached: + + if "isAttached" in want: del want["isAttached"] - want["deployment"] = True - attach_list.append(want) - if bool(want["is_deploy"]): - dep_vrf = True - continue - if ((bool(want["deployment"]) is not bool(have["deployment"])) or - (bool(want["is_deploy"]) is not bool(have["is_deploy"]))): - if bool(want["is_deploy"]): - dep_vrf = True + want["deployment"] = True + attach_list.append(want) + if want_is_deploy is True: + if "isAttached" in want: + del want["isAttached"] + deploy_vrf = True + continue - for k, v in want_inst_values.items(): - if v != have_inst_values.get(k, ""): - found = False + msg = ( + f"want_deployment: {str(want.get('deployment'))}, " + ) + msg += ( + f"have_deployment: {str(have.get('deployment'))}" + ) + self.log.debug(msg) + + want_deployment = self.to_bool("deployment", want) + have_deployment = self.to_bool("deployment", have) + + if (want_deployment != have_deployment) or ( + want_is_deploy != have_is_deploy + ): + if want_is_deploy is True: + deploy_vrf = True + + if self.dict_values_differ(want_inst_values, have_inst_values): + msg = "dict values differ. 
Set found = False" + self.log.debug(msg) + found = False if found: break @@ -809,131 +950,208 @@ def diff_for_attach_deploy(self, want_a, have_a, replace=False): break if not found: - if bool(want["isAttached"]): + msg = f"isAttached: {str(want.get('isAttached'))}, " + msg += f"is_deploy: {str(want.get('is_deploy'))}" + self.log.debug(msg) + + if self.to_bool("isAttached", want): del want["isAttached"] want["deployment"] = True attach_list.append(want) - if bool(want["is_deploy"]): - dep_vrf = True - - return attach_list, dep_vrf - - def update_attach_params(self, attach, vrf_name, deploy, vlanId): + if self.to_bool("is_deploy", want): + deploy_vrf = True + + msg = "Returning " + msg += f"deploy_vrf: {deploy_vrf}, " + msg += "attach_list: " + msg += f"{json.dumps(attach_list, indent=4, sort_keys=True)}" + self.log.debug(msg) + return attach_list, deploy_vrf + + def update_attach_params_extension_values(self, attach) -> dict: + """ + # Summary + + Given an attachment object (see example below): + + - Return a populated extension_values dictionary + if the attachment object's vrf_lite parameter is + not null. + - Return an empty dictionary if the attachment object's + vrf_lite parameter is null. + + ## Raises + + Calls fail_json() if the vrf_lite parameter is not null + and the role of the switch in the attachment object is not + one of the various border roles. + + ## Example attach object + + - extensionValues content removed for brevity + - instanceValues content removed for brevity + + ```json + { + "deployment": true, + "export_evpn_rt": "", + "extensionValues": "{}", + "fabric": "f1", + "freeformConfig": "", + "import_evpn_rt": "", + "instanceValues": "{}", + "isAttached": true, + "is_deploy": true, + "serialNumber": "FOX2109PGCS", + "vlan": 500, + "vrfName": "ansible-vrf-int1", + "vrf_lite": [ + { + "dot1q": 2, + "interface": "Ethernet1/2", + "ipv4_addr": "10.33.0.2/30", + "ipv6_addr": "2010::10:34:0:7/64", + "neighbor_ipv4": "10.33.0.1", + "neighbor_ipv6": "2010::10:34:0:3", + "peer_vrf": "ansible-vrf-int1" + } + ] + } + ``` - vrf_ext = False + """ + method_name = inspect.stack()[0][3] + self.log.debug("ENTERED") - if not attach: + if not attach["vrf_lite"]: + msg = "Early return. No vrf_lite extensions to process." 
+ self.log.debug(msg) return {} - serial = "" - attach["ip_address"] = dcnm_get_ip_addr_info( - self.module, attach["ip_address"], None, None - ) - for ip, ser in self.ip_sn.items(): - if ip == attach["ip_address"]: - serial = ser + extension_values: dict = {} + extension_values["VRF_LITE_CONN"] = [] + ms_con: dict = {} + ms_con["MULTISITE_CONN"] = [] + extension_values["MULTISITE_CONN"] = json.dumps(ms_con) - if not serial: - self.module.fail_json( - msg="Fabric: {0} does not have the switch: {1}".format( - self.fabric, attach["ip_address"] - ) - ) + # Before applying the vrf_lite config, verify that the + # switch role begins with border role = self.inventory_data[attach["ip_address"]].get("switchRole") - if role.lower() == "spine" or role.lower() == "super spine": - msg = "VRFs cannot be attached to switch {0} with role {1}".format( - attach["ip_address"], role - ) + if not re.search(r"\bborder\b", role.lower()): + msg = f"{self.class_name}.{method_name}: " + msg += f"VRF LITE cannot be attached to switch {attach['ip_address']} " + msg += f"with role {role}" self.module.fail_json(msg=msg) - ext_values = {} - ext_values["VRF_LITE_CONN"] = [] - ms_con = {} - ms_con["MULTISITE_CONN"] = [] - ext_values["MULTISITE_CONN"] = json.dumps(ms_con) + for item in attach["vrf_lite"]: + + # If the playbook contains vrf lite parameters + # update the extension values. + vrf_lite_conn = {} + for param in self.vrf_lite_properties: + vrf_lite_conn[param] = "" + + if item["interface"]: + vrf_lite_conn["IF_NAME"] = item["interface"] + if item["dot1q"]: + vrf_lite_conn["DOT1Q_ID"] = str(item["dot1q"]) + if item["ipv4_addr"]: + vrf_lite_conn["IP_MASK"] = item["ipv4_addr"] + if item["neighbor_ipv4"]: + vrf_lite_conn["NEIGHBOR_IP"] = item["neighbor_ipv4"] + if item["ipv6_addr"]: + vrf_lite_conn["IPV6_MASK"] = item["ipv6_addr"] + if item["neighbor_ipv6"]: + vrf_lite_conn["IPV6_NEIGHBOR"] = item["neighbor_ipv6"] + if item["peer_vrf"]: + vrf_lite_conn["PEER_VRF_NAME"] = item["peer_vrf"] + + vrf_lite_conn["VRF_LITE_JYTHON_TEMPLATE"] = "Ext_VRF_Lite_Jython" + + msg = ( + f"vrf_lite_conn: {json.dumps(vrf_lite_conn, indent=4, sort_keys=True)}" + ) + self.log.debug(msg) - if attach["vrf_lite"]: - """Before apply the vrf_lite config, need double check if the swtich role is started wth Border""" - r = re.search(r"\bborder\b", role.lower()) - if not r: - msg = "VRF LITE cannot be attached to switch {0} with role {1}".format( - attach["ip_address"], role + vrf_lite_connections: dict = {} + vrf_lite_connections["VRF_LITE_CONN"] = [] + vrf_lite_connections["VRF_LITE_CONN"].append(copy.deepcopy(vrf_lite_conn)) + + if extension_values["VRF_LITE_CONN"]: + extension_values["VRF_LITE_CONN"]["VRF_LITE_CONN"].extend( + vrf_lite_connections["VRF_LITE_CONN"] ) - self.module.fail_json(msg=msg) + else: + extension_values["VRF_LITE_CONN"] = copy.deepcopy(vrf_lite_connections) - at_lite = attach["vrf_lite"] - for a_l in at_lite: - if ( - a_l["interface"] - or a_l["dot1q"] - or a_l["ipv4_addr"] - or a_l["neighbor_ipv4"] - or a_l["ipv6_addr"] - or a_l["neighbor_ipv6"] - or a_l["peer_vrf"] - ): + extension_values["VRF_LITE_CONN"] = json.dumps( + extension_values["VRF_LITE_CONN"] + ) - """if vrf lite elements are provided by the user in the playbook fill the extension values""" - vrflite_con = {} - vrflite_con["VRF_LITE_CONN"] = [] - vrflite_con["VRF_LITE_CONN"].append({}) + msg = "Returning extension_values: " + msg += f"{json.dumps(extension_values, indent=4, sort_keys=True)}" + self.log.debug(msg) - if a_l["interface"]: - 
vrflite_con["VRF_LITE_CONN"][0]["IF_NAME"] = a_l["interface"] - else: - vrflite_con["VRF_LITE_CONN"][0]["IF_NAME"] = "" + return copy.deepcopy(extension_values) - if a_l["dot1q"]: - vrflite_con["VRF_LITE_CONN"][0]["DOT1Q_ID"] = str(a_l["dot1q"]) - else: - vrflite_con["VRF_LITE_CONN"][0]["DOT1Q_ID"] = "" + def update_attach_params(self, attach, vrf_name, deploy, vlan_id) -> dict: + """ + # Summary - if a_l["ipv4_addr"]: - vrflite_con["VRF_LITE_CONN"][0]["IP_MASK"] = a_l["ipv4_addr"] - else: - vrflite_con["VRF_LITE_CONN"][0]["IP_MASK"] = "" + Turn an attachment object (attach) into a payload for the controller. - if a_l["neighbor_ipv4"]: - vrflite_con["VRF_LITE_CONN"][0]["NEIGHBOR_IP"] = a_l[ - "neighbor_ipv4" - ] - else: - vrflite_con["VRF_LITE_CONN"][0]["NEIGHBOR_IP"] = "" + ## Raises - if a_l["ipv6_addr"]: - vrflite_con["VRF_LITE_CONN"][0]["IPV6_MASK"] = a_l["ipv6_addr"] - else: - vrflite_con["VRF_LITE_CONN"][0]["IPV6_MASK"] = "" + Calls fail_json() if: - if a_l["neighbor_ipv6"]: - vrflite_con["VRF_LITE_CONN"][0]["IPV6_NEIGHBOR"] = a_l[ - "neighbor_ipv6" - ] - else: - vrflite_con["VRF_LITE_CONN"][0]["IPV6_NEIGHBOR"] = "" + - The switch in the attachment object is a spine + - If the vrf_lite object is not null, and the switch is not + a border switch + """ + self.log.debug("ENTERED") - if a_l["peer_vrf"]: - vrflite_con["VRF_LITE_CONN"][0]["PEER_VRF_NAME"] = a_l["peer_vrf"] - else: - vrflite_con["VRF_LITE_CONN"][0]["PEER_VRF_NAME"] = "" + if not attach: + msg = "Early return. No attachments to process." + self.log.debug(msg) + return {} - vrflite_con["VRF_LITE_CONN"][0][ - "VRF_LITE_JYTHON_TEMPLATE" - ] = "Ext_VRF_Lite_Jython" - if (ext_values["VRF_LITE_CONN"]): - ext_values["VRF_LITE_CONN"]["VRF_LITE_CONN"].extend(vrflite_con["VRF_LITE_CONN"]) - else: - ext_values["VRF_LITE_CONN"] = vrflite_con + # Not sure why this was needed? + # attach["ip_address"] = dcnm_get_ip_addr_info( + # self.module, attach["ip_address"], None, None + # ) + + serial = self.ip_to_serial_number(attach["ip_address"]) + + msg = f"ip_address: {attach['ip_address']}, " + msg += f"serial: {serial}, " + msg += f"attach: {json.dumps(attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + + if not serial: + msg = f"Fabric {self.fabric} does not contain switch " + msg += f"{attach['ip_address']}" + self.module.fail_json(msg=msg) + + role = self.inventory_data[attach["ip_address"]].get("switchRole") - ext_values["VRF_LITE_CONN"] = json.dumps(ext_values["VRF_LITE_CONN"]) + if role.lower() in ("spine", "super spine"): + msg = f"VRFs cannot be attached to switch {attach['ip_address']} " + msg += f"with role {role}" + self.module.fail_json(msg=msg) - vrf_ext = True + extension_values = self.update_attach_params_extension_values(attach) + if extension_values: + attach.update( + {"extensionValues": json.dumps(extension_values).replace(" ", "")} + ) + else: + attach.update({"extensionValues": ""}) attach.update({"fabric": self.fabric}) attach.update({"vrfName": vrf_name}) - attach.update({"vlan": vlanId}) + attach.update({"vlan": vlan_id}) # This flag is not to be confused for deploy of attachment. 
# "deployment" should be set True for attaching an attachment # and set to False for detaching an attachment @@ -941,11 +1159,9 @@ def update_attach_params(self, attach, vrf_name, deploy, vlanId): attach.update({"isAttached": True}) attach.update({"serialNumber": serial}) attach.update({"is_deploy": deploy}) - if vrf_ext: - attach.update({"extensionValues": json.dumps(ext_values).replace(" ", "")}) - else: - attach.update({"extensionValues": ""}) - # freeformConfig, loopbackId. loopbackIpAddress, and loopbackIpV6Address will be copied from have + + # freeformConfig, loopbackId, loopbackIpAddress, and loopbackIpV6Address + # will be copied from have attach.update({"freeformConfig": ""}) inst_values = { "loopbackId": "", @@ -953,18 +1169,54 @@ def update_attach_params(self, attach, vrf_name, deploy, vlanId): "loopbackIpV6Address": "", } if self.dcnm_version > 11: - inst_values.update({ - "switchRouteTargetImportEvpn": attach["import_evpn_rt"], - "switchRouteTargetExportEvpn": attach["export_evpn_rt"], - }) + inst_values.update( + { + "switchRouteTargetImportEvpn": attach["import_evpn_rt"], + "switchRouteTargetExportEvpn": attach["export_evpn_rt"], + } + ) attach.update({"instanceValues": json.dumps(inst_values).replace(" ", "")}) + if "deploy" in attach: del attach["deploy"] - del attach["ip_address"] + if "ip_address" in attach: + del attach["ip_address"] + + msg = "attach: " + msg += f"{json.dumps(attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + + return copy.deepcopy(attach) + + def dict_values_differ(self, dict1, dict2, skip_keys=None) -> bool: + """ + # Summary - return attach + Given two dictionaries and, optionally, a list of keys to skip: + + - Return True if the values for any (non-skipped) keys differs. + - Return False otherwise + """ + self.log.debug("ENTERED") + + if skip_keys is None: + skip_keys = [] + + for key in dict1.keys(): + if key in skip_keys: + continue + dict1_value = str(dict1[key]).lower() + dict2_value = str(dict2[key]).lower() + if dict1_value != dict2_value: + msg = f"Values differ: key {key} " + msg += f"dict1_value {dict1_value} != dict2_value {dict2_value}. 
" + msg += "returning True" + self.log.debug(msg) + return True + return False def diff_for_create(self, want, have): + self.log.debug("ENTERED") conf_changed = False if not have: @@ -975,183 +1227,47 @@ def diff_for_create(self, want, have): json_to_dict_want = json.loads(want["vrfTemplateConfig"]) json_to_dict_have = json.loads(have["vrfTemplateConfig"]) - vlanId_want = str(json_to_dict_want.get("vrfVlanId", "")) - vlanId_have = json_to_dict_have.get("vrfVlanId", "") - vlanName_want = json_to_dict_want.get("vrfVlanName", "") - vlanName_have = json_to_dict_have.get("vrfVlanName", "") - vlanIntf_want = json_to_dict_want.get("vrfIntfDescription", "") - vlanIntf_have = json_to_dict_have.get("vrfIntfDescription", "") - vrfDesc_want = json_to_dict_want.get("vrfDescription", "") - vrfDesc_have = json_to_dict_have.get("vrfDescription", "") - vrfMtu_want = str(json_to_dict_want.get("mtu", "")) - vrfMtu_have = json_to_dict_have.get("mtu", "") - vrfTag_want = str(json_to_dict_want.get("tag", "")) - vrfTag_have = json_to_dict_have.get("tag", "") - redRmap_want = json_to_dict_want.get("vrfRouteMap", "") - redRmap_have = json_to_dict_have.get("vrfRouteMap", "") - maxBgp_want = str(json_to_dict_want.get("maxBgpPaths", "")) - maxBgp_have = json_to_dict_have.get("maxBgpPaths", "") - maxiBgp_want = str(json_to_dict_want.get("maxIbgpPaths", "")) - maxiBgp_have = json_to_dict_have.get("maxIbgpPaths", "") - ipv6ll_want = str(json_to_dict_want.get("ipv6LinkLocalFlag", "")).lower() - ipv6ll_have = json_to_dict_have.get("ipv6LinkLocalFlag", "") - trmen_want = str(json_to_dict_want.get("trmEnabled", "")).lower() - trmen_have = json_to_dict_have.get("trmEnabled", "") - norp_want = str(json_to_dict_want.get("isRPAbsent", "")).lower() - norp_have = json_to_dict_have.get("isRPAbsent", "") - rpext_want = str(json_to_dict_want.get("isRPExternal", "")).lower() - rpext_have = json_to_dict_have.get("isRPExternal", "") - rpadd_want = json_to_dict_want.get("rpAddress", "") - rpadd_have = json_to_dict_have.get("rpAddress", "") - rploid_want = str(json_to_dict_want.get("loopbackNumber", "")) - rploid_have = json_to_dict_have.get("loopbackNumber", "") - mcastadd_want = json_to_dict_want.get("L3VniMcastGroup", "") - mcastadd_have = json_to_dict_have.get("L3VniMcastGroup", "") - mcastgrp_want = json_to_dict_want.get("multicastGroup", "") - mcastgrp_have = json_to_dict_have.get("multicastGroup", "") - trmBgwms_want = str(json_to_dict_want.get("trmBGWMSiteEnabled", "")).lower() - trmBgwms_have = json_to_dict_have.get("trmBGWMSiteEnabled", "") - advhrt_want = str(json_to_dict_want.get("advertiseHostRouteFlag", "")).lower() - advhrt_have = json_to_dict_have.get("advertiseHostRouteFlag", "") - advdrt_want = str(json_to_dict_want.get("advertiseDefaultRouteFlag", "")).lower() - advdrt_have = json_to_dict_have.get("advertiseDefaultRouteFlag", "") - constd_want = str(json_to_dict_want.get("configureStaticDefaultRouteFlag", "")).lower() - constd_have = json_to_dict_have.get("configureStaticDefaultRouteFlag", "") - bgppass_want = json_to_dict_want.get("bgpPassword", "") - bgppass_have = json_to_dict_have.get("bgpPassword", "") - bgppasskey_want = str(json_to_dict_want.get("bgpPasswordKeyType", "")) - bgppasskey_have = json_to_dict_have.get("bgpPasswordKeyType", "") - nfen_want = str(json_to_dict_want.get("ENABLE_NETFLOW", "")).lower() - nfen_have = json_to_dict_have.get("ENABLE_NETFLOW", "") - nfmon_want = json_to_dict_want.get("NETFLOW_MONITOR", "") - nfmon_have = json_to_dict_have.get("NETFLOW_MONITOR", "") - disrtauto_want = 
str(json_to_dict_want.get("disableRtAuto", "")).lower() - disrtauto_have = json_to_dict_have.get("disableRtAuto", "") - rtvpnImp_want = json_to_dict_want.get("routeTargetImport", "") - rtvpnImp_have = json_to_dict_have.get("routeTargetImport", "") - rtvpnExp_want = json_to_dict_want.get("routeTargetExport", "") - rtvpnExp_have = json_to_dict_have.get("routeTargetExport", "") - rtevpnImp_want = json_to_dict_want.get("routeTargetImportEvpn", "") - rtevpnImp_have = json_to_dict_have.get("routeTargetImportEvpn", "") - rtevpnExp_want = json_to_dict_want.get("routeTargetExportEvpn", "") - rtevpnExp_have = json_to_dict_have.get("routeTargetExportEvpn", "") - rtmvpnImp_want = json_to_dict_want.get("routeTargetImportMvpn", "") - rtmvpnImp_have = json_to_dict_have.get("routeTargetImportMvpn", "") - rtmvpnExp_want = json_to_dict_want.get("routeTargetExportMvpn", "") - rtmvpnExp_have = json_to_dict_have.get("routeTargetExportMvpn", "") - - if vlanId_want != "0": - - if want["vrfId"] is not None and have["vrfId"] != want["vrfId"]: - self.module.fail_json( - msg="vrf_id for vrf:{0} cant be updated to a different value".format( - want["vrfName"] - ) - ) - elif ( - have["serviceVrfTemplate"] != want["serviceVrfTemplate"] - or have["vrfTemplate"] != want["vrfTemplate"] - or have["vrfExtensionTemplate"] != want["vrfExtensionTemplate"] - or vlanId_have != vlanId_want - or vlanName_have != vlanName_want - or vlanIntf_have != vlanIntf_want - or vrfDesc_have != vrfDesc_want - or vrfMtu_have != vrfMtu_want - or vrfTag_have != vrfTag_want - or redRmap_have != redRmap_want - or maxBgp_have != maxBgp_want - or maxiBgp_have != maxiBgp_want - or ipv6ll_have != ipv6ll_want - or trmen_have != trmen_want - or norp_have != norp_want - or rpext_have != rpext_want - or rpadd_have != rpadd_want - or rploid_have != rploid_want - or mcastadd_have != mcastadd_want - or mcastgrp_have != mcastgrp_want - or trmBgwms_have != trmBgwms_want - or advhrt_have != advhrt_want - or advdrt_have != advdrt_want - or constd_have != constd_want - or bgppass_have != bgppass_want - or bgppasskey_have != bgppasskey_want - or nfen_have != nfen_want - or nfmon_have != nfmon_want - or disrtauto_have != disrtauto_want - or rtvpnImp_have != rtvpnImp_want - or rtvpnExp_have != rtvpnExp_want - or rtevpnImp_have != rtevpnImp_want - or rtevpnExp_have != rtevpnExp_want - or rtmvpnImp_have != rtmvpnImp_want - or rtmvpnExp_have != rtmvpnExp_want - ): - - conf_changed = True - if want["vrfId"] is None: - # The vrf updates with missing vrfId will have to use existing - # vrfId from the instance of the same vrf on DCNM. 
- want["vrfId"] = have["vrfId"] - create = want - else: - pass + # vlan_id_want drives the conditional below, so we cannot + # remove it here (as we did with the other params that are + # compared in the call to self.dict_values_differ()) + vlan_id_want = str(json_to_dict_want.get("vrfVlanId", "")) + + skip_keys = [] + if vlan_id_want == "0": + skip_keys = ["vrfVlanId"] + templates_differ = self.dict_values_differ( + json_to_dict_want, json_to_dict_have, skip_keys=skip_keys + ) + + msg = f"templates_differ: {templates_differ}, " + msg += f"vlan_id_want: {vlan_id_want}" + self.log.debug(msg) + + if want["vrfId"] is not None and have["vrfId"] != want["vrfId"]: + msg = f"{self.class_name}.diff_for_create: " + msg += f"vrf_id for vrf {want['vrfName']} cannot be updated to " + msg += "a different value" + self.module.fail_json(msg=msg) + + elif templates_differ: + conf_changed = True + if want["vrfId"] is None: + # The vrf updates with missing vrfId will have to use existing + # vrfId from the instance of the same vrf on DCNM. + want["vrfId"] = have["vrfId"] + create = want else: + pass - if want["vrfId"] is not None and have["vrfId"] != want["vrfId"]: - self.module.fail_json( - msg="vrf_id for vrf:{0} cant be updated to a different value".format( - want["vrfName"] - ) - ) - elif ( - have["serviceVrfTemplate"] != want["serviceVrfTemplate"] - or have["vrfTemplate"] != want["vrfTemplate"] - or have["vrfExtensionTemplate"] != want["vrfExtensionTemplate"] - or vlanName_have != vlanName_want - or vlanIntf_have != vlanIntf_want - or vrfDesc_have != vrfDesc_want - or vrfMtu_have != vrfMtu_want - or vrfTag_have != vrfTag_want - or redRmap_have != redRmap_want - or maxBgp_have != maxBgp_want - or maxiBgp_have != maxiBgp_want - or ipv6ll_have != ipv6ll_want - or trmen_have != trmen_want - or norp_have != norp_want - or rpext_have != rpext_want - or rpadd_have != rpadd_want - or rploid_have != rploid_want - or mcastadd_have != mcastadd_want - or mcastgrp_have != mcastgrp_want - or trmBgwms_have != trmBgwms_want - or advhrt_have != advhrt_want - or advdrt_have != advdrt_want - or constd_have != constd_want - or bgppass_have != bgppass_want - or bgppasskey_have != bgppasskey_want - or nfen_have != nfen_want - or nfmon_have != nfmon_want - or disrtauto_have != disrtauto_want - or rtvpnImp_have != rtvpnImp_want - or rtvpnExp_have != rtvpnExp_want - or rtevpnImp_have != rtevpnImp_want - or rtevpnExp_have != rtevpnExp_want - or rtmvpnImp_have != rtmvpnImp_want - or rtmvpnExp_have != rtmvpnExp_want - ): - - conf_changed = True - if want["vrfId"] is None: - # The vrf updates with missing vrfId will have to use existing - # vrfId from the instance of the same vrf on DCNM. 
- want["vrfId"] = have["vrfId"] - create = want - else: - pass + msg = f"returning conf_changed: {conf_changed}, " + msg += f"create: {create}" + self.log.debug(msg) return create, conf_changed - def update_create_params(self, vrf, vlanId=""): + def update_create_params(self, vrf, vlan_id=""): + self.log.debug("ENTERED") if not vrf: return vrf @@ -1177,7 +1293,7 @@ def update_create_params(self, vrf, vlanId=""): template_conf = { "vrfSegmentId": vrf.get("vrf_id", None), "vrfName": vrf["vrf_name"], - "vrfVlanId": vlanId, + "vrfVlanId": vlan_id, "vrfVlanName": vrf.get("vrf_vlan_name", ""), "vrfIntfDescription": vrf.get("vrf_intf_desc", ""), "vrfDescription": vrf.get("vrf_description", ""), @@ -1216,26 +1332,59 @@ def update_create_params(self, vrf, vlanId=""): return vrf_upd - def get_have(self): - - have_create = [] - have_deploy = {} + def get_vrf_objects(self) -> dict: + """ + # Summary - curr_vrfs = "" - - method = "GET" + Retrieve all VRF objects from the controller + """ path = self.paths["GET_VRF"].format(self.fabric) - vrf_objects = dcnm_send(self.module, method, path) + vrf_objects = dcnm_send(self.module, "GET", path) missing_fabric, not_ok = self.handle_response(vrf_objects, "query_dcnm") if missing_fabric or not_ok: - msg1 = "Fabric {0} not present on DCNM".format(self.fabric) - msg2 = "Unable to find vrfs under fabric: {0}".format(self.fabric) - + msg1 = f"Fabric {self.fabric} not present on the controller" + msg2 = f"Unable to find vrfs under fabric: {self.fabric}" self.module.fail_json(msg=msg1 if missing_fabric else msg2) + return copy.deepcopy(vrf_objects) + + def get_vrf_lite_objects(self, attach) -> dict: + """ + # Summary + + Retrieve the IP/Interface that is connected to the switch with serial_number + + attach must contain at least the following keys: + + - fabric: The fabric to search + - serialNumber: The serial_number of the switch + - vrfName: The vrf to search + """ + self.log.debug("ENTERED") + + verb = "GET" + path = self.paths["GET_VRF_SWITCH"].format( + attach["fabric"], attach["vrfName"], attach["serialNumber"] + ) + msg = f"verb: {verb}, path: {path}" + self.log.debug(msg) + lite_objects = dcnm_send(self.module, verb, path) + + return copy.deepcopy(lite_objects) + + def get_have(self): + self.log.debug("ENTERED") + + have_create = [] + have_deploy = {} + + curr_vrfs = "" + + vrf_objects = self.get_vrf_objects() + if not vrf_objects.get("DATA"): return @@ -1275,9 +1424,15 @@ def get_have(self): "L3VniMcastGroup": json_to_dict.get("L3VniMcastGroup", ""), "multicastGroup": json_to_dict.get("multicastGroup", ""), "trmBGWMSiteEnabled": json_to_dict.get("trmBGWMSiteEnabled", False), - "advertiseHostRouteFlag": json_to_dict.get("advertiseHostRouteFlag", False), - "advertiseDefaultRouteFlag": json_to_dict.get("advertiseDefaultRouteFlag", True), - "configureStaticDefaultRouteFlag": json_to_dict.get("configureStaticDefaultRouteFlag", True), + "advertiseHostRouteFlag": json_to_dict.get( + "advertiseHostRouteFlag", False + ), + "advertiseDefaultRouteFlag": json_to_dict.get( + "advertiseDefaultRouteFlag", True + ), + "configureStaticDefaultRouteFlag": json_to_dict.get( + "configureStaticDefaultRouteFlag", True + ), "bgpPassword": json_to_dict.get("bgpPassword", ""), "bgpPasswordKeyType": json_to_dict.get("bgpPasswordKeyType", ""), } @@ -1287,12 +1442,24 @@ def get_have(self): t_conf.update(ENABLE_NETFLOW=json_to_dict.get("ENABLE_NETFLOW", False)) t_conf.update(NETFLOW_MONITOR=json_to_dict.get("NETFLOW_MONITOR", "")) 
t_conf.update(disableRtAuto=json_to_dict.get("disableRtAuto", False)) - t_conf.update(routeTargetImport=json_to_dict.get("routeTargetImport", "")) - t_conf.update(routeTargetExport=json_to_dict.get("routeTargetExport", "")) - t_conf.update(routeTargetImportEvpn=json_to_dict.get("routeTargetImportEvpn", "")) - t_conf.update(routeTargetExportEvpn=json_to_dict.get("routeTargetExportEvpn", "")) - t_conf.update(routeTargetImportMvpn=json_to_dict.get("routeTargetImportMvpn", "")) - t_conf.update(routeTargetExportMvpn=json_to_dict.get("routeTargetExportMvpn", "")) + t_conf.update( + routeTargetImport=json_to_dict.get("routeTargetImport", "") + ) + t_conf.update( + routeTargetExport=json_to_dict.get("routeTargetExport", "") + ) + t_conf.update( + routeTargetImportEvpn=json_to_dict.get("routeTargetImportEvpn", "") + ) + t_conf.update( + routeTargetExportEvpn=json_to_dict.get("routeTargetExportEvpn", "") + ) + t_conf.update( + routeTargetImportMvpn=json_to_dict.get("routeTargetImportMvpn", "") + ) + t_conf.update( + routeTargetExportMvpn=json_to_dict.get("routeTargetExportMvpn", "") + ) vrf.update({"vrfTemplateConfig": json.dumps(t_conf)}) del vrf["vrfStatus"] @@ -1304,12 +1471,12 @@ def get_have(self): if not vrf_attach.get("lanAttachList"): continue attach_list = vrf_attach["lanAttachList"] - dep_vrf = "" + deploy_vrf = "" for attach in attach_list: attach_state = False if attach["lanAttachState"] == "NA" else True deploy = attach["isLanAttached"] deployed = False - if bool(deploy) and ( + if deploy and ( attach["lanAttachState"] == "OUT-OF-SYNC" or attach["lanAttachState"] == "PENDING" ): @@ -1317,17 +1484,17 @@ def get_have(self): else: deployed = True - if bool(deployed): - dep_vrf = attach["vrfName"] + if deployed: + deploy_vrf = attach["vrfName"] sn = attach["switchSerialNo"] vlan = attach["vlanId"] inst_values = attach.get("instanceValues", None) - # The deletes and updates below are done to update the incoming dictionary format to - # match to what the outgoing payload requirements mandate. - # Ex: 'vlanId' in the attach section of incoming payload needs to be changed to 'vlan' - # on the attach section of outgoing payload. + # The deletes and updates below are done to update the incoming + # dictionary format to align with the outgoing payload requirements. + # Ex: 'vlanId' in the attach section of the incoming payload needs to + # be changed to 'vlan' on the attach section of outgoing payload. del attach["vlanId"] del attach["switchSerialNo"] @@ -1348,90 +1515,60 @@ def get_have(self): attach.update({"isAttached": attach_state}) attach.update({"is_deploy": deployed}) - """ Get the VRF LITE extension template and update it to the attach['extensionvalues']""" + # Get the VRF LITE extension template and update it + # with the attach['extensionvalues'] - """Get the IP/Interface that is connected to edge router can be get from below query""" - method = "GET" - path = self.paths["GET_VRF_SWITCH"].format( - self.fabric, attach["vrfName"], sn - ) - - lite_objects = dcnm_send(self.module, method, path) + lite_objects = self.get_vrf_lite_objects(attach) if not lite_objects.get("DATA"): + msg = "Early return. 
lite_objects missing DATA" + self.log.debug(msg) return for sdl in lite_objects["DATA"]: for epv in sdl["switchDetailsList"]: - if epv.get("extensionValues"): - ext_values = epv["extensionValues"] - ext_values = ast.literal_eval(ext_values) - if ext_values.get("VRF_LITE_CONN") is not None: - ext_values = ast.literal_eval( - ext_values["VRF_LITE_CONN"] - ) - extension_values = {} - extension_values["VRF_LITE_CONN"] = [] - - for ev in ext_values.get("VRF_LITE_CONN"): - vrflite_con = {} - - vrflite_con["VRF_LITE_CONN"] = [] - vrflite_con["VRF_LITE_CONN"].append({}) - vrflite_con["VRF_LITE_CONN"][0]["IF_NAME"] = ev[ - "IF_NAME" - ] - vrflite_con["VRF_LITE_CONN"][0]["DOT1Q_ID"] = str( - ev["DOT1Q_ID"] - ) - vrflite_con["VRF_LITE_CONN"][0]["IP_MASK"] = ev[ - "IP_MASK" - ] - vrflite_con["VRF_LITE_CONN"][0]["NEIGHBOR_IP"] = ev[ - "NEIGHBOR_IP" - ] - vrflite_con["VRF_LITE_CONN"][0]["IPV6_MASK"] = ev[ - "IPV6_MASK" - ] - vrflite_con["VRF_LITE_CONN"][0][ - "IPV6_NEIGHBOR" - ] = ev["IPV6_NEIGHBOR"] - - vrflite_con["VRF_LITE_CONN"][0][ - "AUTO_VRF_LITE_FLAG" - ] = "false" - vrflite_con["VRF_LITE_CONN"][0][ - "PEER_VRF_NAME" - ] = ev["PEER_VRF_NAME"] - vrflite_con["VRF_LITE_CONN"][0][ - "VRF_LITE_JYTHON_TEMPLATE" - ] = "Ext_VRF_Lite_Jython" - - if (extension_values["VRF_LITE_CONN"]): - extension_values["VRF_LITE_CONN"]["VRF_LITE_CONN"].extend(vrflite_con["VRF_LITE_CONN"]) - else: - extension_values["VRF_LITE_CONN"] = vrflite_con - - extension_values["VRF_LITE_CONN"] = json.dumps( - extension_values["VRF_LITE_CONN"] - ) + if not epv.get("extensionValues"): + attach.update({"freeformConfig": ""}) + continue + ext_values = ast.literal_eval(epv["extensionValues"]) + if ext_values.get("VRF_LITE_CONN") is None: + continue + ext_values = ast.literal_eval(ext_values["VRF_LITE_CONN"]) + extension_values = {} + extension_values["VRF_LITE_CONN"] = [] - ms_con = {} - ms_con["MULTISITE_CONN"] = [] - extension_values["MULTISITE_CONN"] = json.dumps( - ms_con - ) - e_values = json.dumps(extension_values).replace( - " ", "" - ) + for ev in ext_values.get("VRF_LITE_CONN"): + ev_dict = copy.deepcopy(ev) + ev_dict.update({"AUTO_VRF_LITE_FLAG": "false"}) + ev_dict.update( + {"VRF_LITE_JYTHON_TEMPLATE": "Ext_VRF_Lite_Jython"} + ) + + if extension_values["VRF_LITE_CONN"]: + extension_values["VRF_LITE_CONN"][ + "VRF_LITE_CONN" + ].extend([ev_dict]) + else: + extension_values["VRF_LITE_CONN"] = { + "VRF_LITE_CONN": [ev_dict] + } + + extension_values["VRF_LITE_CONN"] = json.dumps( + extension_values["VRF_LITE_CONN"] + ) - attach.update({"extensionValues": e_values}) + ms_con = {} + ms_con["MULTISITE_CONN"] = [] + extension_values["MULTISITE_CONN"] = json.dumps(ms_con) + e_values = json.dumps(extension_values).replace(" ", "") + + attach.update({"extensionValues": e_values}) ff_config = epv.get("freeformConfig", "") attach.update({"freeformConfig": ff_config}) - if dep_vrf: - upd_vrfs += dep_vrf + "," + if deploy_vrf: + upd_vrfs += deploy_vrf + "," have_attach = vrf_attach_objects["DATA"] @@ -1442,52 +1579,105 @@ def get_have(self): self.have_attach = have_attach self.have_deploy = have_deploy + msg = "self.have_create: " + msg += f"{json.dumps(self.have_create, indent=4)}" + self.log.debug(msg) + + # json.dumps() here breaks unit tests since self.have_attach is + # a MagicMock and not JSON serializable. 
+ msg = "self.have_attach: " + msg += f"{self.have_attach}" + self.log.debug(msg) + + msg = "self.have_deploy: " + msg += f"{json.dumps(self.have_deploy, indent=4)}" + self.log.debug(msg) + def get_want(self): + method_name = inspect.stack()[0][3] + self.log.debug("ENTERED") want_create = [] want_attach = [] want_deploy = {} - all_vrfs = "" + msg = "self.config " + msg += f"{json.dumps(self.config, indent=4)}" + self.log.debug(msg) - if not self.config: - return + all_vrfs = [] + + msg = "self.validated: " + msg += f"{json.dumps(self.validated, indent=4, sort_keys=True)}" + self.log.debug(msg) for vrf in self.validated: + vrf_name = vrf.get("vrf_name") + if not vrf_name: + msg = f"{self.class_name}.{method_name}: " + msg += f"vrf missing mandatory key vrf_name: {vrf}" + self.module.fail_json(msg=msg) + + all_vrfs.append(vrf_name) vrf_attach = {} vrfs = [] vrf_deploy = vrf.get("deploy", True) if vrf.get("vlan_id"): - vlanId = vrf.get("vlan_id") + vlan_id = vrf.get("vlan_id") else: - vlanId = 0 + vlan_id = 0 - want_create.append(self.update_create_params(vrf, vlanId)) + want_create.append(self.update_create_params(vrf, vlan_id)) if not vrf.get("attach"): + msg = f"No attachments for vrf {vrf_name}. Skipping." + self.log.debug(msg) continue for attach in vrf["attach"]: deploy = vrf_deploy vrfs.append( - self.update_attach_params(attach, vrf["vrf_name"], deploy, vlanId) + self.update_attach_params(attach, vrf_name, deploy, vlan_id) ) if vrfs: - vrf_attach.update({"vrfName": vrf["vrf_name"]}) + vrf_attach.update({"vrfName": vrf_name}) vrf_attach.update({"lanAttachList": vrfs}) want_attach.append(vrf_attach) - all_vrfs += vrf["vrf_name"] + "," - if all_vrfs: - want_deploy.update({"vrfNames": all_vrfs[:-1]}) + vrf_names = ",".join(all_vrfs) + want_deploy.update({"vrfNames": vrf_names}) + + self.want_create = copy.deepcopy(want_create) + self.want_attach = copy.deepcopy(want_attach) + self.want_deploy = copy.deepcopy(want_deploy) + + msg = "self.want_create: " + msg += f"{json.dumps(self.want_create, indent=4)}" + self.log.debug(msg) - self.want_create = want_create - self.want_attach = want_attach - self.want_deploy = want_deploy + msg = "self.want_attach: " + msg += f"{json.dumps(self.want_attach, indent=4)}" + self.log.debug(msg) + + msg = "self.want_deploy: " + msg += f"{json.dumps(self.want_deploy, indent=4)}" + self.log.debug(msg) def get_diff_delete(self): + self.log.debug("ENTERED") + + @staticmethod + def get_items_to_detach(attach_list): + detach_list = [] + for item in attach_list: + if "isAttached" in item: + if item["isAttached"]: + del item["isAttached"] + item.update({"deployment": False}) + detach_list.append(item) + return detach_list diff_detach = [] diff_undeploy = {} @@ -1498,54 +1688,35 @@ def get_diff_delete(self): if self.config: for want_c in self.want_create: - if not next( - ( - have_c - for have_c in self.have_create - if have_c["vrfName"] == want_c["vrfName"] - ), - None, + + if not self.find_dict_in_list_by_key_value( + search=self.have_create, key="vrfName", value=want_c["vrfName"] ): continue + diff_delete.update({want_c["vrfName"]: "DEPLOYED"}) - have_a = next( - ( - attach - for attach in self.have_attach - if attach["vrfName"] == want_c["vrfName"] - ), - None, + have_a = self.find_dict_in_list_by_key_value( + search=self.have_attach, key="vrfName", value=want_c["vrfName"] ) if not have_a: continue - to_del = [] - atch_h = have_a["lanAttachList"] - for a_h in atch_h: - if a_h["isAttached"]: - del a_h["isAttached"] - a_h.update({"deployment": False}) - 
to_del.append(a_h) - if to_del: - have_a.update({"lanAttachList": to_del}) + detach_items = get_items_to_detach(have_a["lanAttachList"]) + if detach_items: + have_a.update({"lanAttachList": detach_items}) diff_detach.append(have_a) all_vrfs += have_a["vrfName"] + "," if all_vrfs: diff_undeploy.update({"vrfNames": all_vrfs[:-1]}) else: + for have_a in self.have_attach: - to_del = [] - atch_h = have_a["lanAttachList"] - for a_h in atch_h: - if a_h["isAttached"]: - del a_h["isAttached"] - a_h.update({"deployment": False}) - to_del.append(a_h) - if to_del: - have_a.update({"lanAttachList": to_del}) + detach_items = get_items_to_detach(have_a["lanAttachList"]) + if detach_items: + have_a.update({"lanAttachList": detach_items}) diff_detach.append(have_a) all_vrfs += have_a["vrfName"] + "," @@ -1557,39 +1728,45 @@ def get_diff_delete(self): self.diff_undeploy = diff_undeploy self.diff_delete = diff_delete + msg = "self.diff_detach: " + msg += f"{json.dumps(self.diff_detach, indent=4)}" + self.log.debug(msg) + + msg = "self.diff_undeploy: " + msg += f"{json.dumps(self.diff_undeploy, indent=4)}" + self.log.debug(msg) + + msg = "self.diff_delete: " + msg += f"{json.dumps(self.diff_delete, indent=4)}" + self.log.debug(msg) + def get_diff_override(self): + self.log.debug("ENTERED") all_vrfs = "" diff_delete = {} self.get_diff_replace() - diff_create = self.diff_create - diff_attach = self.diff_attach diff_detach = self.diff_detach - diff_deploy = self.diff_deploy diff_undeploy = self.diff_undeploy for have_a in self.have_attach: - found = next( - ( - vrf - for vrf in self.want_create - if vrf["vrfName"] == have_a["vrfName"] - ), - None, + found = self.find_dict_in_list_by_key_value( + search=self.want_create, key="vrfName", value=have_a["vrfName"] ) - to_del = [] + + detach_list = [] if not found: - atch_h = have_a["lanAttachList"] - for a_h in atch_h: - if a_h["isAttached"]: - del a_h["isAttached"] - a_h.update({"deployment": False}) - to_del.append(a_h) - - if to_del: - have_a.update({"lanAttachList": to_del}) + for item in have_a["lanAttachList"]: + if "isAttached" in item: + if item["isAttached"]: + del item["isAttached"] + item.update({"deployment": False}) + detach_list.append(item) + + if detach_list: + have_a.update({"lanAttachList": detach_list}) diff_detach.append(have_a) all_vrfs += have_a["vrfName"] + "," @@ -1598,84 +1775,89 @@ def get_diff_override(self): if all_vrfs: diff_undeploy.update({"vrfNames": all_vrfs[:-1]}) - self.diff_create = diff_create - self.diff_attach = diff_attach - self.diff_deploy = diff_deploy + self.diff_delete = diff_delete self.diff_detach = diff_detach self.diff_undeploy = diff_undeploy - self.diff_delete = diff_delete + + msg = "self.diff_delete: " + msg += f"{json.dumps(self.diff_delete, indent=4)}" + self.log.debug(msg) + + msg = "self.diff_detach: " + msg += f"{json.dumps(self.diff_detach, indent=4)}" + self.log.debug(msg) + + msg = "self.diff_undeploy: " + msg += f"{json.dumps(self.diff_undeploy, indent=4)}" + self.log.debug(msg) def get_diff_replace(self): + self.log.debug("ENTERED") all_vrfs = "" self.get_diff_merge(replace=True) - diff_create = self.diff_create diff_attach = self.diff_attach diff_deploy = self.diff_deploy for have_a in self.have_attach: - r_vrf_list = [] + replace_vrf_list = [] h_in_w = False for want_a in self.want_attach: if have_a["vrfName"] == want_a["vrfName"]: h_in_w = True - atch_h = have_a["lanAttachList"] - atch_w = want_a.get("lanAttachList") - for a_h in atch_h: - if not a_h["isAttached"]: - continue + for a_h in 
have_a["lanAttachList"]: + if "isAttached" in a_h: + if not a_h["isAttached"]: + continue a_match = False - if atch_w: - for a_w in atch_w: + if want_a.get("lanAttachList"): + for a_w in want_a.get("lanAttachList"): if a_h["serialNumber"] == a_w["serialNumber"]: # Have is already in diff, no need to continue looking for it. a_match = True break if not a_match: - del a_h["isAttached"] + if "isAttached" in a_h: + del a_h["isAttached"] a_h.update({"deployment": False}) - r_vrf_list.append(a_h) + replace_vrf_list.append(a_h) break if not h_in_w: - found = next( - ( - vrf - for vrf in self.want_create - if vrf["vrfName"] == have_a["vrfName"] - ), - None, + found = self.find_dict_in_list_by_key_value( + search=self.want_create, key="vrfName", value=have_a["vrfName"] ) + if found: atch_h = have_a["lanAttachList"] for a_h in atch_h: - if not bool(a_h["isAttached"]): - continue - del a_h["isAttached"] - a_h.update({"deployment": False}) - r_vrf_list.append(a_h) + if "isAttached" in a_h: + if not a_h["isAttached"]: + continue + del a_h["isAttached"] + a_h.update({"deployment": False}) + replace_vrf_list.append(a_h) - if r_vrf_list: + if replace_vrf_list: in_diff = False for d_attach in self.diff_attach: if have_a["vrfName"] == d_attach["vrfName"]: in_diff = True - d_attach["lanAttachList"].extend(r_vrf_list) + d_attach["lanAttachList"].extend(replace_vrf_list) break if not in_diff: r_vrf_dict = { "vrfName": have_a["vrfName"], - "lanAttachList": r_vrf_list, + "lanAttachList": replace_vrf_list, } diff_attach.append(r_vrf_dict) all_vrfs += have_a["vrfName"] + "," if not all_vrfs: - self.diff_create = diff_create self.diff_attach = diff_attach self.diff_deploy = diff_deploy return @@ -1686,137 +1868,195 @@ def get_diff_replace(self): vrfs = self.diff_deploy["vrfNames"] + "," + all_vrfs[:-1] diff_deploy.update({"vrfNames": vrfs}) - self.diff_create = diff_create - self.diff_attach = diff_attach - self.diff_deploy = diff_deploy + self.diff_attach = copy.deepcopy(diff_attach) + self.diff_deploy = copy.deepcopy(diff_deploy) - def get_diff_merge(self, replace=False): + msg = "self.diff_attach: " + msg += f"{json.dumps(self.diff_attach, indent=4)}" + self.log.debug(msg) - # Special cases: - # 1. Auto generate vrfId if its not mentioned by user: - # In this case, we need to query the DCNM to get a usable ID and use it in the payload. - # And also, any such vrf create requests need to be pushed individually(not bulk op). + msg = "self.diff_deploy: " + msg += f"{json.dumps(self.diff_deploy, indent=4)}" + self.log.debug(msg) + + def get_next_vrf_id(self, fabric) -> int: + """ + # Summary + + Return the next available vrf_id for fabric. 
+ + ## Raises + + Calls fail_json() if: + - Controller version is unsupported + - Unable to retrieve next available vrf_id for fabric + """ + method_name = inspect.stack()[0][3] + self.log.debug("ENTERED") + + attempt = 0 + vrf_id = None + while attempt < 10: + attempt += 1 + path = self.paths["GET_VRF_ID"].format(fabric) + if self.dcnm_version > 11: + vrf_id_obj = dcnm_send(self.module, "GET", path) + else: + vrf_id_obj = dcnm_send(self.module, "POST", path) + + missing_fabric, not_ok = self.handle_response(vrf_id_obj, "query_dcnm") + + if missing_fabric or not_ok: + # arobel: TODO: Not covered by UT + msg0 = f"{self.class_name}.{method_name}: " + msg1 = f"{msg0} Fabric {fabric} not present on the controller" + msg2 = f"{msg0} Unable to generate vrfId under fabric {fabric}" + self.module.fail_json(msg=msg1 if missing_fabric else msg2) + + if not vrf_id_obj["DATA"]: + continue + + if self.dcnm_version == 11: + vrf_id = vrf_id_obj["DATA"].get("partitionSegmentId") + elif self.dcnm_version >= 12: + vrf_id = vrf_id_obj["DATA"].get("l3vni") + else: + # arobel: TODO: Not covered by UT + msg = f"{self.class_name}.{method_name}: " + msg += "Unsupported controller version: " + msg += f"{self.dcnm_version}" + self.module.fail_json(msg) + + if vrf_id is None: + msg = f"{self.class_name}.{method_name}: " + msg += "Unable to retrieve vrf_id " + msg += f"for fabric {fabric}" + self.module.fail_json(msg) + return int(str(vrf_id)) + + def diff_merge_create(self, replace=False): + msg = f"ENTERED with replace == {replace}" + self.log.debug(msg) + + conf_changed = {} diff_create = [] diff_create_update = [] diff_create_quick = [] - diff_attach = [] - diff_deploy = {} - prev_vrf_id_fetched = None - conf_changed = {} - - all_vrfs = "" - attach_found = False - vrf_found = False for want_c in self.want_create: vrf_found = False for have_c in self.have_create: if want_c["vrfName"] == have_c["vrfName"]: vrf_found = True + msg = "Calling diff_for_create with: " + msg += f"want_c: {json.dumps(want_c, indent=4, sort_keys=True)}, " + msg += f"have_c: {json.dumps(have_c, indent=4, sort_keys=True)}" + self.log.debug(msg) + diff, conf_chg = self.diff_for_create(want_c, have_c) + + msg = "diff_for_create() returned with: " + msg += f"conf_chg {conf_chg}, " + msg += f"diff {json.dumps(diff, indent=4, sort_keys=True)}, " + self.log.debug(msg) + + msg = f"Updating conf_changed[{want_c['vrfName']}] " + msg += f"with {conf_chg}" + self.log.debug(msg) conf_changed.update({want_c["vrfName"]: conf_chg}) + if diff: + msg = "Appending diff_create_update with " + msg += f"{json.dumps(diff, indent=4, sort_keys=True)}" + self.log.debug(msg) diff_create_update.append(diff) break + if not vrf_found: vrf_id = want_c.get("vrfId", None) - if vrf_id is None: + if vrf_id is not None: + diff_create.append(want_c) + else: # vrfId is not provided by user. - # Need to query DCNM to fetch next available vrfId and use it here. - method = "POST" - - attempt = 0 - while attempt < 10: - attempt += 1 - path = self.paths["GET_VRF_ID"].format(self.fabric) - if self.dcnm_version > 11: - vrf_id_obj = dcnm_send(self.module, "GET", path) - else: - vrf_id_obj = dcnm_send(self.module, method, path) + # Fetch the next available vrfId and use it here. 
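+ # The fetched id is also written into vrfTemplateConfig (as
+ # vrfSegmentId) below so the create payload stays consistent.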
+ vrf_id = self.get_next_vrf_id(self.fabric) + + want_c.update({"vrfId": vrf_id}) + json_to_dict = json.loads(want_c["vrfTemplateConfig"]) + template_conf = { + "vrfSegmentId": vrf_id, + "vrfName": want_c["vrfName"], + "vrfVlanId": json_to_dict.get("vrfVlanId"), + "vrfVlanName": json_to_dict.get("vrfVlanName"), + "vrfIntfDescription": json_to_dict.get("vrfIntfDescription"), + "vrfDescription": json_to_dict.get("vrfDescription"), + "mtu": json_to_dict.get("mtu"), + "tag": json_to_dict.get("tag"), + "vrfRouteMap": json_to_dict.get("vrfRouteMap"), + "maxBgpPaths": json_to_dict.get("maxBgpPaths"), + "maxIbgpPaths": json_to_dict.get("maxIbgpPaths"), + "ipv6LinkLocalFlag": json_to_dict.get("ipv6LinkLocalFlag"), + "trmEnabled": json_to_dict.get("trmEnabled"), + "isRPExternal": json_to_dict.get("isRPExternal"), + "rpAddress": json_to_dict.get("rpAddress"), + "loopbackNumber": json_to_dict.get("loopbackNumber"), + "L3VniMcastGroup": json_to_dict.get("L3VniMcastGroup"), + "multicastGroup": json_to_dict.get("multicastGroup"), + "trmBGWMSiteEnabled": json_to_dict.get("trmBGWMSiteEnabled"), + "advertiseHostRouteFlag": json_to_dict.get( + "advertiseHostRouteFlag" + ), + "advertiseDefaultRouteFlag": json_to_dict.get( + "advertiseDefaultRouteFlag" + ), + "configureStaticDefaultRouteFlag": json_to_dict.get( + "configureStaticDefaultRouteFlag" + ), + "bgpPassword": json_to_dict.get("bgpPassword"), + "bgpPasswordKeyType": json_to_dict.get("bgpPasswordKeyType"), + } - missing_fabric, not_ok = self.handle_response( - vrf_id_obj, "query_dcnm" + if self.dcnm_version > 11: + template_conf.update(isRPAbsent=json_to_dict.get("isRPAbsent")) + template_conf.update( + ENABLE_NETFLOW=json_to_dict.get("ENABLE_NETFLOW") ) - - if missing_fabric or not_ok: - msg1 = "Fabric {0} not present on DCNM".format(self.fabric) - msg2 = ( - "Unable to generate vrfId for vrf: {0} " - "under fabric: {1}".format( - want_c["vrfName"], self.fabric - ) + template_conf.update( + NETFLOW_MONITOR=json_to_dict.get("NETFLOW_MONITOR") + ) + template_conf.update( + disableRtAuto=json_to_dict.get("disableRtAuto") + ) + template_conf.update( + routeTargetImport=json_to_dict.get("routeTargetImport") + ) + template_conf.update( + routeTargetExport=json_to_dict.get("routeTargetExport") + ) + template_conf.update( + routeTargetImportEvpn=json_to_dict.get( + "routeTargetImportEvpn" ) - - self.module.fail_json(msg=msg1 if missing_fabric else msg2) - - if not vrf_id_obj["DATA"]: - continue - - if self.dcnm_version == 11: - vrf_id = vrf_id_obj["DATA"].get("partitionSegmentId") - elif self.dcnm_version >= 12: - vrf_id = vrf_id_obj["DATA"].get("l3vni") - else: - msg = "Unsupported DCNM version: version {0}".format( - self.dcnm_version + ) + template_conf.update( + routeTargetExportEvpn=json_to_dict.get( + "routeTargetExportEvpn" ) - self.module.fail_json(msg) - - if vrf_id != prev_vrf_id_fetched: - want_c.update({"vrfId": vrf_id}) - json_to_dict = json.loads(want_c["vrfTemplateConfig"]) - template_conf = { - "vrfSegmentId": vrf_id, - "vrfName": want_c["vrfName"], - "vrfVlanId": json_to_dict.get("vrfVlanId"), - "vrfVlanName": json_to_dict.get("vrfVlanName"), - "vrfIntfDescription": json_to_dict.get("vrfIntfDescription"), - "vrfDescription": json_to_dict.get("vrfDescription"), - "mtu": json_to_dict.get("mtu"), - "tag": json_to_dict.get("tag"), - "vrfRouteMap": json_to_dict.get("vrfRouteMap"), - "maxBgpPaths": json_to_dict.get("maxBgpPaths"), - "maxIbgpPaths": json_to_dict.get("maxIbgpPaths"), - "ipv6LinkLocalFlag": json_to_dict.get("ipv6LinkLocalFlag"), - 
"trmEnabled": json_to_dict.get("trmEnabled"), - "isRPExternal": json_to_dict.get("isRPExternal"), - "rpAddress": json_to_dict.get("rpAddress"), - "loopbackNumber": json_to_dict.get("loopbackNumber"), - "L3VniMcastGroup": json_to_dict.get("L3VniMcastGroup"), - "multicastGroup": json_to_dict.get("multicastGroup"), - "trmBGWMSiteEnabled": json_to_dict.get("trmBGWMSiteEnabled"), - "advertiseHostRouteFlag": json_to_dict.get("advertiseHostRouteFlag"), - "advertiseDefaultRouteFlag": json_to_dict.get("advertiseDefaultRouteFlag"), - "configureStaticDefaultRouteFlag": json_to_dict.get("configureStaticDefaultRouteFlag"), - "bgpPassword": json_to_dict.get("bgpPassword"), - "bgpPasswordKeyType": json_to_dict.get("bgpPasswordKeyType"), - } - - if self.dcnm_version > 11: - template_conf.update(isRPAbsent=json_to_dict.get("isRPAbsent")) - template_conf.update(ENABLE_NETFLOW=json_to_dict.get("ENABLE_NETFLOW")) - template_conf.update(NETFLOW_MONITOR=json_to_dict.get("NETFLOW_MONITOR")) - template_conf.update(disableRtAuto=json_to_dict.get("disableRtAuto")) - template_conf.update(routeTargetImport=json_to_dict.get("routeTargetImport")) - template_conf.update(routeTargetExport=json_to_dict.get("routeTargetExport")) - template_conf.update(routeTargetImportEvpn=json_to_dict.get("routeTargetImportEvpn")) - template_conf.update(routeTargetExportEvpn=json_to_dict.get("routeTargetExportEvpn")) - template_conf.update(routeTargetImportMvpn=json_to_dict.get("routeTargetImportMvpn")) - template_conf.update(routeTargetExportMvpn=json_to_dict.get("routeTargetExportMvpn")) - - want_c.update( - {"vrfTemplateConfig": json.dumps(template_conf)} + ) + template_conf.update( + routeTargetImportMvpn=json_to_dict.get( + "routeTargetImportMvpn" + ) + ) + template_conf.update( + routeTargetExportMvpn=json_to_dict.get( + "routeTargetExportMvpn" ) - prev_vrf_id_fetched = vrf_id - break - - if not vrf_id: - self.module.fail_json( - msg="Unable to generate vrfId for vrf: {0} " - "under fabric: {1}".format(want_c["vrfName"], self.fabric) ) + want_c.update({"vrfTemplateConfig": json.dumps(template_conf)}) + create_path = self.paths["GET_VRF"].format(self.fabric) diff_create_quick.append(want_c) @@ -1824,24 +2064,47 @@ def get_diff_merge(self, replace=False): if self.module.check_mode: continue + # arobel: TODO: Not covered by UT resp = dcnm_send( - self.module, method, create_path, json.dumps(want_c) + self.module, "POST", create_path, json.dumps(want_c) ) self.result["response"].append(resp) fail, self.result["changed"] = self.handle_response(resp, "create") if fail: self.failure(resp) - else: - diff_create.append(want_c) + self.diff_create = diff_create + self.diff_create_update = diff_create_update + self.diff_create_quick = diff_create_quick + msg = "self.diff_create: " + msg += f"{json.dumps(self.diff_create, indent=4)}" + self.log.debug(msg) + + msg = "self.diff_create_quick: " + msg += f"{json.dumps(self.diff_create_quick, indent=4)}" + self.log.debug(msg) + + msg = "self.diff_create_update: " + msg += f"{json.dumps(self.diff_create_update, indent=4)}" + self.log.debug(msg) + + def diff_merge_attach(self, replace=False): + msg = f"ENTERED with replace == {replace}" + self.log.debug(msg) + + diff_attach = [] + diff_deploy = {} + conf_changed = {} + + all_vrfs = "" for want_a in self.want_attach: - dep_vrf = "" + deploy_vrf = "" attach_found = False for have_a in self.have_attach: if want_a["vrfName"] == have_a["vrfName"]: attach_found = True - diff, vrf = self.diff_for_attach_deploy( + diff, deploy_vrf_bool = 
self.diff_for_attach_deploy( want_a["lanAttachList"], have_a["lanAttachList"], replace ) if diff: @@ -1850,42 +2113,67 @@ def get_diff_merge(self, replace=False): base.update({"lanAttachList": diff}) diff_attach.append(base) - if vrf: - dep_vrf = want_a["vrfName"] + if deploy_vrf_bool is True: + deploy_vrf = want_a["vrfName"] else: - if vrf or conf_changed.get(want_a["vrfName"], False): - dep_vrf = want_a["vrfName"] + if deploy_vrf_bool or conf_changed.get( + want_a["vrfName"], False + ): + deploy_vrf = want_a["vrfName"] + + msg = f"attach_found: {attach_found}" + self.log.debug(msg) if not attach_found and want_a.get("lanAttachList"): - atch_list = [] + attach_list = [] for attach in want_a["lanAttachList"]: if attach.get("isAttached"): del attach["isAttached"] - atch_list.append(attach) - if atch_list: + if attach.get("is_deploy") is True: + deploy_vrf = want_a["vrfName"] + attach["deployment"] = True + attach_list.append(copy.deepcopy(attach)) + if attach_list: base = want_a.copy() del base["lanAttachList"] - base.update({"lanAttachList": atch_list}) + base.update({"lanAttachList": attach_list}) diff_attach.append(base) - if bool(attach["is_deploy"]): - dep_vrf = want_a["vrfName"] - - for atch in atch_list: - atch["deployment"] = True + # for atch in attach_list: + # atch["deployment"] = True - if dep_vrf: - all_vrfs += dep_vrf + "," + if deploy_vrf: + all_vrfs += deploy_vrf + "," if all_vrfs: diff_deploy.update({"vrfNames": all_vrfs[:-1]}) - self.diff_create = diff_create - self.diff_create_update = diff_create_update self.diff_attach = diff_attach self.diff_deploy = diff_deploy - self.diff_create_quick = diff_create_quick + + msg = "self.diff_attach: " + msg += f"{json.dumps(self.diff_attach, indent=4)}" + self.log.debug(msg) + + msg = "self.diff_deploy: " + msg += f"{json.dumps(self.diff_deploy, indent=4)}" + self.log.debug(msg) + + def get_diff_merge(self, replace=False): + msg = f"ENTERED with replace == {replace}" + self.log.debug(msg) + + # Special cases: + # 1. Auto generate vrfId if its not mentioned by user: + # - In this case, query the controller for a vrfId and + # use it in the payload. + # - Any such vrf create requests need to be pushed individually + # (not bulk op). 
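+ # The work is split across diff_merge_create() and diff_merge_attach()
+ # so the create and attach diffs can be built and logged independently.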
+ + self.diff_merge_create(replace) + self.diff_merge_attach(replace) def format_diff(self): + self.log.debug("ENTERED") diff = [] @@ -1901,6 +2189,34 @@ def format_diff(self): self.diff_undeploy["vrfNames"].split(",") if self.diff_undeploy else [] ) + msg = "INPUT: diff_create: " + msg += f"{json.dumps(diff_create, indent=4, sort_keys=True)}" + self.log.debug(msg) + + msg = "INPUT: diff_create_quick: " + msg += f"{json.dumps(diff_create_quick, indent=4, sort_keys=True)}" + self.log.debug(msg) + + msg = "INPUT: diff_create_update: " + msg += f"{json.dumps(diff_create_update, indent=4, sort_keys=True)}" + self.log.debug(msg) + + msg = "INPUT: diff_attach: " + msg += f"{json.dumps(diff_attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + + msg = "INPUT: diff_detach: " + msg += f"{json.dumps(diff_detach, indent=4, sort_keys=True)}" + self.log.debug(msg) + + msg = "INPUT: diff_deploy: " + msg += f"{json.dumps(diff_deploy, indent=4, sort_keys=True)}" + self.log.debug(msg) + + msg = "INPUT: diff_undeploy: " + msg += f"{json.dumps(diff_undeploy, indent=4, sort_keys=True)}" + self.log.debug(msg) + diff_create.extend(diff_create_quick) diff_create.extend(diff_create_update) diff_attach.extend(diff_detach) @@ -1908,12 +2224,23 @@ def format_diff(self): for want_d in diff_create: - found_a = next( - (vrf for vrf in diff_attach if vrf["vrfName"] == want_d["vrfName"]), - None, + msg = "want_d: " + msg += f"{json.dumps(want_d, indent=4, sort_keys=True)}" + self.log.debug(msg) + + found_a = self.find_dict_in_list_by_key_value( + search=diff_attach, key="vrfName", value=want_d["vrfName"] ) - found_c = want_d + msg = "found_a: " + msg += f"{json.dumps(found_a, indent=4, sort_keys=True)}" + self.log.debug(msg) + + found_c = copy.deepcopy(want_d) + + msg = "found_c: PRE_UPDATE: " + msg += f"{json.dumps(found_c, indent=4, sort_keys=True)}" + self.log.debug(msg) src = found_c["source"] found_c.update({"vrf_name": found_c["vrfName"]}) @@ -1927,37 +2254,79 @@ def format_diff(self): json_to_dict = json.loads(found_c["vrfTemplateConfig"]) found_c.update({"vrf_vlan_name": json_to_dict.get("vrfVlanName", "")}) - found_c.update({"vrf_intf_desc": json_to_dict.get("vrfIntfDescription", "")}) + found_c.update( + {"vrf_intf_desc": json_to_dict.get("vrfIntfDescription", "")} + ) found_c.update({"vrf_description": json_to_dict.get("vrfDescription", "")}) found_c.update({"vrf_int_mtu": json_to_dict.get("mtu", "")}) found_c.update({"loopback_route_tag": json_to_dict.get("tag", "")}) found_c.update({"redist_direct_rmap": json_to_dict.get("vrfRouteMap", "")}) found_c.update({"max_bgp_paths": json_to_dict.get("maxBgpPaths", "")}) found_c.update({"max_ibgp_paths": json_to_dict.get("maxIbgpPaths", "")}) - found_c.update({"ipv6_linklocal_enable": json_to_dict.get("ipv6LinkLocalFlag", True)}) + found_c.update( + {"ipv6_linklocal_enable": json_to_dict.get("ipv6LinkLocalFlag", True)} + ) found_c.update({"trm_enable": json_to_dict.get("trmEnabled", False)}) found_c.update({"rp_external": json_to_dict.get("isRPExternal", False)}) found_c.update({"rp_address": json_to_dict.get("rpAddress", "")}) found_c.update({"rp_loopback_id": json_to_dict.get("loopbackNumber", "")}) - found_c.update({"underlay_mcast_ip": json_to_dict.get("L3VniMcastGroup", "")}) - found_c.update({"overlay_mcast_group": json_to_dict.get("multicastGroup", "")}) - found_c.update({"trm_bgw_msite": json_to_dict.get("trmBGWMSiteEnabled", False)}) - found_c.update({"adv_host_routes": json_to_dict.get("advertiseHostRouteFlag", False)}) - 
found_c.update({"adv_default_routes": json_to_dict.get("advertiseDefaultRouteFlag", True)}) - found_c.update({"static_default_route": json_to_dict.get("configureStaticDefaultRouteFlag", True)}) + found_c.update( + {"underlay_mcast_ip": json_to_dict.get("L3VniMcastGroup", "")} + ) + found_c.update( + {"overlay_mcast_group": json_to_dict.get("multicastGroup", "")} + ) + found_c.update( + {"trm_bgw_msite": json_to_dict.get("trmBGWMSiteEnabled", False)} + ) + found_c.update( + {"adv_host_routes": json_to_dict.get("advertiseHostRouteFlag", False)} + ) + found_c.update( + { + "adv_default_routes": json_to_dict.get( + "advertiseDefaultRouteFlag", True + ) + } + ) + found_c.update( + { + "static_default_route": json_to_dict.get( + "configureStaticDefaultRouteFlag", True + ) + } + ) found_c.update({"bgp_password": json_to_dict.get("bgpPassword", "")}) - found_c.update({"bgp_passwd_encrypt": json_to_dict.get("bgpPasswordKeyType", "")}) + found_c.update( + {"bgp_passwd_encrypt": json_to_dict.get("bgpPasswordKeyType", "")} + ) if self.dcnm_version > 11: found_c.update({"no_rp": json_to_dict.get("isRPAbsent", False)}) - found_c.update({"netflow_enable": json_to_dict.get("ENABLE_NETFLOW", True)}) + found_c.update( + {"netflow_enable": json_to_dict.get("ENABLE_NETFLOW", True)} + ) found_c.update({"nf_monitor": json_to_dict.get("NETFLOW_MONITOR", "")}) - found_c.update({"disable_rt_auto": json_to_dict.get("disableRtAuto", False)}) - found_c.update({"import_vpn_rt": json_to_dict.get("routeTargetImport", "")}) - found_c.update({"export_vpn_rt": json_to_dict.get("routeTargetExport", "")}) - found_c.update({"import_evpn_rt": json_to_dict.get("routeTargetImportEvpn", "")}) - found_c.update({"export_evpn_rt": json_to_dict.get("routeTargetExportEvpn", "")}) - found_c.update({"import_mvpn_rt": json_to_dict.get("routeTargetImportMvpn", "")}) - found_c.update({"export_mvpn_rt": json_to_dict.get("routeTargetExportMvpn", "")}) + found_c.update( + {"disable_rt_auto": json_to_dict.get("disableRtAuto", False)} + ) + found_c.update( + {"import_vpn_rt": json_to_dict.get("routeTargetImport", "")} + ) + found_c.update( + {"export_vpn_rt": json_to_dict.get("routeTargetExport", "")} + ) + found_c.update( + {"import_evpn_rt": json_to_dict.get("routeTargetImportEvpn", "")} + ) + found_c.update( + {"export_evpn_rt": json_to_dict.get("routeTargetExportEvpn", "")} + ) + found_c.update( + {"import_mvpn_rt": json_to_dict.get("routeTargetImportMvpn", "")} + ) + found_c.update( + {"export_mvpn_rt": json_to_dict.get("routeTargetExportMvpn", "")} + ) del found_c["fabric"] del found_c["vrfName"] @@ -1967,9 +2336,15 @@ def format_diff(self): del found_c["serviceVrfTemplate"] del found_c["vrfTemplateConfig"] + msg = "found_c: POST_UPDATE: " + msg += f"{json.dumps(found_c, indent=4, sort_keys=True)}" + self.log.debug(msg) + if diff_deploy and found_c["vrf_name"] in diff_deploy: diff_deploy.remove(found_c["vrf_name"]) if not found_a: + msg = "not found_a. Appending found_c to diff." + self.log.debug(msg) diff.append(found_c) continue @@ -1986,6 +2361,9 @@ def format_diff(self): attach_d.update({"deploy": a_w["deployment"]}) found_c["attach"].append(attach_d) + msg = "Appending found_c to diff." 
+ self.log.debug(msg) + diff.append(found_c) diff_attach.remove(found_a) @@ -1997,7 +2375,6 @@ def format_diff(self): for a_w in attach: attach_d = {} - for k, v in self.ip_sn.items(): if v == a_w["serialNumber"]: attach_d.update({"ip_address": k}) @@ -2017,27 +2394,34 @@ def format_diff(self): new_deploy_dict = {"vrf_name": vrf} diff.append(new_deploy_dict) - self.diff_input_format = diff + self.diff_input_format = copy.deepcopy(diff) + + msg = "self.diff_input_format: " + msg += f"{json.dumps(self.diff_input_format, indent=4, sort_keys=True)}" + self.log.debug(msg) def get_diff_query(self): + method_name = inspect.stack()[0][3] + self.log.debug("ENTERED") - method = "GET" path = self.paths["GET_VRF"].format(self.fabric) - vrf_objects = dcnm_send(self.module, method, path) + vrf_objects = dcnm_send(self.module, "GET", path) missing_fabric, not_ok = self.handle_response(vrf_objects, "query_dcnm") if ( vrf_objects.get("ERROR") == "Not Found" and vrf_objects.get("RETURN_CODE") == 404 ): - self.module.fail_json( - msg="Fabric {0} not present on DCNM".format(self.fabric) - ) + msg = f"{self.class_name}.{method_name}: " + msg += f"Fabric {self.fabric} does not exist on the controller" + self.module.fail_json(msg=msg) return if missing_fabric or not_ok: - msg1 = "Fabric {0} not present on DCNM".format(self.fabric) - msg2 = "Unable to find VRFs under fabric: {0}".format(self.fabric) + # arobel: TODO: Not covered by UT + msg0 = f"{self.class_name}.{method_name}:" + msg1 = f"{msg0} Fabric {self.fabric} not present on the controller" + msg2 = f"{msg0} Unable to find VRFs under fabric: {self.fabric}" self.module.fail_json(msg=msg1 if missing_fabric else msg2) if not vrf_objects["DATA"]: @@ -2055,28 +2439,25 @@ def get_diff_query(self): item["parent"] = vrf # Query the Attachment for the found VRF - method = "GET" path = self.paths["GET_VRF_ATTACH"].format( self.fabric, vrf["vrfName"] ) - vrf_attach_objects = dcnm_send(self.module, method, path) + vrf_attach_objects = dcnm_send(self.module, "GET", path) missing_fabric, not_ok = self.handle_response( vrf_attach_objects, "query_dcnm" ) if missing_fabric or not_ok: - msg1 = "Fabric {0} not present on DCNM".format(self.fabric) - msg2 = ( - "Unable to find attachments for " - "vrfs: {0} under fabric: {1}".format( - vrf["vrfName"], self.fabric - ) + # arobel: TODO: Not covered by UT + msg0 = f"{self.class_name}.{method_name}:" + msg1 = f"{msg0} Fabric {self.fabric} not present on the controller" + msg2 = f"{msg0} Unable to find attachments for " + msg2 += ( + f"vrfs: {vrf['vrfName']} under fabric: {self.fabric}" ) - self.module.fail_json(msg=msg1 if missing_fabric else msg2) - return if not vrf_attach_objects["DATA"]: return @@ -2088,12 +2469,16 @@ def get_diff_query(self): attach_list = vrf_attach["lanAttachList"] for attach in attach_list: - path = self.paths["GET_VRF_SWITCH"].format( - self.fabric, - attach["vrfName"], - attach["switchSerialNo"], + # copy attach and update it with the keys that + # get_vrf_lite_objects() expects. 
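+ # (fabric from self.fabric, serialNumber from switchSerialNo).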
+ attach_copy = copy.deepcopy(attach) + attach_copy.update({"fabric": self.fabric}) + attach_copy.update( + {"serialNumber": attach["switchSerialNo"]} + ) + lite_objects = self.get_vrf_lite_objects( + attach_copy ) - lite_objects = dcnm_send(self.module, method, path) if not lite_objects.get("DATA"): return item["attach"].append(lite_objects.get("DATA")[0]) @@ -2107,21 +2492,16 @@ def get_diff_query(self): item["parent"] = vrf # Query the Attachment for the found VRF - method = "GET" path = self.paths["GET_VRF_ATTACH"].format(self.fabric, vrf["vrfName"]) - vrf_attach_objects = dcnm_send(self.module, method, path) + vrf_attach_objects = dcnm_send(self.module, "GET", path) missing_fabric, not_ok = self.handle_response(vrf_objects, "query_dcnm") if missing_fabric or not_ok: - msg1 = "Fabric {0} not present on DCNM".format(self.fabric) - msg2 = ( - "Unable to find attachments for " - "vrfs: {0} under fabric: {1}".format( - vrf["vrfName"], self.fabric - ) - ) + msg1 = f"Fabric {self.fabric} not present on DCNM" + msg2 = "Unable to find attachments for " + msg2 += f"vrfs: {vrf['vrfName']} under fabric: {self.fabric}" self.module.fail_json(msg=msg1 if missing_fabric else msg2) return @@ -2135,11 +2515,13 @@ def get_diff_query(self): attach_list = vrf_attach["lanAttachList"] for attach in attach_list: - path = self.paths["GET_VRF_SWITCH"].format( - self.fabric, attach["vrfName"], attach["switchSerialNo"] - ) + # copy attach and update it with the keys that + # get_vrf_lite_objects() expects. + attach_copy = copy.deepcopy(attach) + attach_copy.update({"fabric": self.fabric}) + attach_copy.update({"serialNumber": attach["switchSerialNo"]}) + lite_objects = self.get_vrf_lite_objects(attach_copy) - lite_objects = dcnm_send(self.module, method, path) if not lite_objects.get("DATA"): return item["attach"].append(lite_objects.get("DATA")[0]) @@ -2147,444 +2529,984 @@ def get_diff_query(self): self.query = query - def push_to_remote(self, is_rollback=False): + def push_diff_create_update(self, is_rollback=False): + """ + # Summary + Send diff_create_update to the controller + """ + self.log.debug("ENTERED") + + action = "create" path = self.paths["GET_VRF"].format(self.fabric) + verb = "PUT" - method = "PUT" if self.diff_create_update: for vrf in self.diff_create_update: - update_path = path + "/{0}".format(vrf["vrfName"]) - resp = dcnm_send(self.module, method, update_path, json.dumps(vrf)) - self.result["response"].append(resp) - fail, self.result["changed"] = self.handle_response(resp, "create") - if fail: - if is_rollback: - self.failed_to_rollback = True - return - self.failure(resp) - - # - # The detach and un-deploy operations are executed before the create,attach and deploy to particularly - # address cases where a VLAN for vrf attachment being deleted is re-used on a new vrf attachment being - # created. This is needed specially for state: overridden - # + update_path = f"{path}/{vrf['vrfName']}" + self.send_to_controller(action, verb, update_path, vrf, is_rollback) - method = "POST" - if self.diff_detach: - detach_path = path + "/attachments" + def push_diff_detach(self, is_rollback=False): + """ + # Summary - # Update the fabric name to specific fabric to which the switches belong for multisite fabric. 
- if self.fabric_type == "MFD": - for elem in self.diff_detach: - for node in elem["lanAttachList"]: - node["fabric"] = self.sn_fab[node["serialNumber"]] + Send diff_detach to the controller + """ + self.log.debug("ENTERED") - for d_a in self.diff_detach: - for v_a in d_a["lanAttachList"]: - if "is_deploy" in v_a.keys(): - del v_a["is_deploy"] + if not self.diff_detach: + return - resp = dcnm_send( - self.module, method, detach_path, json.dumps(self.diff_detach) - ) - self.result["response"].append(resp) - fail, self.result["changed"] = self.handle_response(resp, "attach") - if fail: - if is_rollback: - self.failed_to_rollback = True - return - self.failure(resp) + # For multiste fabric, update the fabric name to the child fabric + # containing the switches + if self.fabric_type == "MFD": + for elem in self.diff_detach: + for node in elem["lanAttachList"]: + node["fabric"] = self.sn_fab[node["serialNumber"]] - method = "POST" - if self.diff_undeploy: - deploy_path = path + "/deployments" - resp = dcnm_send( - self.module, method, deploy_path, json.dumps(self.diff_undeploy) - ) - self.result["response"].append(resp) - fail, self.result["changed"] = self.handle_response(resp, "deploy") - if fail: - if is_rollback: - self.failed_to_rollback = True - return - self.failure(resp) + for diff_attach in self.diff_detach: + for vrf_attach in diff_attach["lanAttachList"]: + if "is_deploy" in vrf_attach.keys(): + del vrf_attach["is_deploy"] + + action = "attach" + path = self.paths["GET_VRF"].format(self.fabric) + detach_path = path + "/attachments" + verb = "POST" + self.send_to_controller( + action, verb, detach_path, self.diff_detach, is_rollback + ) + + def push_diff_undeploy(self, is_rollback=False): + """ + # Summary + + Send diff_undeploy to the controller + """ + self.log.debug("ENTERED") + + if not self.diff_undeploy: + return + + action = "deploy" + path = self.paths["GET_VRF"].format(self.fabric) + deploy_path = path + "/deployments" + verb = "POST" + + self.send_to_controller( + action, verb, deploy_path, self.diff_undeploy, is_rollback + ) + + def push_diff_delete(self, is_rollback=False): + """ + # Summary + + Send diff_delete to the controller + """ + method_name = inspect.stack()[0][3] + self.log.debug("ENTERED") + + if not self.diff_delete: + return + + action = "delete" + path = self.paths["GET_VRF"].format(self.fabric) + verb = "DELETE" del_failure = "" - if self.diff_delete and self.wait_for_vrf_del_ready(): - method = "DELETE" - for vrf, state in self.diff_delete.items(): - if state == "OUT-OF-SYNC": - del_failure += vrf + "," - continue - delete_path = path + "/" + vrf - resp = dcnm_send(self.module, method, delete_path) - self.result["response"].append(resp) - fail, self.result["changed"] = self.handle_response(resp, "delete") - if fail: - if is_rollback: - self.failed_to_rollback = True - return - self.failure(resp) + self.wait_for_vrf_del_ready() + for vrf, state in self.diff_delete.items(): + if state == "OUT-OF-SYNC": + del_failure += vrf + "," + continue + delete_path = f"{path}/{vrf}" + self.send_to_controller( + action, verb, delete_path, self.diff_detach, is_rollback + ) if del_failure: - self.result["response"].append( - "Deletion of vrfs {0} has failed".format(del_failure[:-1]) - ) + msg = f"{self.class_name}.{method_name}: " + msg += f"Deletion of vrfs {del_failure[:-1]} has failed" + self.result["response"].append(msg) self.module.fail_json(msg=self.result) - method = "POST" - if self.diff_create: + def push_diff_create(self, is_rollback=False): + """ + # Summary - 
for vrf in self.diff_create: - json_to_dict = json.loads(vrf["vrfTemplateConfig"]) - vlanId = json_to_dict.get("vrfVlanId", "0") + Send diff_create to the controller + """ + method_name = inspect.stack()[0][3] + self.log.debug("ENTERED") - if vlanId == 0: - vlan_path = self.paths["GET_VLAN"].format(self.fabric) - vlan_data = dcnm_send(self.module, "GET", vlan_path) + if not self.diff_create: + return - if vlan_data["RETURN_CODE"] != 200: - self.module.fail_json( - msg="Failure getting autogenerated vlan_id {0}".format( - vlan_data - ) - ) - vlanId = vlan_data["DATA"] - - t_conf = { - "vrfSegmentId": vrf["vrfId"], - "vrfName": json_to_dict.get("vrfName", ""), - "vrfVlanId": vlanId, - "vrfVlanName": json_to_dict.get("vrfVlanName"), - "vrfIntfDescription": json_to_dict.get("vrfIntfDescription"), - "vrfDescription": json_to_dict.get("vrfDescription"), - "mtu": json_to_dict.get("mtu"), - "tag": json_to_dict.get("tag"), - "vrfRouteMap": json_to_dict.get("vrfRouteMap"), - "maxBgpPaths": json_to_dict.get("maxBgpPaths"), - "maxIbgpPaths": json_to_dict.get("maxIbgpPaths"), - "ipv6LinkLocalFlag": json_to_dict.get("ipv6LinkLocalFlag"), - "trmEnabled": json_to_dict.get("trmEnabled"), - "isRPExternal": json_to_dict.get("isRPExternal"), - "rpAddress": json_to_dict.get("rpAddress"), - "loopbackNumber": json_to_dict.get("loopbackNumber"), - "L3VniMcastGroup": json_to_dict.get("L3VniMcastGroup"), - "multicastGroup": json_to_dict.get("multicastGroup"), - "trmBGWMSiteEnabled": json_to_dict.get("trmBGWMSiteEnabled"), - "advertiseHostRouteFlag": json_to_dict.get("advertiseHostRouteFlag"), - "advertiseDefaultRouteFlag": json_to_dict.get("advertiseDefaultRouteFlag"), - "configureStaticDefaultRouteFlag": json_to_dict.get("configureStaticDefaultRouteFlag"), - "bgpPassword": json_to_dict.get("bgpPassword"), - "bgpPasswordKeyType": json_to_dict.get("bgpPasswordKeyType"), - } + for vrf in self.diff_create: + json_to_dict = json.loads(vrf["vrfTemplateConfig"]) + vlan_id = json_to_dict.get("vrfVlanId", "0") - if self.dcnm_version > 11: - t_conf.update(isRPAbsent=json_to_dict.get("isRPAbsent")) - t_conf.update(ENABLE_NETFLOW=json_to_dict.get("ENABLE_NETFLOW")) - t_conf.update(NETFLOW_MONITOR=json_to_dict.get("NETFLOW_MONITOR")) - t_conf.update(disableRtAuto=json_to_dict.get("disableRtAuto")) - t_conf.update(routeTargetImport=json_to_dict.get("routeTargetImport")) - t_conf.update(routeTargetExport=json_to_dict.get("routeTargetExport")) - t_conf.update(routeTargetImportEvpn=json_to_dict.get("routeTargetImportEvpn")) - t_conf.update(routeTargetExportEvpn=json_to_dict.get("routeTargetExportEvpn")) - t_conf.update(routeTargetImportMvpn=json_to_dict.get("routeTargetImportMvpn")) - t_conf.update(routeTargetExportMvpn=json_to_dict.get("routeTargetExportMvpn")) - - vrf.update({"vrfTemplateConfig": json.dumps(t_conf)}) - - resp = dcnm_send(self.module, method, path, json.dumps(vrf)) - self.result["response"].append(resp) - fail, self.result["changed"] = self.handle_response(resp, "create") - if fail: - if is_rollback: - self.failed_to_rollback = True - return - self.failure(resp) - - if self.diff_attach: - for d_a in self.diff_attach: - for v_a in d_a["lanAttachList"]: - v_a.update(vlan=0) - if "is_deploy" in v_a.keys(): - del v_a["is_deploy"] - if v_a.get("vrf_lite"): - for ip, ser in self.ip_sn.items(): - if ser == v_a["serialNumber"]: - """Before apply the vrf_lite config, need double check if the switch role is started wth Border""" - role = self.inventory_data[ip].get("switchRole") - r = re.search(r"\bborder\b", role.lower()) 
- if not r: - msg = "VRF LITE cannot be attached to switch {0} with role {1}".format( - ip, role - ) - self.module.fail_json(msg=msg) + if vlan_id == 0: + vlan_path = self.paths["GET_VLAN"].format(self.fabric) + vlan_data = dcnm_send(self.module, "GET", vlan_path) - """Get the IP/Interface that is connected to edge router can be get from below query""" - method = "GET" - path = self.paths["GET_VRF_SWITCH"].format( - self.fabric, v_a["vrfName"], v_a["serialNumber"] - ) + if vlan_data["RETURN_CODE"] != 200: + msg = f"{self.class_name}.{method_name}: " + msg += f"Failure getting autogenerated vlan_id {vlan_data}" + self.module.fail_json(msg=msg) - lite_objects = dcnm_send(self.module, method, path) + vlan_id = vlan_data["DATA"] - if not lite_objects.get("DATA"): - return + t_conf = { + "vrfSegmentId": vrf["vrfId"], + "vrfName": json_to_dict.get("vrfName", ""), + "vrfVlanId": vlan_id, + "vrfVlanName": json_to_dict.get("vrfVlanName"), + "vrfIntfDescription": json_to_dict.get("vrfIntfDescription"), + "vrfDescription": json_to_dict.get("vrfDescription"), + "mtu": json_to_dict.get("mtu"), + "tag": json_to_dict.get("tag"), + "vrfRouteMap": json_to_dict.get("vrfRouteMap"), + "maxBgpPaths": json_to_dict.get("maxBgpPaths"), + "maxIbgpPaths": json_to_dict.get("maxIbgpPaths"), + "ipv6LinkLocalFlag": json_to_dict.get("ipv6LinkLocalFlag"), + "trmEnabled": json_to_dict.get("trmEnabled"), + "isRPExternal": json_to_dict.get("isRPExternal"), + "rpAddress": json_to_dict.get("rpAddress"), + "loopbackNumber": json_to_dict.get("loopbackNumber"), + "L3VniMcastGroup": json_to_dict.get("L3VniMcastGroup"), + "multicastGroup": json_to_dict.get("multicastGroup"), + "trmBGWMSiteEnabled": json_to_dict.get("trmBGWMSiteEnabled"), + "advertiseHostRouteFlag": json_to_dict.get("advertiseHostRouteFlag"), + "advertiseDefaultRouteFlag": json_to_dict.get( + "advertiseDefaultRouteFlag" + ), + "configureStaticDefaultRouteFlag": json_to_dict.get( + "configureStaticDefaultRouteFlag" + ), + "bgpPassword": json_to_dict.get("bgpPassword"), + "bgpPasswordKeyType": json_to_dict.get("bgpPasswordKeyType"), + } - lite = lite_objects["DATA"][0]["switchDetailsList"][0][ - "extensionPrototypeValues" - ] - ext_values = None - extension_values = {} - extension_values["VRF_LITE_CONN"] = [] - extension_values["MULTISITE_CONN"] = [] - - for ext_l in lite: - if str(ext_l.get("extensionType")) == "VRF_LITE": - ext_values = ext_l["extensionValues"] - ext_values = ast.literal_eval(ext_values) - for ad_l in v_a.get("vrf_lite"): - if ad_l["interface"] == ext_values["IF_NAME"]: - vrflite_con = {} - vrflite_con["VRF_LITE_CONN"] = [] - vrflite_con["VRF_LITE_CONN"].append({}) - vrflite_con["VRF_LITE_CONN"][0][ - "IF_NAME" - ] = ad_l["interface"] - - if ad_l["dot1q"]: - vrflite_con["VRF_LITE_CONN"][0][ - "DOT1Q_ID" - ] = str(ad_l["dot1q"]) - else: - vrflite_con["VRF_LITE_CONN"][0][ - "DOT1Q_ID" - ] = str(ext_values["DOT1Q_ID"]) - - if ad_l["ipv4_addr"]: - vrflite_con["VRF_LITE_CONN"][0][ - "IP_MASK" - ] = ad_l["ipv4_addr"] - else: - vrflite_con["VRF_LITE_CONN"][0][ - "IP_MASK" - ] = ext_values["IP_MASK"] - - if ad_l["neighbor_ipv4"]: - vrflite_con["VRF_LITE_CONN"][0][ - "NEIGHBOR_IP" - ] = ad_l["neighbor_ipv4"] - else: - vrflite_con["VRF_LITE_CONN"][0][ - "NEIGHBOR_IP" - ] = ext_values["NEIGHBOR_IP"] - - vrflite_con["VRF_LITE_CONN"][0][ - "NEIGHBOR_ASN" - ] = ext_values["NEIGHBOR_ASN"] - - if ad_l["ipv6_addr"]: - vrflite_con["VRF_LITE_CONN"][0][ - "IPV6_MASK" - ] = ad_l["ipv6_addr"] - else: - vrflite_con["VRF_LITE_CONN"][0][ - "IPV6_MASK" - ] = 
ext_values["IPV6_MASK"] - - if ad_l["neighbor_ipv6"]: - vrflite_con["VRF_LITE_CONN"][0][ - "IPV6_NEIGHBOR" - ] = ad_l["neighbor_ipv6"] - else: - vrflite_con["VRF_LITE_CONN"][0][ - "IPV6_NEIGHBOR" - ] = ext_values["IPV6_NEIGHBOR"] - - vrflite_con["VRF_LITE_CONN"][0][ - "AUTO_VRF_LITE_FLAG" - ] = ext_values["AUTO_VRF_LITE_FLAG"] - - if ad_l["peer_vrf"]: - vrflite_con["VRF_LITE_CONN"][0][ - "PEER_VRF_NAME" - ] = ad_l["peer_vrf"] - else: - vrflite_con["VRF_LITE_CONN"][0][ - "PEER_VRF_NAME" - ] = ext_values["PEER_VRF_NAME"] - - vrflite_con["VRF_LITE_CONN"][0][ - "VRF_LITE_JYTHON_TEMPLATE" - ] = "Ext_VRF_Lite_Jython" - if (extension_values["VRF_LITE_CONN"]): - extension_values["VRF_LITE_CONN"]["VRF_LITE_CONN"].extend(vrflite_con["VRF_LITE_CONN"]) - else: - extension_values["VRF_LITE_CONN"] = vrflite_con - - ms_con = {} - ms_con["MULTISITE_CONN"] = [] - extension_values["MULTISITE_CONN"] = json.dumps( - ms_con - ) - - del ad_l - - if ext_values is None: - for ip, ser in self.ip_sn.items(): - if ser == v_a["serialNumber"]: - msg = "There is no VRF LITE capable interface on this switch {0}".format( - ip - ) - self.module.fail_json(msg=msg) - else: - extension_values["VRF_LITE_CONN"] = json.dumps( - extension_values["VRF_LITE_CONN"] - ) - v_a["extensionValues"] = json.dumps( - extension_values - ).replace(" ", "") - if v_a.get("vrf_lite", None) is not None: - del v_a["vrf_lite"] + if self.dcnm_version > 11: + t_conf.update(isRPAbsent=json_to_dict.get("isRPAbsent")) + t_conf.update(ENABLE_NETFLOW=json_to_dict.get("ENABLE_NETFLOW")) + t_conf.update(NETFLOW_MONITOR=json_to_dict.get("NETFLOW_MONITOR")) + t_conf.update(disableRtAuto=json_to_dict.get("disableRtAuto")) + t_conf.update(routeTargetImport=json_to_dict.get("routeTargetImport")) + t_conf.update(routeTargetExport=json_to_dict.get("routeTargetExport")) + t_conf.update( + routeTargetImportEvpn=json_to_dict.get("routeTargetImportEvpn") + ) + t_conf.update( + routeTargetExportEvpn=json_to_dict.get("routeTargetExportEvpn") + ) + t_conf.update( + routeTargetImportMvpn=json_to_dict.get("routeTargetImportMvpn") + ) + t_conf.update( + routeTargetExportMvpn=json_to_dict.get("routeTargetExportMvpn") + ) - else: - if "vrf_lite" in v_a.keys(): - del v_a["vrf_lite"] + vrf.update({"vrfTemplateConfig": json.dumps(t_conf)}) + action = "create" + verb = "POST" path = self.paths["GET_VRF"].format(self.fabric) - method = "POST" - attach_path = path + "/attachments" - - # Update the fabric name to specific fabric to which the switches belong for multisite fabric. 
- if self.fabric_type == "MFD": - for elem in self.diff_attach: - for node in elem["lanAttachList"]: - node["fabric"] = self.sn_fab[node["serialNumber"]] - resp = dcnm_send( - self.module, method, attach_path, json.dumps(self.diff_attach) - ) - self.result["response"].append(resp) - fail, self.result["changed"] = self.handle_response(resp, "attach") - if fail: - if is_rollback: - self.failed_to_rollback = True - return - self.failure(resp) - method = "POST" - if self.diff_deploy: - deploy_path = path + "/deployments" - resp = dcnm_send( - self.module, method, deploy_path, json.dumps(self.diff_deploy) + self.send_to_controller(action, verb, path, vrf, is_rollback) + + def is_border_switch(self, serial_number) -> bool: + """ + # Summary + + Given a switch serial_number: + + - Return True if the switch is a border switch + - Return False otherwise + """ + is_border = False + for ip, serial in self.ip_sn.items(): + if serial != serial_number: + continue + role = self.inventory_data[ip].get("switchRole") + r = re.search(r"\bborder\b", role.lower()) + if r: + is_border = True + return is_border + + def get_extension_values_from_lite_objects(self, lite: list[dict]) -> list: + """ + # Summary + + Given a list of lite objects, return: + + - A list containing the extensionValues, if any, from these + lite objects. + - An empty list, if the lite objects have no extensionValues + + ## Raises + + None + """ + extension_values_list: list[dict] = [] + for item in lite: + if str(item.get("extensionType")) != "VRF_LITE": + continue + extension_values = item["extensionValues"] + extension_values = ast.literal_eval(extension_values) + extension_values_list.append(extension_values) + return extension_values_list + + def update_vrf_attach_vrf_lite_extensions(self, vrf_attach, lite) -> dict: + """ + # Summary + + ## params + - vrf_attach + A vrf_attach object containing a vrf_lite extension + to update + - lite: A list of current vrf_lite extension objects from + the switch + + ## Description + + Merge the values from the vrf_attach object into a matching vrf_lite + extension object (if any) from the switch. Update the vrf_attach + object with the merged result. Return the updated vrf_attach + object. If no matching vrf_lite extension object is found on the + switch, return the unmodified vrf_attach object. + + "matching" in this case means: + + 1. The extensionType of the switch's extension object is VRF_LITE + 2. The IF_NAME in the extensionValues of the extension object + matches the interface in vrf_attach.vrf_lite. + """ + method_name = inspect.stack()[0][3] + self.log.debug("ENTERED") + + serial_number = vrf_attach.get("serialNumber") + + msg = f"serial_number: {serial_number}, " + msg += "Received vrf_attach: " + msg += f"{json.dumps(vrf_attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + + if vrf_attach.get("vrf_lite") is None: + if "vrf_lite" in vrf_attach: + del vrf_attach["vrf_lite"] + vrf_attach["extensionValues"] = "" + msg = "vrf_attach does not contain a vrf_lite configuration. " + msg += "Returning it with empty extensionValues. 
" + msg += f"{json.dumps(vrf_attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + return copy.deepcopy(vrf_attach) + + msg = f"serial_number: {serial_number}, " + msg += "Received lite: " + msg += f"{json.dumps(lite, indent=4, sort_keys=True)}" + self.log.debug(msg) + + ext_values = self.get_extension_values_from_lite_objects(lite) + if ext_values is None: + ip_address = self.serial_number_to_ip(serial_number) + msg = f"{self.class_name}.{method_name}: " + msg += "No VRF LITE capable interfaces found on " + msg += "this switch. " + msg += f"ip: {ip_address}, " + msg += f"serial_number: {serial_number}" + self.module.fail_json(msg=msg) + + matches: dict = {} + # user_vrf_lite_interfaces and switch_vrf_lite_interfaces + # are used in fail_json message when no matches interfaces + # are found on the switch + user_vrf_lite_interfaces = [] + switch_vrf_lite_interfaces = [] + for item in vrf_attach.get("vrf_lite"): + item_interface = item.get("interface") + user_vrf_lite_interfaces.append(item_interface) + for ext_value in ext_values: + ext_value_interface = ext_value.get("IF_NAME") + switch_vrf_lite_interfaces.append(ext_value_interface) + msg = f"item_interface: {item_interface}, " + msg += f"ext_value_interface: {ext_value_interface}" + self.log.debug(msg) + if item_interface == ext_value_interface: + msg = "Found item: " + msg += f"item[interface] {item_interface}, == " + msg += f"ext_values[IF_NAME] {ext_value_interface}, " + msg += f"{json.dumps(item)}" + self.log.debug(msg) + matches[item_interface] = {} + matches[item_interface]["user"] = item + matches[item_interface]["switch"] = ext_value + if not matches: + # No matches. fail_json here to avoid the following 500 error + # "Provided interface doesn't have extensions" + ip_address = self.serial_number_to_ip(serial_number) + msg = "No matching interfaces with vrf_lite extensions " + msg += f"found on switch {ip_address} ({serial_number}). " + msg += "playbook vrf_lite_interfaces: " + msg += f"{','.join(sorted(user_vrf_lite_interfaces))}. " + msg += "switch vrf_lite_interfaces: " + msg += f"{','.join(sorted(switch_vrf_lite_interfaces))}." + self.log.debug(msg) + self.module.fail_json(msg) + + msg = "Matching extension object(s) found on the switch." + msg += "Proceeding to convert playbook vrf_lite configuration " + msg += "to payload format." 
+ self.log.debug(msg) + + extension_values: dict = {} + extension_values["VRF_LITE_CONN"] = [] + extension_values["MULTISITE_CONN"] = [] + + msg = f"matches: {json.dumps(matches, indent=4, sort_keys=True)}" + self.log.debug(msg) + + for interface, item in matches.items(): + msg = f"interface: {interface}: " + msg += f"{json.dumps(item, indent=4, sort_keys=True)}" + self.log.debug(msg) + + nbr_dict = {} + nbr_dict["IF_NAME"] = item["user"]["interface"] + + if item["user"]["dot1q"]: + nbr_dict["DOT1Q_ID"] = str(item["user"]["dot1q"]) + else: + nbr_dict["DOT1Q_ID"] = str(item["switch"]["DOT1Q_ID"]) + + if item["user"]["ipv4_addr"]: + nbr_dict["IP_MASK"] = item["user"]["ipv4_addr"] + else: + nbr_dict["IP_MASK"] = item["switch"]["IP_MASK"] + + if item["user"]["neighbor_ipv4"]: + nbr_dict["NEIGHBOR_IP"] = item["user"]["neighbor_ipv4"] + else: + nbr_dict["NEIGHBOR_IP"] = item["switch"]["NEIGHBOR_IP"] + + nbr_dict["NEIGHBOR_ASN"] = item["switch"]["NEIGHBOR_ASN"] + + if item["user"]["ipv6_addr"]: + nbr_dict["IPV6_MASK"] = item["user"]["ipv6_addr"] + else: + nbr_dict["IPV6_MASK"] = item["switch"]["IPV6_MASK"] + + if item["user"]["neighbor_ipv6"]: + nbr_dict["IPV6_NEIGHBOR"] = item["user"]["neighbor_ipv6"] + else: + nbr_dict["IPV6_NEIGHBOR"] = item["switch"]["IPV6_NEIGHBOR"] + + nbr_dict["AUTO_VRF_LITE_FLAG"] = item["switch"]["AUTO_VRF_LITE_FLAG"] + + if item["user"]["peer_vrf"]: + nbr_dict["PEER_VRF_NAME"] = item["user"]["peer_vrf"] + else: + nbr_dict["PEER_VRF_NAME"] = item["switch"]["PEER_VRF_NAME"] + + nbr_dict["VRF_LITE_JYTHON_TEMPLATE"] = "Ext_VRF_Lite_Jython" + vrflite_con: dict = {} + vrflite_con["VRF_LITE_CONN"] = [] + vrflite_con["VRF_LITE_CONN"].append(copy.deepcopy(nbr_dict)) + if extension_values["VRF_LITE_CONN"]: + extension_values["VRF_LITE_CONN"]["VRF_LITE_CONN"].extend( + vrflite_con["VRF_LITE_CONN"] + ) + else: + extension_values["VRF_LITE_CONN"] = vrflite_con + + ms_con: dict = {} + ms_con["MULTISITE_CONN"] = [] + extension_values["MULTISITE_CONN"] = json.dumps(ms_con) + + extension_values["VRF_LITE_CONN"] = json.dumps( + extension_values["VRF_LITE_CONN"] ) - self.result["response"].append(resp) - fail, self.result["changed"] = self.handle_response(resp, "deploy") - if fail: - if is_rollback: - self.failed_to_rollback = True + vrf_attach["extensionValues"] = json.dumps(extension_values).replace(" ", "") + if vrf_attach.get("vrf_lite") is not None: + del vrf_attach["vrf_lite"] + + msg = "Returning modified vrf_attach: " + msg += f"{json.dumps(vrf_attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + return copy.deepcopy(vrf_attach) + + def ip_to_serial_number(self, ip_address): + """ + Given a switch ip_address, return the switch serial number. + + If ip_address is not found, return None. + """ + return self.ip_sn.get(ip_address) + + def serial_number_to_ip(self, serial_number): + """ + Given a switch serial_number, return the switch ip address. + + If serial_number is not found, return None. + """ + return self.sn_ip.get(serial_number) + + def send_to_controller(self, action, verb, path, payload, is_rollback=False): + caller = inspect.stack()[1][3] + msg = "ENTERED. 
" + msg += f"caller: {caller}" + self.log.debug(msg) + + msg = "TX controller: " + msg += f"caller: {caller}, " + msg += f"verb: {verb}, " + msg += f"path: {path}, " + msg += "payload: " + msg += f"{json.dumps(payload, indent=4, sort_keys=True)}" + self.log.debug(msg) + + if payload is not None: + resp = dcnm_send(self.module, verb, path, json.dumps(payload)) + else: + resp = dcnm_send(self.module, verb, path) + + msg = "RX controller: " + msg += f"caller: {caller}, " + msg += f"verb: {verb}, " + msg += f"path: {path}, " + msg += "response: " + msg += f"{json.dumps(resp, indent=4, sort_keys=True)}" + self.log.debug(msg) + + msg = f"caller: {caller}, " + msg += "Calling self.handle_response." + self.log.debug(msg) + + self.result["response"].append(resp) + fail, self.result["changed"] = self.handle_response(resp, action) + + msg = f"caller: {caller}, " + msg += "Calling self.handle_response. DONE" + self.log.debug(msg) + + if fail: + if is_rollback: + self.failed_to_rollback = True + return + msg = f"caller: {caller}, " + msg += "Calling self.failure." + self.log.debug(msg) + self.failure(resp) + + def push_diff_attach(self, is_rollback=False): + """ + # Summary + + Send diff_attach to the controller + """ + method_name = inspect.stack()[0][3] + msg = "NEW_CODE ENTERED" + self.log.debug(msg) + + msg = "self.diff_attach PRE: " + msg += f"{json.dumps(self.diff_attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + + if not self.diff_attach: + msg = "Early return. self.diff_attach is empty. " + msg += f"{json.dumps(self.diff_attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + return + + new_diff_attach_list = [] + for diff_attach in self.diff_attach: + msg = "diff_attach: " + msg += f"{json.dumps(diff_attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + + new_lan_attach_list = [] + for vrf_attach in diff_attach["lanAttachList"]: + vrf_attach.update(vlan=0) + + serial_number = vrf_attach.get("serialNumber") + ip_address = self.serial_number_to_ip(serial_number) + msg = f"ip_address {ip_address} ({serial_number}), " + msg += "vrf_attach: " + msg += f"{json.dumps(vrf_attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + + if "is_deploy" in vrf_attach: + del vrf_attach["is_deploy"] + if not vrf_attach.get("vrf_lite"): + if "vrf_lite" in vrf_attach: + del vrf_attach["vrf_lite"] + new_lan_attach_list.append(vrf_attach) + msg = f"ip_address {ip_address} ({serial_number}), " + msg += "Skipping vrf_lite: " + msg += f"{json.dumps(vrf_attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + continue + + msg = f"ip_address {ip_address} ({serial_number}), " + msg += "vrf_attach.get(vrf_lite): " + msg += f"{json.dumps(vrf_attach.get('vrf_lite'), indent=4, sort_keys=True)}" + self.log.debug(msg) + + if not self.is_border_switch(serial_number): + # arobel TODO: Not covered by UT + msg = f"{self.class_name}.{method_name}: " + msg += "VRF LITE cannot be attached to " + msg += "non-border switch. " + msg += f"ip: {ip_address}, " + msg += f"serial number: {serial_number}" + self.module.fail_json(msg=msg) + + lite_objects = self.get_vrf_lite_objects(vrf_attach) + + msg = f"ip_address {ip_address} ({serial_number}), " + msg += "lite_objects: " + msg += f"{json.dumps(lite_objects, indent=4, sort_keys=True)}" + self.log.debug(msg) + + if not lite_objects.get("DATA"): + msg = f"ip_address {ip_address} ({serial_number}), " + msg += "Early return, no lite objects." 
+ self.log.debug(msg) return - self.failure(resp) + + lite = lite_objects["DATA"][0]["switchDetailsList"][0][ + "extensionPrototypeValues" + ] + msg = f"ip_address {ip_address} ({serial_number}), " + msg += "lite: " + msg += f"{json.dumps(lite, indent=4, sort_keys=True)}" + self.log.debug(msg) + + msg = f"ip_address {ip_address} ({serial_number}), " + msg += "old vrf_attach: " + msg += f"{json.dumps(vrf_attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + + vrf_attach = self.update_vrf_attach_vrf_lite_extensions( + vrf_attach, lite + ) + msg = f"ip_address {ip_address} ({serial_number}), " + msg += "new vrf_attach: " + msg += f"{json.dumps(vrf_attach, indent=4, sort_keys=True)}" + self.log.debug(msg) + + new_lan_attach_list.append(vrf_attach) + + msg = "Updating diff_attach[lanAttachList] with: " + msg += f"{json.dumps(new_lan_attach_list, indent=4, sort_keys=True)}" + self.log.debug(msg) + + diff_attach["lanAttachList"] = copy.deepcopy(new_lan_attach_list) + new_diff_attach_list.append(copy.deepcopy(diff_attach)) + + msg = "new_diff_attach_list: " + msg += f"{json.dumps(new_diff_attach_list, indent=4, sort_keys=True)}" + self.log.debug(msg) + + action = "attach" + verb = "POST" + path = self.paths["GET_VRF"].format(self.fabric) + attach_path = path + "/attachments" + + # For multisite fabrics, update the fabric name to the child fabric + # containing the switches. + if self.fabric_type == "MFD": + for elem in new_diff_attach_list: + for node in elem["lanAttachList"]: + node["fabric"] = self.sn_fab[node["serialNumber"]] + + self.send_to_controller( + action, verb, attach_path, new_diff_attach_list, is_rollback + ) + + def push_diff_deploy(self, is_rollback=False): + """ + # Summary + + Send diff_deploy to the controller + """ + self.log.debug("ENTERED") + + if not self.diff_deploy: + return + + action = "deploy" + verb = "POST" + path = self.paths["GET_VRF"].format(self.fabric) + deploy_path = path + "/deployments" + + self.send_to_controller( + action, verb, deploy_path, self.diff_deploy, is_rollback + ) + + def release_resources_by_id(self, id_list=None): + method_name = inspect.stack()[0][3] + self.log.debug("ENTERED") + + if id_list is None: + return + if not isinstance(id_list, list): + msg = f"{self.class_name}.{method_name}: " + msg += "id_list must be a list of resource IDs. " + msg += f"Got: {id_list}." + self.module.fail_json(msg) + + try: + id_list = [int(x) for x in id_list] + except ValueError as error: + msg = f"{self.class_name}.{method_name}: " + msg += "id_list must be a list of resource IDs. " + msg += "Where each id is convertable to integer." + msg += f"Got: {id_list}. " + msg += f"Error detail: {error}" + self.module.fail_json(msg) + + id_list = [str(x) for x in id_list] + + msg = f"Releasing resource IDs: {','.join(id_list)}" + self.log.debug(msg) + + action = "deploy" + path = "/appcenter/cisco/ndfc/api/v1/lan-fabric" + path += "/rest/resource-manager/resources" + path += f"?id={','.join(id_list)}" + verb = "DELETE" + self.send_to_controller(action, verb, path, None) + + def release_orphaned_resources(self, vrf, is_rollback=False): + """ + # Summary + + Release orphaned resources. + + ## Description + + After a VRF delete operation, resources such as the TOP_DOWN_VRF_VLAN + resource below, can be orphaned from their VRFs. Below, notice that + resourcePool.vrfName is null. 
This method releases resources if + the following are true for the resources: + + - allocatedFlag is False + - entityName == vrf + - fabricName == self.fabric + + ```json + [ + { + "id": 36368, + "resourcePool": { + "id": 0, + "poolName": "TOP_DOWN_VRF_VLAN", + "fabricName": "f1", + "vrfName": null, + "poolType": "ID_POOL", + "dynamicSubnetRange": null, + "targetSubnet": 0, + "overlapAllowed": false, + "hierarchicalKey": "f1" + }, + "entityType": "Device", + "entityName": "VRF_1", + "allocatedIp": "201", + "allocatedOn": 1734040978066, + "allocatedFlag": false, + "allocatedScopeValue": "FDO211218GC", + "ipAddress": "172.22.150.103", + "switchName": "cvd-1312-leaf", + "hierarchicalKey": "0" + } + ] + ``` + """ + self.log.debug("ENTERED") + + path = "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/" + path += f"resource-manager/fabric/{self.fabric}/" + path += "pools/TOP_DOWN_VRF_VLAN" + resp = dcnm_send(self.module, "GET", path) + self.result["response"].append(resp) + fail, self.result["changed"] = self.handle_response(resp, "deploy") + if fail: + if is_rollback: + self.failed_to_rollback = True + return + self.failure(resp) + + delete_ids = [] + for item in resp["DATA"]: + if "entityName" not in item: + continue + if item["entityName"] != vrf: + continue + if item.get("allocatedFlag") is not False: + continue + if item.get("id") is None: + continue + + msg = f"item {json.dumps(item, indent=4, sort_keys=True)}" + self.log.debug(msg) + + delete_ids.append(item["id"]) + + if len(delete_ids) == 0: + return + self.release_resources_by_id(delete_ids) + + def push_to_remote(self, is_rollback=False): + """ + # Summary + + Send all diffs to the controller + """ + self.log.debug("ENTERED") + + self.push_diff_create_update(is_rollback) + + # The detach and un-deploy operations are executed before the + # create,attach and deploy to address cases where a VLAN for vrf + # attachment being deleted is re-used on a new vrf attachment being + # created. This is needed specially for state: overridden + + self.push_diff_detach(is_rollback) + self.push_diff_undeploy(is_rollback) + + self.push_diff_delete(is_rollback) + for vrf_name in self.diff_delete: + self.release_orphaned_resources(vrf_name, is_rollback) + + self.push_diff_create(is_rollback) + self.push_diff_attach(is_rollback) + self.push_diff_deploy(is_rollback) def wait_for_vrf_del_ready(self): + """ + # Summary - method = "GET" - if self.diff_delete: - for vrf in self.diff_delete: - state = False - path = self.paths["GET_VRF_ATTACH"].format(self.fabric, vrf) - while not state: - resp = dcnm_send(self.module, method, path) - state = True - if resp.get("DATA") is not None: - attach_list = resp["DATA"][0]["lanAttachList"] - for atch in attach_list: - if ( - atch["lanAttachState"] == "OUT-OF-SYNC" - or atch["lanAttachState"] == "FAILED" - ): - self.diff_delete.update({vrf: "OUT-OF-SYNC"}) - break - if atch["lanAttachState"] != "NA": - self.diff_delete.update({vrf: "DEPLOYED"}) - state = False - time.sleep(self.WAIT_TIME_FOR_DELETE_LOOP) - break - self.diff_delete.update({vrf: "NA"}) + Wait for VRFs to be ready for deletion. - return True + ## Raises + + Calls fail_json if VRF has associated network attachments. 
+ """ + method_name = inspect.stack()[0][3] + self.log.debug("ENTERED") + + for vrf in self.diff_delete: + ok_to_delete = False + path = self.paths["GET_VRF_ATTACH"].format(self.fabric, vrf) + + while not ok_to_delete: + resp = dcnm_send(self.module, "GET", path) + ok_to_delete = True + if resp.get("DATA") is None: + time.sleep(self.WAIT_TIME_FOR_DELETE_LOOP) + continue + + attach_list = resp["DATA"][0]["lanAttachList"] + msg = f"ok_to_delete: {ok_to_delete}, " + msg += f"attach_list: {json.dumps(attach_list, indent=4)}" + self.log.debug(msg) + + for attach in attach_list: + if ( + attach["lanAttachState"] == "OUT-OF-SYNC" + or attach["lanAttachState"] == "FAILED" + ): + self.diff_delete.update({vrf: "OUT-OF-SYNC"}) + break + if ( + attach["lanAttachState"] == "DEPLOYED" + and attach["isLanAttached"] is True + ): + vrf_name = attach.get("vrfName", "unknown") + fabric_name = attach.get("fabricName", "unknown") + switch_ip = attach.get("ipAddress", "unknown") + switch_name = attach.get("switchName", "unknown") + vlan_id = attach.get("vlanId", "unknown") + msg = f"{self.class_name}.{method_name}: " + msg += f"Network attachments associated with vrf {vrf_name} " + msg += "must be removed (e.g. using the dcnm_network module) " + msg += "prior to deleting the vrf. " + msg += f"Details: fabric_name: {fabric_name}, " + msg += f"vrf_name: {vrf_name}. " + msg += "Network attachments found on " + msg += f"switch_ip: {switch_ip}, " + msg += f"switch_name: {switch_name}, " + msg += f"vlan_id: {vlan_id}" + self.module.fail_json(msg=msg) + if attach["lanAttachState"] != "NA": + time.sleep(self.WAIT_TIME_FOR_DELETE_LOOP) + self.diff_delete.update({vrf: "DEPLOYED"}) + ok_to_delete = False + break + self.diff_delete.update({vrf: "NA"}) + + def attach_spec(self): + """ + # Summary + + Return the argument spec for network attachments + """ + spec = {} + spec["deploy"] = {"default": True, "type": "bool"} + spec["ip_address"] = {"required": True, "type": "str"} + spec["import_evpn_rt"] = {"default": "", "type": "str"} + spec["export_evpn_rt"] = {"default": "", "type": "str"} + + if self.state in ("merged", "overridden", "replaced"): + spec["vrf_lite"] = {"type": "list"} + else: + spec["vrf_lite"] = {"default": [], "type": "list"} + + return copy.deepcopy(spec) + + def lite_spec(self): + """ + # Summary + + Return the argument spec for VRF LITE parameters + """ + spec = {} + spec["dot1q"] = {"type": "int"} + spec["ipv4_addr"] = {"type": "ipv4_subnet"} + spec["ipv6_addr"] = {"type": "ipv6"} + spec["neighbor_ipv4"] = {"type": "ipv4"} + spec["neighbor_ipv6"] = {"type": "ipv6"} + spec["peer_vrf"] = {"type": "str"} + + if self.state in ("merged", "overridden", "replaced"): + spec["interface"] = {"required": True, "type": "str"} + else: + spec["interface"] = {"type": "str"} + + return copy.deepcopy(spec) + + def vrf_spec(self): + """ + # Summary + + Return the argument spec for VRF parameters + """ + spec = {} + spec["adv_default_routes"] = {"default": True, "type": "bool"} + spec["adv_host_routes"] = {"default": False, "type": "bool"} + + spec["attach"] = {"type": "list"} + spec["bgp_password"] = {"default": "", "type": "str"} + spec["bgp_passwd_encrypt"] = {"choices": [3, 7], "default": 3, "type": "int"} + spec["disable_rt_auto"] = {"default": False, "type": "bool"} + + spec["export_evpn_rt"] = {"default": "", "type": "str"} + spec["export_mvpn_rt"] = {"default": "", "type": "str"} + spec["export_vpn_rt"] = {"default": "", "type": "str"} + + spec["import_evpn_rt"] = {"default": "", "type": "str"} + 
spec["import_mvpn_rt"] = {"default": "", "type": "str"} + spec["import_vpn_rt"] = {"default": "", "type": "str"} + + spec["ipv6_linklocal_enable"] = {"default": True, "type": "bool"} + + spec["loopback_route_tag"] = { + "default": 12345, + "range_max": 4294967295, + "type": "int", + } + spec["max_bgp_paths"] = { + "default": 1, + "range_max": 64, + "range_min": 1, + "type": "int", + } + spec["max_ibgp_paths"] = { + "default": 2, + "range_max": 64, + "range_min": 1, + "type": "int", + } + spec["netflow_enable"] = {"default": False, "type": "bool"} + spec["nf_monitor"] = {"default": "", "type": "str"} + + spec["no_rp"] = {"default": False, "type": "bool"} + spec["overlay_mcast_group"] = {"default": "", "type": "str"} + + spec["redist_direct_rmap"] = { + "default": "FABRIC-RMAP-REDIST-SUBNET", + "type": "str", + } + spec["rp_address"] = {"default": "", "type": "str"} + spec["rp_external"] = {"default": False, "type": "bool"} + spec["rp_loopback_id"] = {"default": "", "range_max": 1023, "type": "int"} + + spec["service_vrf_template"] = {"default": None, "type": "str"} + spec["source"] = {"default": None, "type": "str"} + spec["static_default_route"] = {"default": True, "type": "bool"} + + spec["trm_bgw_msite"] = {"default": False, "type": "bool"} + spec["trm_enable"] = {"default": False, "type": "bool"} + + spec["underlay_mcast_ip"] = {"default": "", "type": "str"} + + spec["vlan_id"] = {"range_max": 4094, "type": "int"} + spec["vrf_description"] = {"default": "", "type": "str"} + spec["vrf_id"] = {"range_max": 16777214, "type": "int"} + spec["vrf_intf_desc"] = {"default": "", "type": "str"} + spec["vrf_int_mtu"] = { + "default": 9216, + "range_max": 9216, + "range_min": 68, + "type": "int", + } + spec["vrf_name"] = {"length_max": 32, "required": True, "type": "str"} + spec["vrf_template"] = {"default": "Default_VRF_Universal", "type": "str"} + spec["vrf_extension_template"] = { + "default": "Default_VRF_Extension_Universal", + "type": "str", + } + spec["vrf_vlan_name"] = {"default": "", "type": "str"} + + if self.state in ("merged", "overridden", "replaced"): + spec["deploy"] = {"default": True, "type": "bool"} + else: + spec["deploy"] = {"type": "bool"} + + return copy.deepcopy(spec) def validate_input(self): """Parse the playbook values, validate to param specs.""" + method_name = inspect.stack()[0][3] + self.log.debug("ENTERED") - state = self.params["state"] + attach_spec = self.attach_spec() + lite_spec = self.lite_spec() + vrf_spec = self.vrf_spec() - if state == "merged" or state == "overridden" or state == "replaced": + msg = "attach_spec: " + msg += f"{json.dumps(attach_spec, indent=4, sort_keys=True)}" + self.log.debug(msg) - vrf_spec = dict( - vrf_name=dict(required=True, type="str", length_max=32), - vrf_id=dict(type="int", range_max=16777214), - vrf_template=dict(type="str", default="Default_VRF_Universal"), - vrf_extension_template=dict( - type="str", default="Default_VRF_Extension_Universal" - ), - vlan_id=dict(type="int", range_max=4094), - source=dict(type="str", default=None), - service_vrf_template=dict(type="str", default=None), - attach=dict(type="list"), - deploy=dict(type="bool", default=True), - vrf_vlan_name=dict(type="str", default=""), - vrf_intf_desc=dict(type="str", default=""), - vrf_description=dict(type="str", default=""), - vrf_int_mtu=dict(type="int", range_min=68, range_max=9216, default=9216), - loopback_route_tag=dict(type="int", default=12345, range_max=4294967295), - redist_direct_rmap=dict(type="str", default="FABRIC-RMAP-REDIST-SUBNET"), - 
max_bgp_paths=dict(type="int", range_min=1, range_max=64, default=1), - max_ibgp_paths=dict(type="int", range_min=1, range_max=64, default=2), - ipv6_linklocal_enable=dict(type="bool", default=True), - trm_enable=dict(type="bool", default=False), - no_rp=dict(type="bool", default=False), - rp_external=dict(type="bool", default=False), - rp_address=dict(type="str", default=""), - rp_loopback_id=dict(type="int", range_max=1023, default=""), - underlay_mcast_ip=dict(type="str", default=""), - overlay_mcast_group=dict(type="str", default=""), - trm_bgw_msite=dict(type="bool", default=False), - adv_host_routes=dict(type="bool", default=False), - adv_default_routes=dict(type="bool", default=True), - static_default_route=dict(type="bool", default=True), - bgp_password=dict(type="str", default=""), - bgp_passwd_encrypt=dict(type="int", default=3, choices=[3, 7]), - netflow_enable=dict(type="bool", default=False), - nf_monitor=dict(type="str", default=""), - disable_rt_auto=dict(type="bool", default=False), - import_vpn_rt=dict(type="str", default=""), - export_vpn_rt=dict(type="str", default=""), - import_evpn_rt=dict(type="str", default=""), - export_evpn_rt=dict(type="str", default=""), - import_mvpn_rt=dict(type="str", default=""), - export_mvpn_rt=dict(type="str", default=""), - ) - att_spec = dict( - ip_address=dict(required=True, type="str"), - deploy=dict(type="bool", default=True), - vrf_lite=dict(type="list"), - import_evpn_rt=dict(type="str", default=""), - export_evpn_rt=dict(type="str", default=""), - ) - lite_spec = dict( - interface=dict(required=True, type="str"), - peer_vrf=dict(type="str"), - ipv4_addr=dict(type="ipv4_subnet"), - neighbor_ipv4=dict(type="ipv4"), - ipv6_addr=dict(type="ipv6"), - neighbor_ipv6=dict(type="ipv6"), - dot1q=dict(type="int"), - ) + msg = "lite_spec: " + msg += f"{json.dumps(lite_spec, indent=4, sort_keys=True)}" + self.log.debug(msg) - msg = None + msg = "vrf_spec: " + msg += f"{json.dumps(vrf_spec, indent=4, sort_keys=True)}" + self.log.debug(msg) + + if self.state in ("merged", "overridden", "replaced"): + fail_msg_list = [] if self.config: for vrf in self.config: + msg = f"state {self.state}: " + msg += "self.config[vrf]: " + msg += f"{json.dumps(vrf, indent=4, sort_keys=True)}" + self.log.debug(msg) # A few user provided vrf parameters need special handling # Ignore user input for src and hard code it to None vrf["source"] = None @@ -2592,21 +3514,24 @@ def validate_input(self): vrf["service_vrf_template"] = None if "vrf_name" not in vrf: - msg = "vrf_name is mandatory under vrf parameters" + fail_msg_list.append( + "vrf_name is mandatory under vrf parameters" + ) - if "attach" in vrf and vrf["attach"]: + if isinstance(vrf.get("attach"), list): for attach in vrf["attach"]: - # if 'ip_address' not in attach or 'vlan_id' not in attach: - # msg = "ip_address and vlan_id are mandatory under attach parameters" if "ip_address" not in attach: - msg = "ip_address is mandatory under attach parameters" + fail_msg_list.append( + "ip_address is mandatory under attach parameters" + ) else: - if state == "merged" or state == "replaced": - msg = "config: element is mandatory for this state {0}".format( - state - ) + if self.state in ("merged", "replaced"): + msg = f"config element is mandatory for {self.state} state" + fail_msg_list.append(msg) - if msg: + if fail_msg_list: + msg = f"{self.class_name}.{method_name}: " + msg += ",".join(fail_msg_list) self.module.fail_json(msg=msg) if self.config: @@ -2614,12 +3539,22 @@ def validate_input(self): self.config, 
vrf_spec ) for vrf in valid_vrf: + + msg = f"state {self.state}: " + msg += "valid_vrf[vrf]: " + msg += f"{json.dumps(vrf, indent=4, sort_keys=True)}" + self.log.debug(msg) + if vrf.get("attach"): for entry in vrf.get("attach"): entry["deploy"] = vrf["deploy"] valid_att, invalid_att = validate_list_of_dicts( - vrf["attach"], att_spec + vrf["attach"], attach_spec ) + msg = f"state {self.state}: " + msg += "valid_att: " + msg += f"{json.dumps(valid_att, indent=4, sort_keys=True)}" + self.log.debug(msg) vrf["attach"] = valid_att invalid_params.extend(invalid_att) @@ -2628,79 +3563,24 @@ def validate_input(self): valid_lite, invalid_lite = validate_list_of_dicts( lite["vrf_lite"], lite_spec ) + msg = f"state {self.state}: " + msg += "valid_lite: " + msg += f"{json.dumps(valid_lite, indent=4, sort_keys=True)}" + self.log.debug(msg) + lite["vrf_lite"] = valid_lite invalid_params.extend(invalid_lite) self.validated.append(vrf) if invalid_params: - msg = "Invalid parameters in playbook: {0}".format( - "\n".join(invalid_params) - ) + # arobel: TODO: Not in UT + msg = f"{self.class_name}.{method_name}: " + msg += "Invalid parameters in playbook: " + msg += f"{','.join(invalid_params)}" self.module.fail_json(msg=msg) else: - vrf_spec = dict( - vrf_name=dict(required=True, type="str", length_max=32), - vrf_id=dict(type="int", range_max=16777214), - vrf_template=dict(type="str", default="Default_VRF_Universal"), - vrf_extension_template=dict( - type="str", default="Default_VRF_Extension_Universal" - ), - vlan_id=dict(type="int", range_max=4094), - source=dict(type="str", default=None), - service_vrf_template=dict(type="str", default=None), - attach=dict(type="list"), - deploy=dict(type="bool"), - vrf_vlan_name=dict(type="str", default=""), - vrf_intf_desc=dict(type="str", default=""), - vrf_description=dict(type="str", default=""), - vrf_int_mtu=dict(type="int", range_min=68, range_max=9216, default=9216), - loopback_route_tag=dict(type="int", default=12345, range_max=4294967295), - redist_direct_rmap=dict(type="str", default="FABRIC-RMAP-REDIST-SUBNET"), - max_bgp_paths=dict(type="int", range_min=1, range_max=64, default=1), - max_ibgp_paths=dict(type="int", range_min=1, range_max=64, default=2), - ipv6_linklocal_enable=dict(type="bool", default=True), - trm_enable=dict(type="bool", default=False), - no_rp=dict(type="bool", default=False), - rp_external=dict(type="bool", default=False), - rp_address=dict(type="str", default=""), - rp_loopback_id=dict(type="int", range_max=1023, default=""), - underlay_mcast_ip=dict(type="str", default=""), - overlay_mcast_group=dict(type="str", default=""), - trm_bgw_msite=dict(type="bool", default=False), - adv_host_routes=dict(type="bool", default=False), - adv_default_routes=dict(type="bool", default=True), - static_default_route=dict(type="bool", default=True), - bgp_password=dict(type="str", default=""), - bgp_passwd_encrypt=dict(type="int", default=3, choices=[3, 7]), - netflow_enable=dict(type="bool", default=False), - nf_monitor=dict(type="str", default=""), - disable_rt_auto=dict(type="bool", default=False), - import_vpn_rt=dict(type="str", default=""), - export_vpn_rt=dict(type="str", default=""), - import_evpn_rt=dict(type="str", default=""), - export_evpn_rt=dict(type="str", default=""), - import_mvpn_rt=dict(type="str", default=""), - export_mvpn_rt=dict(type="str", default=""), - ) - att_spec = dict( - ip_address=dict(required=True, type="str"), - deploy=dict(type="bool", default=True), - vrf_lite=dict(type="list", default=[]), - 
import_evpn_rt=dict(type="str", default=""), - export_evpn_rt=dict(type="str", default=""), - ) - lite_spec = dict( - interface=dict(type="str"), - peer_vrf=dict(type="str"), - ipv4_addr=dict(type="ipv4_subnet"), - neighbor_ipv4=dict(type="ipv4"), - ipv6_addr=dict(type="ipv6"), - neighbor_ipv6=dict(type="ipv6"), - dot1q=dict(type="int"), - ) - if self.config: valid_vrf, invalid_params = validate_list_of_dicts( self.config, vrf_spec @@ -2708,7 +3588,7 @@ def validate_input(self): for vrf in valid_vrf: if vrf.get("attach"): valid_att, invalid_att = validate_list_of_dicts( - vrf["attach"], att_spec + vrf["attach"], attach_spec ) vrf["attach"] = valid_att invalid_params.extend(invalid_att) @@ -2722,20 +3602,21 @@ def validate_input(self): self.validated.append(vrf) if invalid_params: - msg = "Invalid parameters in playbook: {0}".format( - "\n".join(invalid_params) - ) + # arobel: TODO: Not in UT + msg = f"{self.class_name}.{method_name}: " + msg += "Invalid parameters in playbook: " + msg += f"{','.join(invalid_params)}" self.module.fail_json(msg=msg) def handle_response(self, res, op): + self.log.debug("ENTERED") fail = False changed = True if op == "query_dcnm": - # This if blocks handles responses to the query APIs against DCNM. + # These if blocks handle responses to the query APIs. # Basically all GET operations. - # if res.get("ERROR") == "Not Found" and res["RETURN_CODE"] == 404: return True, False if res["RETURN_CODE"] != 200 or res["MESSAGE"] != "OK": @@ -2759,15 +3640,14 @@ def handle_response(self, res, op): return fail, changed def failure(self, resp): - - # Donot Rollback for Multi-site fabrics + # Do not Rollback for Multi-site fabrics if self.fabric_type == "MFD": self.failed_to_rollback = True self.module.fail_json(msg=resp) return - # Implementing a per task rollback logic here so that we rollback DCNM to the have state - # whenever there is a failure in any of the APIs. + # Implementing a per task rollback logic here so that we rollback + # to the have state whenever there is a failure in any of the APIs. 
# The idea would be to run overridden state with want=have and have=dcnm_state self.want_create = self.have_create self.want_attach = self.have_attach @@ -2782,7 +3662,8 @@ def failure(self, resp): self.push_to_remote(True) if self.failed_to_rollback: - msg1 = "FAILED - Attempted rollback of the task has failed, may need manual intervention" + msg1 = "FAILED - Attempted rollback of the task has failed, " + msg1 += "may need manual intervention" else: msg1 = "SUCCESS - Attempted rollback of the task has succeeded" @@ -2806,6 +3687,13 @@ def failure(self, resp): def main(): """main entry point for module execution""" + # Logging setup + try: + log = Log() + log.commit() + except (TypeError, ValueError): + pass + element_spec = dict( fabric=dict(required=True, type="str"), config=dict(required=False, type="list", elements="dict"), @@ -2820,11 +3708,9 @@ def main(): dcnm_vrf = DcnmVrf(module) if not dcnm_vrf.ip_sn: - module.fail_json( - msg="Fabric {0} missing on DCNM or does not have any switches".format( - dcnm_vrf.fabric - ) - ) + msg = f"Fabric {dcnm_vrf.fabric} missing on the controller or " + msg += "does not have any switches" + module.fail_json(msg=msg) dcnm_vrf.validate_input() diff --git a/tests/integration/targets/dcnm_vrf/tasks/dcnm.yaml b/tests/integration/targets/dcnm_vrf/tasks/dcnm.yaml index 0154c6721..90fdd832c 100644 --- a/tests/integration/targets/dcnm_vrf/tasks/dcnm.yaml +++ b/tests/integration/targets/dcnm_vrf/tasks/dcnm.yaml @@ -16,8 +16,16 @@ set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}" tags: sanity +- name: debug + debug: + var: test_items + +- name: debug + debug: + var: testcase + - name: run test cases (connection=httpapi) - include: "{{ test_case_to_run }} ansible_connection=httpapi connection={{ dcnm }}" + include_tasks: "{{ test_case_to_run }}" with_items: "{{ test_items }}" loop_control: loop_var: test_case_to_run diff --git a/tests/integration/targets/dcnm_vrf/tasks/main.yaml b/tests/integration/targets/dcnm_vrf/tasks/main.yaml index e2c92c5f8..22b634ff4 100644 --- a/tests/integration/targets/dcnm_vrf/tasks/main.yaml +++ b/tests/integration/targets/dcnm_vrf/tasks/main.yaml @@ -38,4 +38,6 @@ - 'controller_version != "Unable to determine controller version"' tags: sanity -- { include: dcnm.yaml, tags: ['dcnm'] } +- name: Import Role Tasks + ansible.builtin.import_tasks: dcnm.yaml + tags: ['dcnm'] \ No newline at end of file diff --git a/tests/integration/targets/dcnm_vrf/tests/dcnm/deleted.yaml b/tests/integration/targets/dcnm_vrf/tests/dcnm/deleted.yaml index 489b94c8c..c793b0a1d 100644 --- a/tests/integration/targets/dcnm_vrf/tests/dcnm/deleted.yaml +++ b/tests/integration/targets/dcnm_vrf/tests/dcnm/deleted.yaml @@ -1,33 +1,71 @@ +############################################## +## REQUIRED VARS ## +############################################## +# fabric_1 +# +# - A VXLAN_EVPN fabric +# +# switch_1 +# +# - A vrf-lite capable switch +# - Does not require an interface +# +# switch_2 +# +# - A vrf-lite capable switch +# +# interface_2a +# +# - Ethernet interface on switch_2 +# - Used to test VRF LITE configuration. 
+# +############################################## + ############################################## ## SETUP ## ############################################## - set_fact: - rest_path: "/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/rest/control/fabrics/{{ fabric_1 }}" when: controller_version == "11" - set_fact: - rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}" when: controller_version >= "12" -- name: DELETED - Verify if fabric is deployed. +- name: SETUP.0 - DELETED - print vars + ansible.builtin.debug: + var: item + with_items: + - "fabric_1 : {{ fabric_1 }}" + - "switch_1 : {{ switch_1 }}" + - "switch_2 : {{ switch_2 }}" + - "interface_2a : {{ interface_2a }}" + +- name: SETUP.1 - DELETED - [dcnm_rest.GET] Verify fabric is deployed. cisco.dcnm.dcnm_rest: method: GET path: "{{ rest_path }}" - register: result + register: result_setup_1 - assert: that: - - 'result.response.DATA != None' + - 'result_setup_1.response.DATA != None' -- name: DELETED - Clean up any existing vrfs +- name: SETUP.2 - DELETED - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted + register: result_setup_2 -- name: DELETED - Create, Attach and Deploy new VRF - VLAN Provided by the User +- name: SETUP.2a - DELETED - [wait_for] Wait 40 seconds for controller and switch to sync + wait_for: + timeout: 40 + when: result_setup_2.changed == true + +- name: SETUP.3 - DELETED - [merged] Create, Attach, Deploy VLAN+VRF ansible-vrf cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -36,77 +74,93 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_setup_3 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: SETUP.4 - DELETED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_setup_4 until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_setup_4.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: SETUP.4a - DELETED - [debug] print result_setup_3 + debug: + var: result_setup_3 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' + - 'result_setup_3.changed == true' + - 'result_setup_3.response[0].RETURN_CODE == 200' + - 'result_setup_3.response[1].RETURN_CODE == 200' + - 'result_setup_3.response[2].RETURN_CODE == 200' + - '(result_setup_3.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_setup_3.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 
'result_setup_3.diff[0].attach[0].deploy == true' + - 'result_setup_3.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_setup_3.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_setup_3.diff[0].attach[1].ip_address' + - 'result_setup_3.diff[0].vrf_name == "ansible-vrf-int1"' ############################################### ### DELETED ## ############################################### -- name: DELETED - Delete VRF using deleted state - cisco.dcnm.dcnm_vrf: &conf - fabric: "{{ test_fabric }}" +- name: TEST.1 - DELETED - [deleted] Delete VRF ansible-vrf-int1 + cisco.dcnm.dcnm_vrf: &conf1 + fabric: "{{ fabric_1 }}" state: deleted config: - vrf_name: ansible-vrf-int1 vrf_id: 9008011 vrf_template: Default_VRF_Universal vrf_extension_template: Default_VRF_Extension_Universal - register: result + register: result_1 + +- name: TEST.1a - DELETED - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 + +- name: TEST.1b - DELETED - [debug] print result_1 + debug: + var: result_1 - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[1].MESSAGE == "OK"' - - 'result.response[2].RETURN_CODE == 200' - - 'result.response[2].METHOD == "DELETE"' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == false' - - 'result.diff[0].attach[1].deploy == false' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: DELETED - conf - Idempotence - cisco.dcnm.dcnm_vrf: *conf - register: result + - 'result_1.changed == true' + - 'result_1.response[0].RETURN_CODE == 200' + - 'result_1.response[1].RETURN_CODE == 200' + - 'result_1.response[1].MESSAGE == "OK"' + - 'result_1.response[2].RETURN_CODE == 200' + - 'result_1.response[2].METHOD == "DELETE"' + - '(result_1.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_1.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_1.diff[0].attach[0].deploy == false' + - 'result_1.diff[0].attach[1].deploy == false' + - 'result_1.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.1c - DELETED - [deleted] conf1 - Idempotence + cisco.dcnm.dcnm_vrf: *conf1 + register: result_1c + +- name: TEST.1d - DELETED - [debug] print result_1c + debug: + var: result_1c - assert: that: - - 'result.changed == false' - - 'result.response|length == 0' - - 'result.diff|length == 0' + - 'result_1c.changed == false' + - 'result_1c.response|length == 0' + - 'result_1c.diff|length == 0' -- name: DELETED - Create, Attach and Deploy new VRF - VLAN/VRF LITE EXTENSION Provided by the User in one switch +- name: TEST.2 - DELETED - [merged] Create, Attach, Deploy VLAN+VRF+LITE ansible-vrf-int1 switch_2 cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -115,46 +169,50 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - peer_vrf: ansible-vrf-int1 # optional - interface: "{{ ansible_int1 }}" # mandatory + interface: "{{ interface_2a }}" # mandatory ipv4_addr: 10.33.0.2/30 # optional neighbor_ipv4: 10.33.0.1 # optional ipv6_addr: 2010::10:34:0:7/64 # optional neighbor_ipv6: 2010::10:34:0:3 # optional - dot1q: 2 # dot1q can be got from dcnm + dot1q: 
2 # optional controller can provide deploy: true - register: result + register: result_2 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.2a - DELETED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_2a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_2a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.2b - DELETED - [debug] print result_2 + debug: + var: result_2 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: DELETED - Delete VRF/VRF LITE - VLAN/VRF LITE EXTENSION Provided by the User in one switch - cisco.dcnm.dcnm_vrf: &conf1 - fabric: "{{ test_fabric }}" + - 'result_2.changed == true' + - 'result_2.response[0].RETURN_CODE == 200' + - 'result_2.response[1].RETURN_CODE == 200' + - 'result_2.response[2].RETURN_CODE == 200' + - '(result_2.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_2.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_2.diff[0].attach[0].deploy == true' + - 'result_2.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_2.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_2.diff[0].attach[1].ip_address' + - 'result_2.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.2b - DELETED - [deleted] Delete VRF+LITE ansible-vrf-int1 switch_2 + cisco.dcnm.dcnm_vrf: &conf2 + fabric: "{{ fabric_1 }}" state: deleted config: - vrf_name: ansible-vrf-int1 @@ -163,53 +221,61 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - peer_vrf: ansible-vrf-int1 # optional - interface: "{{ ansible_int1 }}" # mandatory + interface: "{{ interface_2a }}" # mandatory ipv4_addr: 10.33.0.2/30 # optional neighbor_ipv4: 10.33.0.1 # optional ipv6_addr: 2010::10:34:0:7/64 # optional neighbor_ipv6: 2010::10:34:0:3 # optional - dot1q: 2 # dot1q can be got from dcnm + dot1q: 2 # controller can provide dot1q deploy: true - register: result + register: result_2b + +- name: TEST.2c - DELETED - [debug] print result_2b + debug: + var: result_2b - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[1].MESSAGE == "OK"' - - 'result.response[2].RETURN_CODE == 200' - - 'result.response[2].METHOD == "DELETE"' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == false' - - 'result.diff[0].attach[1].deploy == false' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: DELETED - conf1 - Idempotence - cisco.dcnm.dcnm_vrf: *conf1 - register: result + - 'result_2b.changed == true' 
+ - 'result_2b.response[0].RETURN_CODE == 200' + - 'result_2b.response[1].RETURN_CODE == 200' + - 'result_2b.response[1].MESSAGE == "OK"' + - 'result_2b.response[2].RETURN_CODE == 200' + - 'result_2b.response[2].METHOD == "DELETE"' + - '(result_2b.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_2b.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_2b.diff[0].attach[0].deploy == false' + - 'result_2b.diff[0].attach[1].deploy == false' + - 'result_2b.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.2d - DELETED - [wait_for] Wait 60 seconds for controller and switch to sync + # The vrf lite profile removal returns ok for deployment, but the switch + # takes time to remove the profile so wait for some time before creating + # a new vrf, else the switch goes into OUT-OF-SYNC state + wait_for: + timeout: 60 + +- name: TEST.2e - DELETED - [deleted] conf2 - Idempotence + cisco.dcnm.dcnm_vrf: *conf2 + register: result_2e + +- name: TEST.2f - DELETED - [debug] print result_2e + debug: + var: result_2e - assert: that: - - 'result.changed == false' - - 'result.response|length == 0' - - 'result.diff|length == 0' - -- name: QUERY - sleep for 40 seconds for DCNM to completely update the state - # The vrf lite profile removal returns ok for deployment, but the switch takes time to remove - # the profile so wait for some time before creating a new vrf, else the switch goes into - # OUT-OF-SYNC state - wait_for: - timeout: 40 + - 'result_2e.changed == false' + - 'result_2e.response|length == 0' + - 'result_2e.diff|length == 0' -- name: DELETED - Create, Attach and Deploy new VRF - VLAN/VRF LITE EXTENSION Provided by the User in one switch +- name: TEST.3 - DELETED - [merged] Create, Attach, Deploy VRF+LITE switch_2 cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -218,79 +284,102 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - peer_vrf: ansible-vrf-int1 # optional - interface: "{{ ansible_int1 }}" # mandatory + interface: "{{ interface_2a }}" # mandatory ipv4_addr: 10.33.0.2/30 # optional neighbor_ipv4: 10.33.0.1 # optional ipv6_addr: 2010::10:34:0:7/64 # optional neighbor_ipv6: 2010::10:34:0:3 # optional - dot1q: 2 # dot1q can be got from dcnm + dot1q: 2 # optional controller can provide deploy: true - register: result + register: result_3 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.3a - DELETED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_3a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_3a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.3b - DELETED - [debug] print result_3 + debug: + var: result_3 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in 
result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: DELETED - Delete VRF/VRF LITE - WITHOUT ANY CONFIG ELEMENT - cisco.dcnm.dcnm_vrf: &conf2 - fabric: "{{ test_fabric }}" + - 'result_3.changed == true' + - 'result_3.response[0].RETURN_CODE == 200' + - 'result_3.response[1].RETURN_CODE == 200' + - 'result_3.response[2].RETURN_CODE == 200' + - '(result_3.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_3.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_3.diff[0].attach[0].deploy == true' + - 'result_3.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_3.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_3.diff[0].attach[1].ip_address' + - 'result_3.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.3c - DELETED - [deleted] Delete VRF+LITE - empty config element + cisco.dcnm.dcnm_vrf: &conf3 + fabric: "{{ fabric_1 }}" state: deleted config: - register: result + register: result_3c + +- name: TEST.3d - DELETED - [debug] print result_3c + debug: + var: result_3c - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[1].MESSAGE == "OK"' - - 'result.response[2].RETURN_CODE == 200' - - 'result.response[2].METHOD == "DELETE"' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == false' - - 'result.diff[0].attach[1].deploy == false' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: DELETED - conf1 - Idempotence - cisco.dcnm.dcnm_vrf: *conf2 - register: result + - 'result_3c.changed == true' + - 'result_3c.response[0].RETURN_CODE == 200' + - 'result_3c.response[1].RETURN_CODE == 200' + - 'result_3c.response[1].MESSAGE == "OK"' + - 'result_3c.response[2].RETURN_CODE == 200' + - 'result_3c.response[2].METHOD == "DELETE"' + - '(result_3c.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_3c.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_3c.diff[0].attach[0].deploy == false' + - 'result_3c.diff[0].attach[1].deploy == false' + - 'result_3c.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.3d - DELETED - [wait_for] Wait 60 seconds for controller and switch to sync + # The vrf lite profile removal returns ok for deployment, but the switch + # takes time to remove the profile so wait for some time before creating + # a new vrf, else the switch goes into OUT-OF-SYNC state + wait_for: + timeout: 60 + +- name: TEST.3e - DELETED - conf3 - Idempotence + cisco.dcnm.dcnm_vrf: *conf3 + register: result_3e + +- name: TEST.3f - DELETED - [debug] print result_3e + debug: + var: result_3e - assert: that: - - 'result.changed == false' - - 'result.response|length == 0' - - 'result.diff|length == 0' + - 'result_3e.changed == false' + - 'result_3e.response|length == 0' + - 'result_3e.diff|length == 0' ################################################ #### CLEAN-UP ## ################################################ -- name: DELETED - Clean up any existing vrfs +- name: CLEANUP.1 - DELETED - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted + +- name: CLEANUP.2 - DELETED - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 diff --git 
a/tests/integration/targets/dcnm_vrf/tests/dcnm/merged.yaml b/tests/integration/targets/dcnm_vrf/tests/dcnm/merged.yaml index f2d7afbc8..7bacbc501 100644 --- a/tests/integration/targets/dcnm_vrf/tests/dcnm/merged.yaml +++ b/tests/integration/targets/dcnm_vrf/tests/dcnm/merged.yaml @@ -1,16 +1,59 @@ +############################################## +## REQUIRED VARS ## +############################################## +# fabric_1 +# A VXLAN_EVPN fabric +# +# switch_1 +# +# - A vrf-lite capable switch +# - Does not require an interface. +# +# switch_2 +# +# - A vrf-lite capable switch +# +# switch_3 +# +# A NON-vrf-lite capable switch +# +# interface_2a +# +# - Ethernet interface on switch_2 +# - Used to test VRF LITE configuration. +# +# interface_3a +# +# - Ethernet interface on switch_3 +# - Used to verify error when applying a +# VRF LITE extension on a non-vrf-lite +# capable switch. +############################################## + ############################################## ## SETUP ## ############################################## - set_fact: - rest_path: "/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/rest/control/fabrics/{{ fabric_1 }}" when: controller_version == "11" - set_fact: - rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}" when: controller_version >= "12" -- name: MERGED - Verify if fabric is deployed. +- name: SETUP.0 - MERGED - [with_items] print vars + ansible.builtin.debug: + var: item + with_items: + - "fabric_1 : {{ fabric_1 }}" + - "switch_1 : {{ switch_1 }}" + - "switch_2 : {{ switch_2 }}" + - "switch_3 : {{ switch_3 }}" + - "interface_2a : {{ interface_2a }}" + - "interface_3a : {{ interface_3a }}" + +- name: SETUP.1 - MERGED - [dcnm_rest.GET] Verify fabric is deployed. 
cisco.dcnm.dcnm_rest: method: GET path: "{{ rest_path }}" @@ -20,72 +63,98 @@ that: - 'result.response.DATA != None' -- name: MERGED - Clean up any existing vrfs +- name: SETUP.2 - MERGED - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted + register: result_setup_2 + +- name: SETUP.2a - MERGED - [wait_for] Wait 60 seconds for controller and switch to sync + # The vrf lite profile removal returns ok for deployment, but the switch + # takes time to remove the profile so wait for some time before creating + # a new vrf, else the switch goes into OUT-OF-SYNC state + wait_for: + timeout: 60 + when: result_setup_2.changed == true ############################################### ### MERGED ## ############################################### -- name: MERGED - Create, Attach and Deploy new VRF - VLAN Provided by the User - cisco.dcnm.dcnm_vrf: &conf - fabric: "{{ test_fabric }}" +- name: TEST.1 - MERGED - [merged] Create, Attach, Deploy VLAN(600)+VRF ansible-vrf-int1 + cisco.dcnm.dcnm_vrf: &conf1 + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 vrf_id: 9008011 vrf_template: Default_VRF_Universal vrf_extension_template: Default_VRF_Extension_Universal - vlan_id: 500 + vlan_id: 600 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_1 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.1a - MERGED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_1a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_1a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.1b - MERGED - [debug] print result_1 + ansible.builtin.debug: + var: result_1 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: MERGED - conf1 - Idempotence - cisco.dcnm.dcnm_vrf: *conf - register: result + - 'result_1.changed == true' + - 'result_1.response[0].RETURN_CODE == 200' + - 'result_1.response[1].RETURN_CODE == 200' + - 'result_1.response[2].RETURN_CODE == 200' + - '(result_1.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_1.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_1.diff[0].attach[0].deploy == true' + - 'result_1.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_1.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_1.diff[0].attach[1].ip_address' + - 'result_1.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.1c - MERGED - [merged] conf1 - Idempotence + cisco.dcnm.dcnm_vrf: *conf1 + register: result_1c + +- name: TEST.1d - MERGED - [debug] print result_1c + ansible.builtin.debug: + var: result_1c - assert: that: - - 'result.changed == false' - - 
'result.response|length == 0' + - 'result_1c.changed == false' + - 'result_1c.response|length == 0' -- name: MERGED - Clean up any existing vrfs +- name: TEST.1e - MERGED - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted + register: result_1e + +- name: TEST.1f - MERGED - [wait_for] Wait 60 seconds for controller and switch to sync + # The vrf lite profile removal returns ok for deployment, but the switch + # takes time to remove the profile so wait for some time before creating + # a new vrf, else the switch goes into OUT-OF-SYNC state + wait_for: + timeout: 60 + when: result_1e.changed == true -- name: MERGED - Create, Attach and Deploy new VRF - VLAN Provided by the DCNM +- name: TEST.2 - MERGED - [merged] Create, Attach, Deploy VLAN+VRF (controller provided VLAN) cisco.dcnm.dcnm_vrf: &conf2 - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -93,52 +162,66 @@ vrf_template: Default_VRF_Universal vrf_extension_template: Default_VRF_Extension_Universal attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_2 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.2a - MERGED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_2a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_2a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.2b - MERGED - [debug] print result_2 + ansible.builtin.debug: + var: result_2 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: MERGED - conf2 - Idempotence + - 'result_2.changed == true' + - 'result_2.response[0].RETURN_CODE == 200' + - 'result_2.response[1].RETURN_CODE == 200' + - 'result_2.response[2].RETURN_CODE == 200' + - '(result_2.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_2.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_2.diff[0].attach[0].deploy == true' + - 'result_2.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_2.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_2.diff[0].attach[1].ip_address' + - 'result_2.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.2c - MERGED - [merged] conf2 - Idempotence cisco.dcnm.dcnm_vrf: *conf2 - register: result + register: result_2c + +- name: TEST.2d - MERGED - [debug] print result_2c + ansible.builtin.debug: + var: result_2c - assert: that: - - 'result.changed == false' - - 'result.response|length == 0' + - 'result_2c.changed == false' + - 'result_2c.response|length == 0' -- name: MERGED - Clean up any existing vrfs +- name: TEST.2e - MERGED - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - 
fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted -- name: MERGED - Create, Attach and Deploy new VRF - VLAN/VRF LITE EXTENSION Provided by the User in one switch +- name: TEST.2f - MERGED - [wait_for] Wait 60 seconds for controller and switch to sync + # While vrf-lite extension was not configured above, we still hit VRF + # OUT-OF-SYNC. Let's see if waiting helps here too. + wait_for: + timeout: 60 + +- name: TEST.3 - MERGED - [merged] Create, Attach, Deploy VLAN+VRF+LITE EXTENSION ansible-vrf-int1 switch_2 (user provided VLAN) cisco.dcnm.dcnm_vrf: &conf3 - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -147,67 +230,75 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - peer_vrf: ansible-vrf-int1 # optional - interface: "{{ ansible_int1 }}" # mandatory + interface: "{{ interface_2a }}" # mandatory ipv4_addr: 10.33.0.2/30 # optional neighbor_ipv4: 10.33.0.1 # optional ipv6_addr: 2010::10:34:0:7/64 # optional neighbor_ipv6: 2010::10:34:0:3 # optional dot1q: 2 # dot1q can be got from dcnm deploy: true - register: result + register: result_3 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.3a - MERGED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_3a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_3a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.3b - MERGED - [debug] print result_3 + ansible.builtin.debug: + var: result_3 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: MERGED - conf3 - Idempotence + - 'result_3.changed == true' + - 'result_3.response[0].RETURN_CODE == 200' + - 'result_3.response[1].RETURN_CODE == 200' + - 'result_3.response[2].RETURN_CODE == 200' + - '(result_3.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_3.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_3.diff[0].attach[0].deploy == true' + - 'result_3.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_3.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_3.diff[0].attach[1].ip_address' + - 'result_3.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.3c - MERGED - [merged] conf3 - Idempotence cisco.dcnm.dcnm_vrf: *conf3 - register: result + register: result_3c + +- name: TEST.3d - MERGED - [debug] print result_3c + ansible.builtin.debug: + var: result_3c - assert: that: - - 'result.changed == false' - - 'result.response|length == 0' + - 'result_3c.changed == false' + - 'result_3c.response|length == 0' -- name: MERGED - Clean up any existing vrfs +- name: TEST.3e - MERGED - [deleted] Delete all VRFs 
cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted -- name: MERGED - sleep for 40 seconds for DCNM to completely remove lite profile - # The vrf lite profile removal returns ok for deployment, but the switch takes time to remove - # the profile so wait for some time before creating a new vrf, else the switch goes into - # OUT-OF-SYNC state +- name: TEST.3f - MERGED - Wait 60 seconds for controller and switch to sync + # The vrf lite profile removal returns ok for deployment, but the switch + # takes time to remove the profile so wait for some time before creating + # a new vrf, else the switch goes into OUT-OF-SYNC state wait_for: - timeout: 40 + timeout: 60 -- name: MERGED - Create, Attach and Deploy new VRF - VRF/VRF LITE EXTENSION Provided by the DCNM - one optional - Rest are populated from DCNM +- name: TEST.4 - MERGED - [merged] Create, Attach, Deploy VLAN+VRF+LITE EXTENSION - (controller provided VLAN) cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -216,40 +307,44 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - - interface: "{{ ansible_int1 }}" # mandatory - - ip_address: "{{ ansible_switch1 }}" + - interface: "{{ interface_2a }}" # mandatory deploy: true - register: result + register: result_4 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.4a - MERGED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_4a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_4a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.4b - MERGED - [debug] print result_4 + ansible.builtin.debug: + var: result_4 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: MERGED - Create, Attach and Deploy new VRF - Update with incorrect VRF ID. + - 'result_4.changed == true' + - 'result_4.response[0].RETURN_CODE == 200' + - 'result_4.response[1].RETURN_CODE == 200' + - 'result_4.response[2].RETURN_CODE == 200' + - '(result_4.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_4.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_4.diff[0].attach[0].deploy == true' + - 'result_4.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_4.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_4.diff[0].attach[1].ip_address' + - 'result_4.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.5 - MERGED - [merged] Create, Attach, Deploy VRF - Update with incorrect VRF ID. 
cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -257,20 +352,24 @@ vrf_template: Default_VRF_Universal vrf_extension_template: Default_VRF_Extension_Universal attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_5 ignore_errors: yes +- name: TEST.5a - MERGED - [debug] print result_5 + ansible.builtin.debug: + var: result_5 + - assert: that: - - 'result.changed == false' - - '"cant be updated to a different value" in result.msg' + - 'result_5.changed == false' + - '"cannot be updated to a different value" in result_5.msg' -- name: MERGED - Create, Attach and Deploy new VRF - Update with Out of Range VRF ID. +- name: TEST.6 - MERGED - [merged] Create, Attach, Deploy VRF - Update with Out of Range VRF ID. cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -278,20 +377,24 @@ vrf_template: Default_VRF_Universal vrf_extension_template: Default_VRF_Extension_Universal attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_6 ignore_errors: yes +- name: TEST.6a - MERGED - [debug] print result_6 + ansible.builtin.debug: + var: result_6 + - assert: that: - - 'result.changed == false' - - '"The item exceeds the allowed range of max" in result.msg' + - 'result_6.changed == false' + - '"The item exceeds the allowed range of max" in result_6.msg' -- name: MERGED - Create, Attach and Deploy new VRF - Try configuring VRF LITE without required parameter +- name: TEST.7 - MERGED - [merged] Create, Attach, Deploy VRF - VRF LITE missing required parameter cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -300,22 +403,26 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - peer_vrf: ansible-vrf-int1 # optional deploy: true - register: result + register: result_7 ignore_errors: yes +- name: TEST.7a - MERGED - [debug] print result_7 + ansible.builtin.debug: + var: result_7 + - assert: that: - - 'result.changed == false' - - '"Invalid parameters in playbook: interface : Required parameter not found" in result.msg' + - 'result_7.changed == false' + - '"Invalid parameters in playbook: interface : Required parameter not found" in result_7.msg' -- name: MERGED - Create, Attach and Deploy new VRF - Try configuring VRF LITE to a non border switch +- name: TEST.8 - MERGED - [merged] Create, Attach and Deploy VRF - configure VRF LITE on non border switch cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -324,29 +431,37 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" + - ip_address: "{{ switch_3 }}" vrf_lite: - peer_vrf: ansible-vrf-int1 # optional - interface: "{{ ansible_int1 }}" # mandatory + interface: "{{ interface_3a }}" # mandatory ipv4_addr: 10.33.0.2/30 # optional neighbor_ipv4: 10.33.0.1 # optional ipv6_addr: 2010::10:34:0:7/64 # optional 
neighbor_ipv6: 2010::10:34:0:3 # optional dot1q: 2 # dot1q can be got from dcnm deploy: true - register: result + register: result_8 ignore_errors: yes +- name: TEST.8a - MERGED - [debug] print result_8 + ansible.builtin.debug: + var: result_8 + - assert: that: - - 'result.changed == false' - - '"VRF LITE cannot be attached to switch" in result.msg' + - 'result_8.changed == false' + - '"VRF LITE cannot be attached to switch" in result_8.msg' ############################################### ### CLEAN-UP ## ############################################### -- name: MERGED - Clean up any existing vrfs +- name: CLEANUP.1 - MERGED - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted + +- name: CLEANUP.2 - MERGED - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 diff --git a/tests/integration/targets/dcnm_vrf/tests/dcnm/overridden.yaml b/tests/integration/targets/dcnm_vrf/tests/dcnm/overridden.yaml index 1cee0dca7..8e2de4f0a 100644 --- a/tests/integration/targets/dcnm_vrf/tests/dcnm/overridden.yaml +++ b/tests/integration/targets/dcnm_vrf/tests/dcnm/overridden.yaml @@ -1,16 +1,59 @@ +############################################## +## REQUIRED VARS ## +############################################## +# fabric_1 +# +# - A VXLAN_EVPN fabric +# +# switch_1 +# +# - A vrf-lite capable switch +# +# switch_2 +# +# - A vrf-lite capable switch +# +# interface_2a +# +# - Ethernet interface on switch_2 +# - Used to test VRF LITE configuration. +# +############################################## + +############################################## +## OTHER REQUIREMENTS ## +############################################## +# +# 1. A SUBNET pool for 10.33.0.0/24 must be +# allocated for fabric_1. +# THIS REQUIREMENT HAS BEEN TEMPORARILY +# REMOVED BY NOT CHANGING THE ipv4_addr and +# neighbor_ipv4 values in the VRF LITE payload. +# TODO: Discuss with Mike. +############################################## + ############################################## ## SETUP ## ############################################## - set_fact: - rest_path: "/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/rest/control/fabrics/{{ fabric_1 }}" when: controller_version == "11" - set_fact: - rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}" when: controller_version >= "12" -- name: OVERRIDDEN - Verify if fabric is deployed. +- name: SETUP.0 - OVERRIDDEN - [with_items] print vars + ansible.builtin.debug: + var: item + with_items: + - "fabric_1 : {{ fabric_1 }}" + - "switch_1 : {{ switch_1 }}" + - "switch_2 : {{ switch_2 }}" + - "interface_2a : {{ interface_2a }}" + +- name: SETUP.1 - OVERRIDDEN - [dcnm_rest.GET] Verify if fabric is deployed.
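Editor's note on the REQUIRED VARS block above: when the repository's dynamic inventory is not in use, the same variables can be supplied as ordinary inventory or host vars. A sketch, with a hypothetical file name and placeholder values:

```yaml
# Hypothetical host_vars file for the test host (file name and values
# are illustrative; substitute whatever matches your fabric).
# host_vars/my_ndfc_controller.yml
fabric_1: MY_VXLAN_EVPN_FABRIC   # a VXLAN_EVPN fabric
switch_1: 10.1.1.2               # vrf-lite capable switch
switch_2: 10.1.1.3               # vrf-lite capable switch
interface_2a: Ethernet1/10       # interface on switch_2 used for VRF LITE
```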
cisco.dcnm.dcnm_rest: method: GET path: "{{ rest_path }}" @@ -20,14 +63,23 @@ that: - 'result.response.DATA != None' -- name: OVERRIDDEN - Clean up any existing vrfs +- name: SETUP.2 - OVERRIDDEN - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted + register: result_setup_2 -- name: OVERRIDDEN - Create, Attach and Deploy new VRF - VLAN Provided by the User +- name: SETUP.2a - OVERRIDDEN - [wait_for] Wait 60 seconds for controller and switch to sync + # The vrf lite profile removal returns ok for deployment, but the switch + # takes time to remove the profile so wait for some time before creating + # a new vrf, else the switch goes into OUT-OF-SYNC state + wait_for: + timeout: 60 + when: result_setup_2.changed == true + +- name: SETUP.3 OVERRIDDEN - [merged] Create, Attach, Deploy VLAN+VRF ansible-vrf-int1 cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -36,42 +88,46 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_setup_3 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: SETUP.3a - OVERRIDDEN - [debug] print result_setup_3 + debug: + var: result_setup_3 + +- name: SETUP.4 OVERRIDDEN - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_setup_4 until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_setup_4.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' + - 'result_setup_3.changed == true' + - 'result_setup_3.response[0].RETURN_CODE == 200' + - 'result_setup_3.response[1].RETURN_CODE == 200' + - 'result_setup_3.response[2].RETURN_CODE == 200' + - '(result_setup_3.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_setup_3.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_setup_3.diff[0].attach[0].deploy == true' + - 'result_setup_3.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_setup_3.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_setup_3.diff[0].attach[1].ip_address' + - 'result_setup_3.diff[0].vrf_name == "ansible-vrf-int1"' ############################################### ### OVERRIDDEN ## ############################################### -- name: OVERRIDDEN - Update existing VRF using overridden - delete and create - cisco.dcnm.dcnm_vrf: &conf - fabric: "{{ test_fabric }}" +- name: TEST.1 - OVERRIDDEN - [overridden] Override existing VRF ansible-vrf-int1 to create new VRF ansible-vrf-int2 + cisco.dcnm.dcnm_vrf: &conf1 + fabric: "{{ fabric_1 }}" state: overridden 
config: - vrf_name: ansible-vrf-int2 @@ -80,58 +136,74 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_1 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.1a - OVERRIDDEN - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_1a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_1a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.1b - OVERRIDDEN - [debug] print result_1 + debug: + var: result_1 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - 'result.response[3].RETURN_CODE == 200' - - 'result.response[4].RETURN_CODE == 200' - - 'result.response[5].RETURN_CODE == 200' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - '(result.response[4].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[4].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - 'result.diff[0].vrf_name == "ansible-vrf-int2"' - - 'result.diff[1].attach[0].deploy == false' - - 'result.diff[1].attach[1].deploy == false' - - 'result.diff[1].vrf_name == "ansible-vrf-int1"' - -- name: OVERRIDDEN - conf - Idempotence - cisco.dcnm.dcnm_vrf: *conf - register: result + - 'result_1.changed == true' + - 'result_1.response[0].RETURN_CODE == 200' + - 'result_1.response[1].RETURN_CODE == 200' + - 'result_1.response[2].RETURN_CODE == 200' + - 'result_1.response[3].RETURN_CODE == 200' + - 'result_1.response[4].RETURN_CODE == 200' + - 'result_1.response[5].RETURN_CODE == 200' + - 'result_1.response[6].RETURN_CODE == 200' + - 'result_1.response[7].RETURN_CODE == 200' + - '(result_1.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_1.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - '(result_1.response[6].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_1.response[6].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_1.diff[0].attach[0].deploy == true' + - 'result_1.diff[0].attach[1].deploy == true' + - 'result_1.diff[0].vrf_name == "ansible-vrf-int2"' + - 'result_1.diff[1].attach[0].deploy == false' + - 'result_1.diff[1].attach[1].deploy == false' + - 'result_1.diff[1].vrf_name == "ansible-vrf-int1"' + +- name: TEST.1c - OVERRIDDEN - [overridden] conf1 - Idempotence + cisco.dcnm.dcnm_vrf: *conf1 + register: result_1c + +- name: TEST.1d - OVERRIDDEN - [debug] print result_1c + debug: + var: result_1c - assert: that: - - 'result.changed == false' - - 'result.response|length == 0' + - 'result_1c.changed == false' + - 'result_1c.response|length == 0' -- name: OVERRIDDEN - Clean up any existing vrfs +- name: TEST.1f - OVERRIDDEN - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted + register: result_1f + +- name: TEST.1g - OVERRIDDEN - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 + when: result_1f.changed == true -- name: OVERRIDDEN - Create, Attach and 
Deploy new VRF - VRF/VRF LITE EXTENSION Provided by the User in one switch +- name: TEST.2 - OVERRIDDEN - [merged] ansible-vrf-int2 to add vrf_lite extension to switch_2 cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int2 @@ -140,46 +212,58 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 1500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - peer_vrf: ansible-vrf-int2 # optional - interface: "{{ ansible_int1 }}" # mandatory - ipv4_addr: 10.33.0.2/24 # optional - neighbor_ipv4: 10.33.0.1 # optional + interface: "{{ interface_2a }}" # mandatory + ipv4_addr: 10.33.0.1/24 # optional + neighbor_ipv4: 10.33.0.0 # optional ipv6_addr: 2010::10:34:0:7/64 # optional neighbor_ipv6: 2010::10:34:0:3 # optional - dot1q: 2 # dot1q can be got from dcnm + dot1q: 2 # optional controller can provide deploy: true - register: result + register: result_2 + +- name: TEST.2a - OVERRIDDEN - [wait_for] Wait 60 seconds for controller and switch to sync + # The vrf lite profile removal returns ok for deployment, but the switch + # takes time to remove the profile so wait for some time before creating + # a new vrf, else the switch goes into OUT-OF-SYNC state + wait_for: + timeout: 60 + when: result_2.changed == true -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.2b - OVERRIDDEN - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_2b until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_2b.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.2c - OVERRIDDEN - [debug] print result_2 + debug: + var: result_2 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int2"' - -- name: OVERRIDDEN - Update existing VRF LITE using overridden - VRF stays Whereas Ext is modified/overridden - cisco.dcnm.dcnm_vrf: &conf1 - fabric: "{{ test_fabric }}" + - 'result_2.changed == true' + - 'result_2.response[0].RETURN_CODE == 200' + - 'result_2.response[1].RETURN_CODE == 200' + - 'result_2.response[2].RETURN_CODE == 200' + - '(result_2.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_2.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_2.diff[0].attach[0].deploy == true' + - 'result_2.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_2.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_2.diff[0].attach[1].ip_address' + - 'result_2.diff[0].vrf_name == "ansible-vrf-int2"' + +- name: TEST.3 - OVERRIDDEN - [overridden] Override vrf_lite extension with new dot1q value + cisco.dcnm.dcnm_vrf: &conf3 + fabric: "{{ fabric_1 }}" state: overridden config: - vrf_name: ansible-vrf-int2 @@ -188,50 +272,85 @@ vrf_extension_template: 
Default_VRF_Extension_Universal vlan_id: 1500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - peer_vrf: ansible-vrf-int2 # optional - interface: "{{ ansible_int1 }}" # mandatory - ipv4_addr: 10.33.0.6/24 # optional - neighbor_ipv4: 10.33.0.1 # optional - ipv6_addr: 2010::10:34:0:10/64 # optional - neighbor_ipv6: 2010::10:34:0:7 # optional - dot1q: 21 # dot1q can be got from dcnm + interface: "{{ interface_2a }}" # mandatory + ipv4_addr: 10.33.0.1/24 # optional + neighbor_ipv4: 10.33.0.0 # optional + ipv6_addr: 2010::10:34:0:7/64 # optional + neighbor_ipv6: 2010::10:34:0:3 # optional + dot1q: 21 # optional controller can provide deploy: true - register: result + register: result_3 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.3a - OVERRIDDEN - [wait_for] Wait 60 seconds for controller and switch to sync + # The vrf lite profile removal returns ok for deployment, but the switch + # takes time to remove the profile so wait for some time before creating + # a new vrf, else the switch goes into OUT-OF-SYNC state + wait_for: + timeout: 60 + when: result_3.changed == true + +# In step TEST.3, NDFC issues an NX-OS CLI that immediately switches +# from configure profile mode to configure terminal; vrf context. +# This command results in FAILURE (switch accounting log). Adding a +# wait_for will not help since this all happens within step TEST.3. +# A config-deploy resolves the OUT-OF-SYNC VRF status. +# NOTE: this happens ONLY when Fabric Settings -> Advanced -> Overlay Mode +# is set to "config-profile" (which is the default). It does not happen +# if Overlay Mode is set to "cli". +- name: TEST.3b - OVERRIDDEN - [dcnm_rest.POST] - config-deploy to work around FAILED/OUT-OF-SYNC VRF status + cisco.dcnm.dcnm_rest: + method: POST + path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}/config-deploy?forceShowRun=false" + when: result_3.changed == true + +- name: TEST.3c - OVERRIDDEN - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 + when: result_3.changed == true + +- name: TEST.3d - OVERRIDDEN - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_3d until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_3d.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.3e - OVERRIDDEN - [debug] print result_3 + debug: + var: result_3 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].vrf_name == "ansible-vrf-int2"' - -- name: OVERRIDDEN - conf - Idempotence - cisco.dcnm.dcnm_vrf: *conf1 - register: result + - 'result_3.changed == true' + - 'result_3.response[0].RETURN_CODE == 200' + - 'result_3.response[1].RETURN_CODE == 200' + - '(result_3.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - 'result_3.diff[0].attach[0].deploy == true' + - 'result_3.diff[0].vrf_name == "ansible-vrf-int2"' + +- name: TEST.3f - OVERRIDDEN - [overridden] conf3 - Idempotence + cisco.dcnm.dcnm_vrf: *conf3 + register: result_3f + +- name: TEST.3g - OVERRIDDEN - [debug] print result_3f + debug: + var: result_3f - assert:
that: - - 'result.changed == false' - - 'result.response|length == 0' + - 'result_3f.changed == false' + - 'result_3f.response|length == 0' -- name: OVERRIDDEN - Update existing VRF LITE using overridden - VRF modified and Ext is modified - Old ones deleted - cisco.dcnm.dcnm_vrf: &conf2 - fabric: "{{ test_fabric }}" +- name: TEST.4 - OVERRIDDEN - [overridden] Override ansible-vrf-int2 to create ansible-vrf-int1 with LITE Extension + cisco.dcnm.dcnm_vrf: &conf4 + fabric: "{{ fabric_1 }}" state: overridden config: - vrf_name: ansible-vrf-int1 @@ -240,64 +359,86 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - peer_vrf: ansible-vrf-int1 # optional - interface: "{{ ansible_int1 }}" # mandatory - ipv4_addr: 10.33.0.3/24 # optional - neighbor_ipv4: 10.33.0.1 # optional + interface: "{{ interface_2a }}" # mandatory + ipv4_addr: 10.33.0.1/24 # optional + neighbor_ipv4: 10.33.0.0 # optional ipv6_addr: 2010::10:34:0:1/64 # optional neighbor_ipv6: 2010::10:34:0:2 # optional - dot1q: 31 # dot1q can be got from dcnm + dot1q: 31 # optional controller can provide deploy: true - register: result + register: result_4 + +- name: TEST.4a - OVERRIDDEN - [wait_for] Wait 60 seconds for controller and switch to sync + # The vrf lite profile removal returns ok for deployment, but the switch + # takes time to remove the profile so wait for some time before creating + # a new vrf, else the switch goes into OUT-OF-SYNC state + wait_for: + timeout: 60 + when: result_4.changed == true -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.4b - OVERRIDDEN - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_4b until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_4b.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.4c - OVERRIDDEN - [debug] print result_4 + debug: + var: result_4 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - 'result.response[3].RETURN_CODE == 200' - - 'result.response[4].RETURN_CODE == 200' - - 'result.response[5].RETURN_CODE == 200' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - '(result.response[4].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[4].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.response[2].METHOD == "DELETE"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - - 'result.diff[1].attach[0].deploy == false' - - 'result.diff[1].attach[1].deploy == false' - - 'result.diff[1].vrf_name == "ansible-vrf-int2"' - -- name: OVERRIDDEN - conf - Idempotence - cisco.dcnm.dcnm_vrf: *conf2 - register: result + - 'result_4.changed == true' + - 'result_4.response[0].RETURN_CODE == 200' + - 'result_4.response[1].RETURN_CODE == 200' + - 'result_4.response[2].RETURN_CODE == 200' + - 'result_4.response[3].RETURN_CODE == 200' + - 'result_4.response[4].RETURN_CODE == 200' + - 'result_4.response[5].RETURN_CODE == 200' + - 'result_4.response[6].RETURN_CODE == 200' + - 
'result_4.response[7].RETURN_CODE == 200' + - '(result_4.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_4.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - '(result_4.response[6].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_4.response[6].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_4.response[2].METHOD == "DELETE"' + - 'result_4.diff[0].attach[0].deploy == true' + - 'result_4.diff[0].attach[1].deploy == true' + - 'result_4.diff[0].vrf_name == "ansible-vrf-int1"' + - 'result_4.diff[1].attach[0].deploy == false' + - 'result_4.diff[1].attach[1].deploy == false' + - 'result_4.diff[1].vrf_name == "ansible-vrf-int2"' + +- name: TEST.4d - OVERRIDDEN - [overridden] conf3 - Idempotence + cisco.dcnm.dcnm_vrf: *conf4 + register: result_4d + +- name: TEST.4e - OVERRIDDEN - [debug] print result_4d + debug: + var: result_4d - assert: that: - - 'result.changed == false' - - 'result.response|length == 0' + - 'result_4d.changed == false' + - 'result_4d.response|length == 0' ############################################## ## CLEAN-UP ## ############################################## -- name: OVERRIDDEN - Clean up any existing vrfs +- name: CLEANUP.1 - OVERRIDDEN - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted + +- name: CLEANUP.2 - OVERRIDDEN - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 diff --git a/tests/integration/targets/dcnm_vrf/tests/dcnm/query.yaml b/tests/integration/targets/dcnm_vrf/tests/dcnm/query.yaml index 76115d8d5..8a3e7f7ff 100644 --- a/tests/integration/targets/dcnm_vrf/tests/dcnm/query.yaml +++ b/tests/integration/targets/dcnm_vrf/tests/dcnm/query.yaml @@ -1,16 +1,46 @@ +############################################## +## REQUIRED VARS ## +############################################## +# fabric_1 +# +# - A VXLAN_EVPN fabric +# +# switch_1 +# +# - A vrf-lite capable switch +# +# switch_2 +# +# - A vrf-lite capable switch +# +# interface_2a +# +# - Ethernet interface on switch_2 +# - Used to test VRF LITE configuration. +############################################## + ############################################## ## SETUP ## ############################################## - set_fact: - rest_path: "/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/rest/control/fabrics/{{ fabric_1 }}" when: controller_version == "11" - set_fact: - rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}" when: controller_version >= "12" -- name: QUERY - Verify if fabric is deployed. +- name: SETUP.0 - QUERY - [with_items] print vars + ansible.builtin.debug: + var: item + with_items: + - "fabric_1 : {{ fabric_1 }}" + - "switch_1 : {{ switch_1 }}" + - "switch_2 : {{ switch_2 }}" + - "interface_2a : {{ interface_2a }}" + +- name: SETUP.1 - QUERY - [dcnm_rest.GET] Verify if fabric is deployed. 
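Editor's note on the SETUP.0 variable dump above: it uses `with_items`, which still works; on current Ansible the same task is usually written with `loop`. A sketch of the equivalent form (task name kept, values unchanged):

```yaml
# Equivalent of SETUP.0 using loop instead of with_items (sketch only).
- name: SETUP.0 - QUERY - print vars
  ansible.builtin.debug:
    msg: "{{ item }}"
  loop:
    - "fabric_1 : {{ fabric_1 }}"
    - "switch_1 : {{ switch_1 }}"
    - "switch_2 : {{ switch_2 }}"
    - "interface_2a : {{ interface_2a }}"
```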
cisco.dcnm.dcnm_rest: method: GET path: "{{ rest_path }}" @@ -20,14 +50,23 @@ that: - 'result.response.DATA != None' -- name: QUERY - Clean up any existing vrfs +- name: SETUP.2 - QUERY - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted + register: result_setup_2 + +- name: SETUP.2a - QUERY - Wait 60 seconds for controller and switch to sync + # The vrf lite profile removal returns ok for deployment, but the switch + # takes time to remove the profile so wait for some time before creating + # a new vrf, else the switch goes into OUT-OF-SYNC state + wait_for: + timeout: 60 + when: result_setup_2.changed == true -- name: QUERY - Create, Attach and Deploy new VRF - VLAN Provided by the User +- name: SETUP.3 - QUERY - [merged] Create, Attach, Deploy VLAN+VRF ansible-vrf-int1 cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -36,42 +75,46 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_setup_3 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: SETUP.3a - QUERY - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_setup_3a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_setup_3a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: SETUP.3b - QUERY - [debug] print result_setup_3 + ansible.builtin.debug: + var: result_setup_3 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' + - 'result_setup_3.changed == true' + - 'result_setup_3.response[0].RETURN_CODE == 200' + - 'result_setup_3.response[1].RETURN_CODE == 200' + - 'result_setup_3.response[2].RETURN_CODE == 200' + - '(result_setup_3.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_setup_3.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_setup_3.diff[0].attach[0].deploy == true' + - 'result_setup_3.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_setup_3.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_setup_3.diff[0].attach[1].ip_address' + - 'result_setup_3.diff[0].vrf_name == "ansible-vrf-int1"' -############################################### -### QUERY ## -############################################### +# ############################################### +# ### QUERY ## +# ############################################### -- name: QUERY - Query the VRF +- name: TEST.1 - QUERY - [query] Query VRF ansible-vrf-int1 cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query config: - vrf_name: ansible-vrf-int1 @@ -80,47 +123,101 @@ 
vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_1 + +- name: TEST.1b - QUERY - [debug] print result_1 + debug: + var: result_1 - assert: that: - - 'result.changed == false' - - 'result.response[0].parent.vrfName == "ansible-vrf-int1"' - - 'result.response[0].parent.vrfId == 9008011' - - 'result.response[0].parent.vrfStatus == "DEPLOYED"' - - 'result.response[0].attach[0].switchDetailsList[0].islanAttached == true' - - 'result.response[0].attach[0].switchDetailsList[0].lanAttachedState == "DEPLOYED"' - - 'result.response[0].attach[0].switchDetailsList[0].vlan == 500' - - 'result.response[0].attach[1].switchDetailsList[0].islanAttached == true' - - 'result.response[0].attach[1].switchDetailsList[0].lanAttachedState == "DEPLOYED"' - - 'result.response[0].attach[1].switchDetailsList[0].vlan == 500' - -- name: QUERY - Clean up existing vrfs + - 'result_1.changed == false' + - 'result_1.response[0].parent.vrfName == "ansible-vrf-int1"' + - 'result_1.response[0].parent.vrfId == 9008011' + - 'result_1.response[0].parent.vrfStatus == "DEPLOYED"' + - 'result_1.response[0].attach[0].switchDetailsList[0].islanAttached == true' + - 'result_1.response[0].attach[0].switchDetailsList[0].lanAttachedState == "DEPLOYED"' + - 'result_1.response[0].attach[0].switchDetailsList[0].vlan == 500' + - 'result_1.response[0].attach[1].switchDetailsList[0].islanAttached == true' + - 'result_1.response[0].attach[1].switchDetailsList[0].lanAttachedState == "DEPLOYED"' + - 'result_1.response[0].attach[1].switchDetailsList[0].vlan == 500' + +- name: TEST.2 - QUERY - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted - register: result + register: result_2 + +- name: TEST.2a - QUERY - [debug] print result_2 + debug: + var: result_2 + +- assert: + that: + - 'result_2.changed == true' + - 'result_2.response[0].RETURN_CODE == 200' + - 'result_2.response[1].RETURN_CODE == 200' + - 'result_2.response[1].MESSAGE == "OK"' + - 'result_2.response[2].RETURN_CODE == 200' + - 'result_2.response[2].METHOD == "DELETE"' + - '(result_2.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_2.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_2.diff[0].attach[0].deploy == false' + - 'result_2.diff[0].attach[1].deploy == false' + - 'result_2.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.2b - QUERY - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 + +- name: TEST.3 - QUERY - [merged] Create, Attach, Deploy VLAN+VRF ansible-vrf-int2 + cisco.dcnm.dcnm_vrf: + fabric: "{{ fabric_1 }}" + state: merged + config: + - vrf_name: ansible-vrf-int2 + vrf_id: 9008012 + vrf_template: Default_VRF_Universal + vrf_extension_template: Default_VRF_Extension_Universal + vlan_id: 1500 + attach: + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" + deploy: true + register: result_3 + +- name: TEST.3a - QUERY - [query] Wait for vrfStatus == DEPLOYED + cisco.dcnm.dcnm_vrf: + fabric: "{{ fabric_1 }}" + state: query + register: result_3a + until: + - "result_3a.response[0].parent.vrfStatus is search('DEPLOYED')" + retries: 30 + delay: 2 + +- name: TEST.3b - QUERY - [debug] print result_3 + debug: + var: result_3 - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 
200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[1].MESSAGE == "OK"' - - 'result.response[2].RETURN_CODE == 200' - - 'result.response[2].METHOD == "DELETE"' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == false' - - 'result.diff[0].attach[1].deploy == false' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: QUERY - Create, Attach and Deploy new VRF - VRF/VRF LITE EXTENSION Provided by the User in one switch + - 'result_3.changed == true' + - 'result_3.response[0].RETURN_CODE == 200' + - 'result_3.response[1].RETURN_CODE == 200' + - 'result_3.response[2].RETURN_CODE == 200' + - '(result_3.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_3.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_3.diff[0].attach[0].deploy == true' + - '"{{ switch_1 }}" in result_3.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_3.diff[0].attach[1].ip_address' + +- name: TEST.4 - QUERY - [merged] Create, Attach, Deploy VRF+LITE EXTENSION ansible-vrf-int2 on switch_2 cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int2 @@ -129,46 +226,46 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 1500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - peer_vrf: ansible-vrf-int2 # peer_vrf is mandatory - interface: "{{ ansible_int1 }}" # mandatory + interface: "{{ interface_2a }}" # mandatory ipv4_addr: 10.33.0.2/30 # optional neighbor_ipv4: 10.33.0.1 # optional ipv6_addr: 2010::10:34:0:7/64 # optional neighbor_ipv6: 2010::10:34:0:3 # optional - dot1q: 2 # dot1q can be got from dcnm + dot1q: 2 # optional controller can provide deploy: true - register: result + register: result_4 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.4a - QUERY - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_4a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_4a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.4b - QUERY - [debug] print result_4 + debug: + var: result_4 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int2"' - -- name: QUERY - Query the - VRF/VRF LITE EXTENSION Provided by the User in one switch + - 'result_4.changed == true' + - 'result_4.response[0].RETURN_CODE == 200' + - 'result_4.response[1].RETURN_CODE == 200' + - '(result_4.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - 'result_4.diff[0].attach[0].deploy == true' + - '"{{ switch_2 }}" in result_4.diff[0].attach[0].ip_address' + - 'result_4.diff[0].vrf_name == "ansible-vrf-int2"' + +- name: TEST.5 - 
QUERY - [query] Query VRF+LITE EXTENSION ansible-vrf-int2 switch_2 cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query config: - vrf_name: ansible-vrf-int2 @@ -177,54 +274,62 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 1500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - peer_vrf: ansible-vrf-int2 # peer_vrf is mandatory - interface: "{{ ansible_int1 }}" # mandatory + interface: "{{ interface_2a }}" # mandatory ipv4_addr: 10.33.0.2/30 # optional neighbor_ipv4: 10.33.0.1 # optional ipv6_addr: 2010::10:34:0:7/64 # optional neighbor_ipv6: 2010::10:34:0:3 # optional - dot1q: 2 # dot1q can be got from dcnm + dot1q: 2 # optional controller can provide deploy: true - register: result + register: result_5 + +- name: TEST.5a - QUERY - [debug] print result_5 + debug: + var: result_5 - assert: that: - - 'result.changed == false' - - 'result.response[0].parent.vrfName == "ansible-vrf-int2"' - - 'result.response[0].parent.vrfId == 9008012' - - 'result.response[0].parent.vrfStatus == "DEPLOYED"' - - 'result.response[0].attach[0].switchDetailsList[0].islanAttached == true' - - 'result.response[0].attach[0].switchDetailsList[0].lanAttachedState == "DEPLOYED"' - - 'result.response[0].attach[0].switchDetailsList[0].vlan == 1500' - - 'result.response[0].attach[1].switchDetailsList[0].islanAttached == true' - - 'result.response[0].attach[1].switchDetailsList[0].lanAttachedState == "DEPLOYED"' - - 'result.response[0].attach[1].switchDetailsList[0].vlan == 1500' - -- name: QUERY - Query without the config element + - 'result_5.changed == false' + - 'result_5.response[0].parent.vrfName == "ansible-vrf-int2"' + - 'result_5.response[0].parent.vrfId == 9008012' + - 'result_5.response[0].parent.vrfStatus == "DEPLOYED"' + - 'result_5.response[0].attach[0].switchDetailsList[0].islanAttached == true' + - 'result_5.response[0].attach[0].switchDetailsList[0].lanAttachedState == "DEPLOYED"' + - 'result_5.response[0].attach[0].switchDetailsList[0].vlan == 1500' + - 'result_5.response[0].attach[1].switchDetailsList[0].islanAttached == true' + - 'result_5.response[0].attach[1].switchDetailsList[0].lanAttachedState == "DEPLOYED"' + - 'result_5.response[0].attach[1].switchDetailsList[0].vlan == 1500' + +- name: TEST.6 - QUERY - [query] Query without the config element cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: result + register: result_6 + +- name: TEST.6a - QUERY - [debug] print result_6 + debug: + var: result_6 - assert: that: - - 'result.changed == false' - - 'result.response[0].parent.vrfName == "ansible-vrf-int2"' - - 'result.response[0].parent.vrfId == 9008012' - - 'result.response[0].parent.vrfStatus == "DEPLOYED"' - - 'result.response[0].attach[0].switchDetailsList[0].islanAttached == true' - - 'result.response[0].attach[0].switchDetailsList[0].lanAttachedState == "DEPLOYED"' - - 'result.response[0].attach[0].switchDetailsList[0].vlan == 1500' - - 'result.response[0].attach[1].switchDetailsList[0].islanAttached == true' - - 'result.response[0].attach[1].switchDetailsList[0].lanAttachedState == "DEPLOYED"' - - 'result.response[0].attach[1].switchDetailsList[0].vlan == 1500' - -- name: QUERY - Query the non available VRF + - 'result_6.changed == false' + - 'result_6.response[0].parent.vrfName == "ansible-vrf-int2"' + - 'result_6.response[0].parent.vrfId == 9008012' + - 
'result_6.response[0].parent.vrfStatus == "DEPLOYED"' + - 'result_6.response[0].attach[0].switchDetailsList[0].islanAttached == true' + - 'result_6.response[0].attach[0].switchDetailsList[0].lanAttachedState == "DEPLOYED"' + - 'result_6.response[0].attach[0].switchDetailsList[0].vlan == 1500' + - 'result_6.response[0].attach[1].switchDetailsList[0].islanAttached == true' + - 'result_6.response[0].attach[1].switchDetailsList[0].lanAttachedState == "DEPLOYED"' + - 'result_6.response[0].attach[1].switchDetailsList[0].vlan == 1500' + +- name: TEST.7 - QUERY - [query] Query non-existent VRF ansible-vrf-int1 cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query config: - vrf_name: ansible-vrf-int1 @@ -233,21 +338,29 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_7 + +- name: TEST.7a - QUERY - [debug] print result_7 + debug: + var: result_7 - assert: that: - - 'result.changed == false' - - 'result.response|length == 0' + - 'result_7.changed == false' + - 'result_7.response|length == 0' ############################################### ### CLEAN-UP ## ############################################### -- name: QUERY - Clean up any existing vrfs +- name: CLEANUP.1 - QUERY - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted + +- name: CLEANUP.2 - QUERY - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 diff --git a/tests/integration/targets/dcnm_vrf/tests/dcnm/replaced.yaml b/tests/integration/targets/dcnm_vrf/tests/dcnm/replaced.yaml index 0a555609c..a0a8254d5 100644 --- a/tests/integration/targets/dcnm_vrf/tests/dcnm/replaced.yaml +++ b/tests/integration/targets/dcnm_vrf/tests/dcnm/replaced.yaml @@ -1,16 +1,48 @@ +############################################## +## REQUIRED VARS ## +############################################## +# fabric_1 +# +# - A VXLAN_EVPN fabric +# +# switch_1 +# +# - A vrf-lite capable switch +# - Does not require an interface +# +# switch_2 +# +# - A vrf-lite capable switch +# +# interface_2a +# +# - Ethernet interface on switch_2 +# - Used to test VRF LITE configuration. +# +############################################## + ############################################## ## SETUP ## ############################################## - set_fact: - rest_path: "/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/rest/control/fabrics/{{ fabric_1 }}" when: controller_version == "11" - set_fact: - rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}" when: controller_version >= "12" -- name: REPLACED - Verify if fabric is deployed. +- name: SETUP.0 - REPLACED - [with_items] print vars + ansible.builtin.debug: + var: item + with_items: + - "fabric_1 : {{ fabric_1 }}" + - "switch_1 : {{ switch_1 }}" + - "switch_2 : {{ switch_2 }}" + - "interface_2a : {{ interface_2a }}" + +- name: SETUP.1 - REPLACED - [dcnm_rest.GET] Verify if fabric is deployed. 
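Editor's note on the version-gated `set_fact` tasks above: the two tasks pick the REST base path for DCNM 11 versus NDFC 12+. If preferred, the same selection can be collapsed into a single task with an inline conditional; a sketch (assuming `controller_version` is the string the tests already compare against):

```yaml
# Single-task equivalent of the two version-gated set_fact tasks (sketch only).
- name: Select REST base path for the controller version
  ansible.builtin.set_fact:
    rest_path: >-
      {{ '/rest/control/fabrics/' ~ fabric_1
         if controller_version == '11'
         else '/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/' ~ fabric_1 }}
```

The two-task form used in the diff reads the same way in every test file, so this is only a stylistic alternative.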
cisco.dcnm.dcnm_rest: method: GET path: "{{ rest_path }}" @@ -20,14 +52,18 @@ that: - 'result.response.DATA != None' -- name: REPLACED - Clean up any existing vrfs +- name: SETUP.2 - REPLACED - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted -- name: REPLACED - Create, Attach and Deploy new VRF - VLAN Provided by the User +- name: SETUP.3 - REPLACED - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 + +- name: SETUP.4 - REPLACED - [merged] Create, Attach, Deploy VLAN+VRF ansible-vrf-int1 cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -36,40 +72,48 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_setup_4 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: SETUP.4a - REPLACED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_setup_4a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_setup_4a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: SETUP.4b - REPLACED - [debug] print result_setup_4 + ansible.builtin.debug: + var: result_setup_4 + +- name: SETUP.4c - REPLACED - [debug] print result_setup_4 + debug: + var: result_setup_4 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' + - 'result_setup_4.changed == true' + - 'result_setup_4.response[0].RETURN_CODE == 200' + - 'result_setup_4.response[1].RETURN_CODE == 200' + - 'result_setup_4.response[2].RETURN_CODE == 200' + - '(result_setup_4.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_setup_4.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_setup_4.diff[0].attach[0].deploy == true' + - 'result_setup_4.diff[0].attach[1].deploy == true' + - 'result_setup_4.diff[0].vrf_name == "ansible-vrf-int1"' ############################################### ### REPLACED ## ############################################### -- name: REPLACED - Update existing VRF using replace - delete attachments +- name: TEST.1 - REPLACED - [replaced] Update existing VRF using replace - delete attachments cisco.dcnm.dcnm_vrf: &conf1 - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: replaced config: - vrf_name: ansible-vrf-int1 @@ -77,40 +121,48 @@ vrf_template: Default_VRF_Universal vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 - register: result + register: result_1 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.1a - REPLACED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_1a until: - - 
"query_result.response[0].parent.vrfStatus is search('NA')" + - "result_1a.response[0].parent.vrfStatus is search('NA')" retries: 30 delay: 2 +- name: TEST.1b - REPLACED - [debug] print result_1 + debug: + var: result_1 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == false' - - 'result.diff[0].attach[1].deploy == false' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: REPLACED - conf1 - Idempotence + - 'result_1.changed == true' + - 'result_1.response[0].RETURN_CODE == 200' + - 'result_1.response[1].RETURN_CODE == 200' + - '(result_1.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_1.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_1.diff[0].attach[0].deploy == false' + - 'result_1.diff[0].attach[1].deploy == false' + - 'result_1.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.1c - REPLACED - conf1 - Idempotence cisco.dcnm.dcnm_vrf: *conf1 - register: result + register: result_1c + +- name: TEST.1d - REPLACED - [debug] print result_1c + debug: + var: result_1c - assert: that: - - 'result.changed == false' + - 'result_1c.changed == false' -- name: REPLACED - Update existing VRF using replace - create attachments +- name: TEST.2 - REPLACED - [replaced] Update existing VRF using replace - create attachments cisco.dcnm.dcnm_vrf: &conf2 - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: replaced config: - vrf_name: ansible-vrf-int1 @@ -119,50 +171,62 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_2 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.2a - REPLACED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_2a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_2a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.2b - REPLACED - [debug] print result_2 + debug: + var: result_2 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - - 'result.diff[0].attach[0].vlan_id == 500' - - 'result.diff[0].attach[1].vlan_id == 500' - -- name: REPLACED - conf2 - Idempotence + - 'result_2.changed == true' + - 'result_2.response[0].RETURN_CODE == 200' + - 'result_2.response[1].RETURN_CODE == 200' + - '(result_2.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_2.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_2.diff[0].attach[0].deploy == true' + - 'result_2.diff[0].attach[1].deploy == true' + - 'result_2.diff[0].vrf_name == "ansible-vrf-int1"' + - 'result_2.diff[0].attach[0].vlan_id == 500' + - 'result_2.diff[0].attach[1].vlan_id == 500' + +- name: 
TEST.2c - REPLACED - [replaced] conf2 - Idempotence cisco.dcnm.dcnm_vrf: *conf2 - register: result + register: result_2c + +- name: TEST.2d - REPLACED - [debug] print result_2c + debug: + var: result_2c - assert: that: - - 'result.changed == false' + - 'result_2c.changed == false' -- name: REPLACED - Clean up any existing vrfs +- name: TEST.2e - REPLACED - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted -- name: REPLACED - Create, Attach and Deploy new VRF - VLAN/VRF LITE EXTENSION Provided by the User in one switch +- name: TEST.2f - REPLACED - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 + +- name: TEST.3 - REPLACED - [merged] Create, Attach, Deploy VLAN+VRF+LITE switch_2 (user provided VLAN) cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -171,46 +235,50 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - peer_vrf: ansible-vrf-int1 # optional - interface: "{{ ansible_int1 }}" # mandatory + interface: "{{ interface_2a }}" # mandatory ipv4_addr: 10.33.0.2/30 # optional neighbor_ipv4: 10.33.0.1 # optional ipv6_addr: 2010::10:34:0:7/64 # optional neighbor_ipv6: 2010::10:34:0:3 # optional - dot1q: 2 # dot1q can be got from dcnm + dot1q: 2 # optional controller can provide deploy: true - register: result + register: result_3 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.3a - REPLACED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_3a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_3a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.3b - REPLACED - [debug] print result_3 + debug: + var: result_3 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: REPLACED - Update existing VRF LITE extensions using Replace - Delete VRF LITE Attachment Only - cisco.dcnm.dcnm_vrf: &conf3 - fabric: "{{ test_fabric }}" + - 'result_3.changed == true' + - 'result_3.response[0].RETURN_CODE == 200' + - 'result_3.response[1].RETURN_CODE == 200' + - 'result_3.response[2].RETURN_CODE == 200' + - '(result_3.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_3.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_3.diff[0].attach[0].deploy == true' + - 'result_3.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_3.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_3.diff[0].attach[1].ip_address' + - 'result_3.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.4 - REPLACED - [replaced] Update existing VRF - 
Delete VRF LITE Attachment + cisco.dcnm.dcnm_vrf: &conf4 + fabric: "{{ fabric_1 }}" state: replaced config: - vrf_name: ansible-vrf-int1 @@ -219,47 +287,52 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" + - ip_address: "{{ switch_1 }}" deploy: true - register: result + register: result_4 + +- name: TEST.4a - REPLACED - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.4b - REPLACED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_4b until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_4b.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.4c - REPLACED - [debug] print result_4 + debug: + var: result_4 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == false' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - -- name: REPLACED - conf3 - Idempotence - cisco.dcnm.dcnm_vrf: *conf3 - register: result + - 'result_4.changed == true' + - 'result_4.response[0].RETURN_CODE == 200' + - 'result_4.response[1].RETURN_CODE == 200' + - '(result_4.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - 'result_4.diff[0].attach[0].deploy == false' + - 'result_4.diff[0].vrf_name == "ansible-vrf-int1"' + +- name: TEST.4d - REPLACED - conf4 - Idempotence + cisco.dcnm.dcnm_vrf: *conf4 + register: result_4d + +- name: TEST.4d - REPLACED - [debug] print result_4d + debug: + var: result_4d - assert: that: - - 'result.changed == false' + - 'result_4d.changed == false' -- name: QUERY - sleep for 40 seconds for DCNM to completely update the state - # The vrf lite profile removal returns ok for deployment, but the switch takes time to remove - # the profile so wait for some time before creating a new vrf, else the switch goes into - # OUT-OF-SYNC state - wait_for: - timeout: 40 - -- name: REPLACED - Update existing VRF LITE extensions using Replace - Create VRF LITE Attachment Only - cisco.dcnm.dcnm_vrf: &conf4 - fabric: "{{ test_fabric }}" +- name: TEST.5 - REPLACED - [replaced] Update existing VRF - Create VRF LITE Attachment + cisco.dcnm.dcnm_vrf: &conf5 + fabric: "{{ fabric_1 }}" state: replaced config: - vrf_name: ansible-vrf-int1 @@ -268,52 +341,64 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" vrf_lite: - peer_vrf: ansible-vrf-int1 # optional - interface: "{{ ansible_int1 }}" # mandatory + interface: "{{ interface_2a }}" # mandatory ipv4_addr: 10.33.0.2/30 # optional neighbor_ipv4: 10.33.0.1 # optional ipv6_addr: 2010::10:34:0:7/64 # optional neighbor_ipv6: 2010::10:34:0:3 # optional - dot1q: 2 # dot1q can be got from dcnm + dot1q: 2 # optional controller can provide deploy: true - register: result + register: result_5 -- name: Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.5a - REPLACED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - 
register: query_result + register: result_5a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_5a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 +- name: TEST.5b - REPLACED - [debug] print result_5 + debug: + var: result_5 + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - - 'result.diff[0].attach[0].vlan_id == 500' - -- name: REPLACED - conf4 - Idempotence - cisco.dcnm.dcnm_vrf: *conf4 - register: result + - 'result_5.changed == true' + - 'result_5.response[0].RETURN_CODE == 200' + - 'result_5.response[1].RETURN_CODE == 200' + - '(result_5.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - 'result_5.diff[0].attach[0].deploy == true' + - 'result_5.diff[0].vrf_name == "ansible-vrf-int1"' + - 'result_5.diff[0].attach[0].vlan_id == 500' + +- name: TEST.5c - REPLACED - conf5 - Idempotence + cisco.dcnm.dcnm_vrf: *conf5 + register: result_5c + +- name: TEST.5d - REPLACED - [debug] print result_5c + debug: + var: result_5c - assert: that: - - 'result.changed == false' + - 'result_5c.changed == false' ############################################### ### CLEAN-UP ## ############################################### -- name: REPLACED - Clean up any existing vrfs +- name: CLEANUP.1 - REPLACED - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted + +- name: CLEANUP.2 - REPLACED - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 diff --git a/tests/integration/targets/dcnm_vrf/tests/dcnm/sanity.yaml b/tests/integration/targets/dcnm_vrf/tests/dcnm/sanity.yaml index 46def14db..5b31ecd11 100644 --- a/tests/integration/targets/dcnm_vrf/tests/dcnm/sanity.yaml +++ b/tests/integration/targets/dcnm_vrf/tests/dcnm/sanity.yaml @@ -1,49 +1,76 @@ +############################################## +## REQUIRED VARS ## +############################################## +# fabric_1 +# +# - A VXLAN_EVPN fabric +# +# switch_1 +# +# - A vrf-lite capable switch +# - Does not require an interface +# +# switch_2 +# +# - A vrf-lite capable switch +# - Does not require an interface +# +############################################## + ############################################## ## SETUP ## ############################################## - set_fact: - rest_path: "/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/rest/control/fabrics/{{ fabric_1 }}" when: controller_version == "11" tags: - sanity - set_fact: - rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}" when: controller_version >= "12" tags: - sanity -- name: SANITY- MERGED - Verify if fabric is deployed. +- name: SETUP.0 - SANITY - [with_items] print vars + ansible.builtin.debug: + var: item + with_items: + - "fabric_1 : {{ fabric_1 }}" + - "switch_1 : {{ switch_1 }}" + - "switch_2 : {{ switch_2 }}" + +- name: SETUP.1 - SANITY - Verify if fabric is deployed. 
cisco.dcnm.dcnm_rest: method: GET path: "{{ rest_path }}" - register: result + register: result_setup_1 tags: sanity - assert: that: - - 'result.response.DATA != None' + - 'result_setup_1.response.DATA != None' tags: sanity -- name: SANITY- MERGED - Clean up any existing vrfs +- name: SETUP.2 - SANITY - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted tags: sanity -- name: SANITY- Pause for 20 seconds for NDFC to sync - ansible.builtin.pause: - seconds: 20 +- name: SETUP.3 - SANITY - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 tags: sanity ############################################### ### MERGED ## ############################################### -- name: SANITY- MERGED - Create, Attach and Deploy new VRF - VLAN Provided by the User - cisco.dcnm.dcnm_vrf: &conf - fabric: "{{ test_fabric }}" +- name: TEST.1 - SANITY MERGED - [merged] Create, Attach, Deploy VLAN+VRF - VLAN Provided by the User + cisco.dcnm.dcnm_vrf: &conf1 + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -52,67 +79,77 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_1 tags: sanity -- name: SANITY- Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.1a - SANITY MERGED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_1a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_1a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 tags: sanity +- name: TEST.1b - SANITY MERGED - [debug] print result_1 + ansible.builtin.debug: + var: result_1 + tags: sanity + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' + - 'result_1.changed == true' + - 'result_1.response[0].RETURN_CODE == 200' + - 'result_1.response[1].RETURN_CODE == 200' + - 'result_1.response[2].RETURN_CODE == 200' + - '(result_1.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_1.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_1.diff[0].attach[0].deploy == true' + - 'result_1.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_1.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_1.diff[0].attach[1].ip_address' + - 'result_1.diff[0].vrf_name == "ansible-vrf-int1"' + tags: sanity + +- name: TEST.1c - SANITY MERGED - conf1 - Idempotence + cisco.dcnm.dcnm_vrf: *conf1 + register: result_1c tags: sanity -- name: SANITY- MERGED - conf1 - Idempotence - cisco.dcnm.dcnm_vrf: *conf - register: result +- name: TEST.1d - SANITY MERGED - [debug] print result_1c + ansible.builtin.debug: + var: result_1c tags: sanity - assert: that: 
- - 'result.changed == false' - - 'result.response|length == 0' + - 'result_1c.changed == false' + - 'result_1c.response|length == 0' tags: sanity -- name: SANITY- MERGED - Clean up any existing vrfs +- name: TEST.1e - SANITY MERGED - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted tags: sanity -- name: SANITY- Pause for 20 seconds for NDFC to sync - ansible.builtin.pause: - seconds: 20 +- name: TEST.1f - SANITY MERGED - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 tags: sanity ############################################### ### REPLACED ## ############################################### -- name: SANITY- REPLACED - Create, Attach and Deploy new VRF - VLAN Provided by the User +- name: TEST.2 - SANITY REPLACED - [merged] Create, Attach, Deploy VLAN+VRF Provided by the User cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -121,39 +158,44 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_2 tags: sanity -- name: SANITY- Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.2a - SANITY REPLACED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_2a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_2a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 tags: sanity +- name: TEST.2b - SANITY REPLACED - [debug] print result_2 + ansible.builtin.debug: + var: result_2 + tags: sanity + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - tags: sanity - -- name: SANITY- REPLACED - Update existing VRF using replace - delete attachments - cisco.dcnm.dcnm_vrf: &conf1 - fabric: "{{ test_fabric }}" + - 'result_2.changed == true' + - 'result_2.response[0].RETURN_CODE == 200' + - 'result_2.response[1].RETURN_CODE == 200' + - 'result_2.response[2].RETURN_CODE == 200' + - '(result_2.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_2.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_2.diff[0].attach[0].deploy == true' + - 'result_2.diff[0].attach[1].deploy == true' + - 'result_2.diff[0].vrf_name == "ansible-vrf-int1"' + tags: sanity + +- name: TEST.2c - SANITY REPLACED - [replaced] Update existing VRF - delete attachments + cisco.dcnm.dcnm_vrf: &conf2c + fabric: "{{ fabric_1 }}" state: replaced config: - vrf_name: ansible-vrf-int1 @@ -161,45 +203,60 @@ vrf_template: Default_VRF_Universal vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 - register: result + register: result_2c tags: sanity -- name: SANITY- Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.2d - SANITY REPLACED - [wait_for] Wait 60 seconds for controller and 
switch to sync + wait_for: + timeout: 60 + tags: sanity + +- name: TEST.2e - SANITY REPLACED - [query] Wait for vrfStatus == NA cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_2e until: - - "query_result.response[0].parent.vrfStatus is search('NA')" + - "result_2e.response[0].parent.vrfStatus is search('NA')" retries: 30 delay: 2 tags: sanity +- name: TEST.2f - SANITY REPLACED - [debug] print result_2c + ansible.builtin.debug: + var: result_2c + tags: sanity + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == false' - - 'result.diff[0].attach[1].deploy == false' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' + - 'result_2c.changed == true' + - 'result_2c.response[0].RETURN_CODE == 200' + - 'result_2c.response[1].RETURN_CODE == 200' + - '(result_2c.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_2c.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_2c.diff[0].attach[0].deploy == false' + - 'result_2c.diff[0].attach[1].deploy == false' + - 'result_2c.diff[0].vrf_name == "ansible-vrf-int1"' tags: sanity -- name: SANITY- REPLACED - conf1 - Idempotence - cisco.dcnm.dcnm_vrf: *conf1 - register: result +- name: TEST.2g - SANITY REPLACED - conf2c - Idempotence + cisco.dcnm.dcnm_vrf: *conf2c + register: result_2g + tags: sanity + +- name: TEST.2h - SANITY REPLACED - [debug] print result_2g + ansible.builtin.debug: + var: result_2g tags: sanity - assert: that: - - 'result.changed == false' + - 'result_2g.changed == false' tags: sanity -- name: SANITY- REPLACED - Update existing VRF using replace - create attachments - cisco.dcnm.dcnm_vrf: &conf2 - fabric: "{{ test_fabric }}" +- name: TEST.2i - SANITY REPLACED - [replaced] Update existing VRF - create attachments + cisco.dcnm.dcnm_vrf: &conf2i + fabric: "{{ fabric_1 }}" state: replaced config: - vrf_name: ansible-vrf-int1 @@ -208,65 +265,80 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_2i tags: sanity -- name: SANITY- Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.2j - SANITY REPLACED - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 + tags: sanity + +- name: TEST.2k - SANITY REPLACED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_2k until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_2k.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 tags: sanity +- name: TEST.2l - SANITY REPLACED - [debug] print result_2i + ansible.builtin.debug: + var: result_2i + tags: sanity + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 
'result.diff[0].attach[1].deploy == true' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - - 'result.diff[0].attach[0].vlan_id == 500' - - 'result.diff[0].attach[1].vlan_id == 500' + - 'result_2i.changed == true' + - 'result_2i.response[0].RETURN_CODE == 200' + - 'result_2i.response[1].RETURN_CODE == 200' + - '(result_2i.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_2i.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_2i.diff[0].attach[0].deploy == true' + - 'result_2i.diff[0].attach[1].deploy == true' + - 'result_2i.diff[0].vrf_name == "ansible-vrf-int1"' + - 'result_2i.diff[0].attach[0].vlan_id == 500' + - 'result_2i.diff[0].attach[1].vlan_id == 500' + tags: sanity + +- name: TEST.2m - SANITY REPLACED - [replaced] conf2i - Idempotence + cisco.dcnm.dcnm_vrf: *conf2i + register: result_2m tags: sanity -- name: SANITY- REPLACED - conf2 - Idempotence - cisco.dcnm.dcnm_vrf: *conf2 - register: result +- name: TEST.2n - SANITY REPLACED - [debug] print result_2m + ansible.builtin.debug: + var: result_2m tags: sanity - assert: that: - - 'result.changed == false' + - 'result_2m.changed == false' tags: sanity -- name: SANITY- REPLACED - Clean up any existing vrfs +- name: TEST.2o - SANITY REPLACED - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted tags: sanity -- name: SANITY- Pause for 20 seconds for NDFC to sync - ansible.builtin.pause: - seconds: 20 +- name: TEST.2p - SANITY REPLACED - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 tags: sanity ############################################### ### OVERRIDDEN ## ############################################### -- name: SANITY- OVERRIDDEN - Create, Attach and Deploy new VRF - VLAN Provided by the User +- name: TEST.3 - SANITY OVERRIDDEN - [merged] Create, Attach, Deploy VLAN+VRF - Provided by the User cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -275,41 +347,46 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_3 tags: sanity -- name: SANITY- Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.3a - SANITY OVERRIDDEN - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_3a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_3a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 tags: sanity +- name: TEST.3b - SANITY OVERRIDDEN - [debug] print result_3 + ansible.builtin.debug: + var: result_3 + tags: sanity + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - tags: 
sanity - -- name: SANITY- OVERRIDDEN - Update existing VRF using overridden - delete and create - cisco.dcnm.dcnm_vrf: &conf3 - fabric: "{{ test_fabric }}" + - 'result_3.changed == true' + - 'result_3.response[0].RETURN_CODE == 200' + - 'result_3.response[1].RETURN_CODE == 200' + - 'result_3.response[2].RETURN_CODE == 200' + - '(result_3.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_3.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_3.diff[0].attach[0].deploy == true' + - 'result_3.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_3.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_3.diff[0].attach[1].ip_address' + - 'result_3.diff[0].vrf_name == "ansible-vrf-int1"' + tags: sanity + +- name: TEST.3c - SANITY OVERRIDDEN - [overridden] Update existing VRF - delete and create + cisco.dcnm.dcnm_vrf: &conf3c + fabric: "{{ fabric_1 }}" state: overridden config: - vrf_name: ansible-vrf-int2 @@ -318,73 +395,83 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_3c tags: sanity -- name: SANITY- Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.3d - SANITY OVERRIDDEN - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_3d until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_3d.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 tags: sanity +- name: TEST.3e - SANITY OVERRIDDEN - [debug] print result_3c + ansible.builtin.debug: + var: result_3c + tags: sanity + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - 'result.response[3].RETURN_CODE == 200' - - 'result.response[4].RETURN_CODE == 200' - - 'result.response[5].RETURN_CODE == 200' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - '(result.response[4].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[4].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - 'result.diff[0].vrf_name == "ansible-vrf-int2"' - - 'result.diff[1].attach[0].deploy == false' - - 'result.diff[1].attach[1].deploy == false' - - 'result.diff[1].vrf_name == "ansible-vrf-int1"' - tags: sanity - -- name: SANITY- OVERRIDDEN - conf - Idempotence - cisco.dcnm.dcnm_vrf: *conf3 - register: result + - 'result_3c.changed == true' + - 'result_3c.response[0].RETURN_CODE == 200' + - 'result_3c.response[1].RETURN_CODE == 200' + - 'result_3c.response[2].RETURN_CODE == 200' + - 'result_3c.response[3].RETURN_CODE == 200' + - 'result_3c.response[4].RETURN_CODE == 200' + - 'result_3c.response[5].RETURN_CODE == 200' + - '(result_3c.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_3c.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - '(result_3c.response[4].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_3c.response[4].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_3c.diff[0].attach[0].deploy == true' + - 'result_3c.diff[0].attach[1].deploy == true' + - 
'result_3c.diff[0].vrf_name == "ansible-vrf-int2"' + - 'result_3c.diff[1].attach[0].deploy == false' + - 'result_3c.diff[1].attach[1].deploy == false' + - 'result_3c.diff[1].vrf_name == "ansible-vrf-int1"' + tags: sanity + +- name: TEST.3f - SANITY OVERRIDDEN - [overridden] conf3c - Idempotence + cisco.dcnm.dcnm_vrf: *conf3c + register: result_3f + tags: sanity + +- name: TEST.3g - SANITY OVERRIDDEN - [debug] print result_3f + ansible.builtin.debug: + var: result_3f tags: sanity - assert: that: - - 'result.changed == false' - - 'result.response|length == 0' + - 'result_3f.changed == false' + - 'result_3f.response|length == 0' tags: sanity -- name: SANITY- OVERRIDDEN - Clean up any existing vrfs +- name: TEST.3h - SANITY OVERRIDDEN - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted tags: sanity -- name: SANITY- Pause for 20 seconds for NDFC to sync - ansible.builtin.pause: - seconds: 20 +- name: TEST.3i - SANITY OVERRIDDEN - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 tags: sanity ############################################### ### QUERY ## ############################################### -- name: SANITY- QUERY - Create, Attach and Deploy new VRF - VLAN Provided by the User +- name: TEST.4 - SANITY QUERY - [merged] Create, Attach, Deploy VLAN+VRF - Provided by the User cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -393,41 +480,46 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_4 tags: sanity -- name: SANITY- Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.4a - SANITY QUERY - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query - register: query_result + register: result_4a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_4a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 tags: sanity +- name: TEST.4b - SANITY QUERY - [debug] print result_4 + ansible.builtin.debug: + var: result_4 + tags: sanity + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - tags: sanity - -- name: SANITY- QUERY - Query the VRF + - 'result_4.changed == true' + - 'result_4.response[0].RETURN_CODE == 200' + - 'result_4.response[1].RETURN_CODE == 200' + - 'result_4.response[2].RETURN_CODE == 200' + - '(result_4.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_4.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_4.diff[0].attach[0].deploy == true' + - 'result_4.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_4.diff[0].attach[0].ip_address' + - '"{{ switch_2 
}}" in result_4.diff[0].attach[1].ip_address' + - 'result_4.diff[0].vrf_name == "ansible-vrf-int1"' + tags: sanity + +- name: TEST.4c - SANITY QUERY - [query] Query the VRF cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query config: - vrf_name: ansible-vrf-int1 @@ -436,45 +528,49 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_4c + tags: sanity + +- name: TEST.4d - SANITY QUERY - [debug] print result_4c + ansible.builtin.debug: + var: result_4c tags: sanity - assert: that: - - 'result.changed == false' - - 'result.response[0].parent.vrfName == "ansible-vrf-int1"' - - 'result.response[0].parent.vrfId == 9008011' - - 'result.response[0].parent.vrfStatus == "DEPLOYED"' - - 'result.response[0].attach[0].switchDetailsList[0].islanAttached == true' - - 'result.response[0].attach[0].switchDetailsList[0].lanAttachedState == "DEPLOYED"' - - 'result.response[0].attach[0].switchDetailsList[0].vlan == 500' - - 'result.response[0].attach[1].switchDetailsList[0].islanAttached == true' - - 'result.response[0].attach[1].switchDetailsList[0].lanAttachedState == "DEPLOYED"' - - 'result.response[0].attach[1].switchDetailsList[0].vlan == 500' - tags: sanity - -- name: SANITY- QUERY - Clean up existing vrfs + - 'result_4c.changed == false' + - 'result_4c.response[0].parent.vrfName == "ansible-vrf-int1"' + - 'result_4c.response[0].parent.vrfId == 9008011' + - 'result_4c.response[0].parent.vrfStatus == "DEPLOYED"' + - 'result_4c.response[0].attach[0].switchDetailsList[0].islanAttached == true' + - 'result_4c.response[0].attach[0].switchDetailsList[0].lanAttachedState == "DEPLOYED"' + - 'result_4c.response[0].attach[0].switchDetailsList[0].vlan == 500' + - 'result_4c.response[0].attach[1].switchDetailsList[0].islanAttached == true' + - 'result_4c.response[0].attach[1].switchDetailsList[0].lanAttachedState == "DEPLOYED"' + - 'result_4c.response[0].attach[1].switchDetailsList[0].vlan == 500' + tags: sanity + +- name: TEST.4e - SANITY QUERY - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted - register: result tags: sanity -- name: SANITY- Pause for 20 seconds for NDFC to sync - ansible.builtin.pause: - seconds: 20 +- name: TEST.4f - SANITY QUERY - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 tags: sanity ############################################### ### DELETED ## ############################################### -- name: SANITY- DELETED - Create, Attach and Deploy new VRF - VLAN Provided by the User +- name: TEST.5 - SANITY DELETED - Create, Attach, Deploy VLAN+VRF - Provided by the User cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -483,73 +579,88 @@ vrf_extension_template: Default_VRF_Extension_Universal vlan_id: 500 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true - register: result + register: result_5 tags: sanity -- name: SANITY- Query fabric state until vrfStatus transitions to DEPLOYED state +- name: TEST.5a - SANITY DELETED - [query] Wait for vrfStatus == DEPLOYED cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ 
fabric_1 }}" state: query - register: query_result + register: result_5a until: - - "query_result.response[0].parent.vrfStatus is search('DEPLOYED')" + - "result_5a.response[0].parent.vrfStatus is search('DEPLOYED')" retries: 30 delay: 2 tags: sanity +- name: TEST.5b - SANITY DELETED - [debug] print result_5 + ansible.builtin.debug: + var: result_5 + tags: sanity + - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[2].RETURN_CODE == 200' - - '(result.response[1].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == true' - - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - tags: sanity - -- name: SANITY- DELETED - Delete VRF using deleted state - cisco.dcnm.dcnm_vrf: &conf4 - fabric: "{{ test_fabric }}" + - 'result_5.changed == true' + - 'result_5.response[0].RETURN_CODE == 200' + - 'result_5.response[1].RETURN_CODE == 200' + - 'result_5.response[2].RETURN_CODE == 200' + - '(result_5.response[1].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_5.response[1].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_5.diff[0].attach[0].deploy == true' + - 'result_5.diff[0].attach[1].deploy == true' + - '"{{ switch_1 }}" in result_5.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" in result_5.diff[0].attach[1].ip_address' + - 'result_5.diff[0].vrf_name == "ansible-vrf-int1"' + tags: sanity + +- name: TEST.5c - SANITY DELETED - [deleted] Delete the VRF + cisco.dcnm.dcnm_vrf: &conf5c + fabric: "{{ fabric_1 }}" state: deleted config: - vrf_name: ansible-vrf-int1 vrf_id: 9008011 vrf_template: Default_VRF_Universal vrf_extension_template: Default_VRF_Extension_Universal - register: result + register: result_5c + tags: sanity + +- name: TEST.5d - SANITY DELETED - [debug] print result_5c + ansible.builtin.debug: + var: result_5c tags: sanity - assert: that: - - 'result.changed == true' - - 'result.response[0].RETURN_CODE == 200' - - 'result.response[1].RETURN_CODE == 200' - - 'result.response[1].MESSAGE == "OK"' - - 'result.response[2].RETURN_CODE == 200' - - 'result.response[2].METHOD == "DELETE"' - - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - 'result.diff[0].attach[0].deploy == false' - - 'result.diff[0].attach[1].deploy == false' - - 'result.diff[0].vrf_name == "ansible-vrf-int1"' + - 'result_5c.changed == true' + - 'result_5c.response[0].RETURN_CODE == 200' + - 'result_5c.response[1].RETURN_CODE == 200' + - 'result_5c.response[1].MESSAGE == "OK"' + - 'result_5c.response[2].RETURN_CODE == 200' + - 'result_5c.response[2].METHOD == "DELETE"' + - '(result_5c.response[0].DATA|dict2items)[0].value == "SUCCESS"' + - '(result_5c.response[0].DATA|dict2items)[1].value == "SUCCESS"' + - 'result_5c.diff[0].attach[0].deploy == false' + - 'result_5c.diff[0].attach[1].deploy == false' + - 'result_5c.diff[0].vrf_name == "ansible-vrf-int1"' + tags: sanity + +- name: TEST.5e - SANITY DELETED - conf5c - Idempotence + cisco.dcnm.dcnm_vrf: *conf5c + register: result_5e tags: sanity -- name: SANITY- DELETED - conf - Idempotence - cisco.dcnm.dcnm_vrf: *conf4 - register: result +- name: TEST.5f - SANITY DELETED - [debug] print result_5e + ansible.builtin.debug: 
+ var: result_5e tags: sanity - assert: that: - - 'result.changed == false' - - 'result.response|length == 0' + - 'result_5e.changed == false' + - 'result_5e.response|length == 0' + - 'result_5e.diff|length == 0' tags: sanity - - 'result.diff|length == 0' diff --git a/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/deleted_vrf_all.yaml b/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/deleted_vrf_all.yaml index 16f4222ec..5748884fc 100644 --- a/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/deleted_vrf_all.yaml +++ b/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/deleted_vrf_all.yaml @@ -1,13 +1,32 @@ +############################################## +## REQUIRED VARS ## +############################################## +# fabric_1 +# +# - A VXLAN_EVPN fabric +# +# switch_1 +# +# - A vrf-capable switch +# - Does not require an interface +# +# switch_2 +# +# - A vrf-capable switch +# - Does not require an interface +# +############################################## + ############################################## ## SETUP ## ############################################## - set_fact: - rest_path: "/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/rest/control/fabrics/{{ fabric_1 }}" when: controller_version == "11" - set_fact: - rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}" when: controller_version >= "12" - name: DELETE_ALL - Verify if fabric is deployed. @@ -22,7 +41,7 @@ - name: DELETE_ALL - Clean up any existing vrfs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted ############################################### @@ -31,7 +50,7 @@ - name: DELETE_ALL - Create, Attach and Deploy new VRF with all values cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -55,14 +74,14 @@ bgp_password: "74657374" bgp_passwd_encrypt: 7 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true register: result - name: Query fabric state until vrfStatus transitions to DEPLOYED state cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query register: query_result until: @@ -80,13 +99,13 @@ - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - 'result.diff[0].attach[0].deploy == true' - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" or "{{ ansible_switch2 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" or "{{ ansible_switch1 }}" in result.diff[0].attach[1].ip_address' + - '"{{ switch_1 }}" or "{{ switch_2 }}" in result.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" or "{{ switch_1 }}" in result.diff[0].attach[1].ip_address' - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - name: DELETE_ALL - Clean existing vrfs cisco.dcnm.dcnm_vrf: &conf - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted register: result @@ -98,8 +117,8 @@ - 'result.response[2].RETURN_CODE == 200' - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"' - '(result.response[0].DATA|dict2items)[1].value == "SUCCESS"' - - '"{{ ansible_switch1 }}" or "{{ ansible_switch2 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" or "{{ ansible_switch1 }}" in 
result.diff[0].attach[1].ip_address' + - '"{{ switch_1 }}" or "{{ switch_2 }}" in result.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" or "{{ switch_1 }}" in result.diff[0].attach[1].ip_address' - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - name: DELETE_ALL - conf - Idempotence @@ -117,5 +136,5 @@ - name: DELETE_ALL - Clean up any existing vrfs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted diff --git a/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/merged_vrf_all.yaml b/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/merged_vrf_all.yaml index 0f3f69c87..733e3d0a4 100644 --- a/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/merged_vrf_all.yaml +++ b/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/merged_vrf_all.yaml @@ -1,13 +1,32 @@ +############################################## +## REQUIRED VARS ## +############################################## +# fabric_1 +# +# - A VXLAN_EVPN fabric +# +# switch_1 +# +# - A vrf-capable switch +# - Does not require an interface +# +# switch_2 +# +# - A vrf-capable switch +# - Does not require an interface +# +############################################## + ############################################## ## SETUP ## ############################################## - set_fact: - rest_path: "/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/rest/control/fabrics/{{ fabric_1 }}" when: controller_version == "11" - set_fact: - rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}" when: controller_version >= "12" - name: MERGED_ALL - Verify if fabric is deployed. 
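The REQUIRED VARS headers added to these self-contained tests name the variables each playbook expects (fabric_1, switch_1, switch_2, and, in some tests, interface_* vars) but do not show where the values come from. As a rough sketch only, with placeholder names and addresses that are not part of this change, they could be supplied as ordinary Ansible vars when a test is run standalone:

```yaml
# Illustrative placeholders only; substitute values for your own setup.
# These could live in group_vars/ or be passed with
# ansible-playbook -e @test_vars.yaml (the file name is hypothetical).
fabric_1: MY_VXLAN_EVPN_FABRIC   # a VXLAN_EVPN fabric
switch_1: 192.168.1.101          # a vrf-capable switch
switch_2: 192.168.1.102          # a vrf-capable switch
```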
@@ -22,7 +41,7 @@ - name: MERGED_ALL - Clean up any existing vrfs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted ############################################### @@ -31,7 +50,7 @@ - name: MERGED_ALL - Create, Attach and Deploy new VRF with all values cisco.dcnm.dcnm_vrf: &conf - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -55,14 +74,14 @@ bgp_password: "74657374" bgp_passwd_encrypt: 7 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true register: result - name: Query fabric state until vrfStatus transitions to DEPLOYED state cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query register: query_result until: @@ -80,8 +99,8 @@ - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - 'result.diff[0].attach[0].deploy == true' - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" or "{{ ansible_switch2 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" or "{{ ansible_switch1 }}" in result.diff[0].attach[1].ip_address' + - '"{{ switch_1 }}" or "{{ switch_2 }}" in result.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" or "{{ switch_1 }}" in result.diff[0].attach[1].ip_address' - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - name: MERGED_ALL - conf - Idempotence @@ -99,5 +118,5 @@ - name: MERGED_ALL - Clean up any existing vrfs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted diff --git a/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/overridden_vrf_all.yaml b/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/overridden_vrf_all.yaml index f5d518ba1..fd7cb618a 100644 --- a/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/overridden_vrf_all.yaml +++ b/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/overridden_vrf_all.yaml @@ -1,13 +1,32 @@ +############################################## +## REQUIRED VARS ## +############################################## +# fabric_1 +# +# - A VXLAN_EVPN fabric +# +# switch_1 +# +# - A vrf-capable switch +# - Does not require an interface +# +# switch_2 +# +# - A vrf-capable switch +# - Does not require an interface +# +############################################## + ############################################## ## SETUP ## ############################################## - set_fact: - rest_path: "/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/rest/control/fabrics/{{ fabric_1 }}" when: controller_version == "11" - set_fact: - rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}" when: controller_version >= "12" - name: OVERRIDDEN_ALL - Verify if fabric is deployed. 
@@ -22,7 +41,7 @@ - name: OVERRIDDEN_ALL - Clean up any existing vrfs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted ############################################### @@ -31,7 +50,7 @@ - name: OVERRIDDEN_ALL - Create, Attach and Deploy new VRF with all values cisco.dcnm.dcnm_vrf: &conf - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -55,14 +74,14 @@ bgp_password: "74657374" bgp_passwd_encrypt: 7 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true register: result - name: Query fabric state until vrfStatus transitions to DEPLOYED state cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query register: query_result until: @@ -80,8 +99,8 @@ - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - 'result.diff[0].attach[0].deploy == true' - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" or "{{ ansible_switch2 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" or "{{ ansible_switch1 }}" in result.diff[0].attach[1].ip_address' + - '"{{ switch_1 }}" or "{{ switch_2 }}" in result.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" or "{{ switch_1 }}" in result.diff[0].attach[1].ip_address' - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - name: OVERRIDDEN_ALL - conf - Idempotence @@ -95,7 +114,7 @@ - name: OVERRIDDEN_ALL - Override a existing VRF with a new one cisco.dcnm.dcnm_vrf: &conf1 - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: overridden config: - vrf_name: ansible-vrf-int2 @@ -119,14 +138,14 @@ bgp_password: "74657374" bgp_passwd_encrypt: 7 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true register: result - name: Query fabric state until vrfStatus transitions to DEPLOYED state cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query register: query_result until: @@ -151,10 +170,10 @@ - 'result.diff[0].attach[1].deploy == true' - 'result.diff[1].attach[0].deploy == false' - 'result.diff[1].attach[1].deploy == false' - - '"{{ ansible_switch1 }}" or "{{ ansible_switch2 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" or "{{ ansible_switch1 }}" in result.diff[0].attach[1].ip_address' - - '"{{ ansible_switch1 }}" or "{{ ansible_switch2 }}" in result.diff[1].attach[0].ip_address' - - '"{{ ansible_switch2 }}" or "{{ ansible_switch1 }}" in result.diff[1].attach[1].ip_address' + - '"{{ switch_1 }}" or "{{ switch_2 }}" in result.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" or "{{ switch_1 }}" in result.diff[0].attach[1].ip_address' + - '"{{ switch_1 }}" or "{{ switch_2 }}" in result.diff[1].attach[0].ip_address' + - '"{{ switch_2 }}" or "{{ switch_1 }}" in result.diff[1].attach[1].ip_address' - 'result.diff[0].vrf_name == "ansible-vrf-int2"' - 'result.diff[1].vrf_name == "ansible-vrf-int1"' @@ -173,5 +192,5 @@ - name: OVERRIDDEN_ALL - Clean up any existing vrfs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted diff --git a/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/replaced_vrf_all.yaml b/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/replaced_vrf_all.yaml index 010bca5d8..7645e5f00 100644 --- 
a/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/replaced_vrf_all.yaml +++ b/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/replaced_vrf_all.yaml @@ -1,13 +1,32 @@ +############################################## +## REQUIRED VARS ## +############################################## +# fabric_1 +# +# - A VXLAN_EVPN fabric +# +# switch_1 +# +# - A vrf-capable switch +# - Does not require an interface +# +# switch_2 +# +# - A vrf-capable switch +# - Does not require an interface +# +############################################## + ############################################## ## SETUP ## ############################################## - set_fact: - rest_path: "/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/rest/control/fabrics/{{ fabric_1 }}" when: controller_version == "11" - set_fact: - rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}" when: controller_version >= "12" - name: REPLACED_ALL - Verify if fabric is deployed. @@ -22,7 +41,7 @@ - name: REPLACED_ALL - Clean up any existing vrfs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted ############################################### @@ -31,7 +50,7 @@ - name: REPLACED_ALL - Create, Attach and Deploy new VRF with all values cisco.dcnm.dcnm_vrf: &conf - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: merged config: - vrf_name: ansible-vrf-int1 @@ -55,14 +74,14 @@ bgp_password: "74657374" bgp_passwd_encrypt: 7 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true register: result - name: Query fabric state until vrfStatus transitions to DEPLOYED state cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query register: query_result until: @@ -80,8 +99,8 @@ - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"' - 'result.diff[0].attach[0].deploy == true' - 'result.diff[0].attach[1].deploy == true' - - '"{{ ansible_switch1 }}" or "{{ ansible_switch2 }}" in result.diff[0].attach[0].ip_address' - - '"{{ ansible_switch2 }}" or "{{ ansible_switch1 }}" in result.diff[0].attach[1].ip_address' + - '"{{ switch_1 }}" or "{{ switch_2 }}" in result.diff[0].attach[0].ip_address' + - '"{{ switch_2 }}" or "{{ switch_1 }}" in result.diff[0].attach[1].ip_address' - 'result.diff[0].vrf_name == "ansible-vrf-int1"' - name: REPLACED_ALL - conf - Idempotence @@ -95,7 +114,7 @@ - name: REPLACED_ALL - Replace VRF with all values cisco.dcnm.dcnm_vrf: &conf1 - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: replaced config: - vrf_name: ansible-vrf-int1 @@ -119,14 +138,14 @@ bgp_password: "74657374" bgp_passwd_encrypt: 7 attach: - - ip_address: "{{ ansible_switch1 }}" - - ip_address: "{{ ansible_switch2 }}" + - ip_address: "{{ switch_1 }}" + - ip_address: "{{ switch_2 }}" deploy: true register: result - name: Query fabric state until vrfStatus transitions to DEPLOYED state cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: query register: query_result until: @@ -156,5 +175,5 @@ - name: REPLACED_ALL - Clean up any existing vrfs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted diff --git a/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/scale.yaml 
b/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/scale.yaml index 76b82792c..f9a25fc74 100644 --- a/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/scale.yaml +++ b/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/scale.yaml @@ -1,66 +1,103 @@ +############################################## +## REQUIRED VARS ## +############################################## +# fabric_1 +# +# - A VXLAN_EVPN fabric +# +############################################## + ############################################## ## SETUP ## ############################################## - set_fact: - rest_path: "/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/rest/control/fabrics/{{ fabric_1 }}" when: controller_version == "11" - set_fact: - rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}" + rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}" when: controller_version >= "12" -- name: SCALE - Verify if fabric is deployed. +- name: SETUP.0 - SCALE - [with_items] print vars + ansible.builtin.debug: + var: item + with_items: + - "fabric_1 : {{ fabric_1 }}" + +- name: SETUP.1 - SCALE - Verify if fabric is deployed. cisco.dcnm.dcnm_rest: method: GET path: "{{ rest_path }}" - register: result + register: setup_result_1 - assert: that: - - 'result.response.DATA != None' + - 'setup_result_1.response.DATA != None' -- name: SCALE - Clean up any existing vrfs +- name: SETUP.2 - SCALE - [deleted] Delete all VRFs cisco.dcnm.dcnm_vrf: - fabric: "{{ test_fabric }}" + fabric: "{{ fabric_1 }}" state: deleted + register: setup_result_2 + tags: sanity + +- name: SETUP.3 - SCALE - [wait_for] Wait 60 seconds for controller and switch to sync + wait_for: + timeout: 60 + when: setup_result_2.changed == false + tags: sanity -- name: Dummy set fact for leaf_attach_list +- name: SETUP.4 - SCALE - [set_fact] Dummy set fact for leaf_attach_list set_fact: leaf_vrf_attach: [] -- name: Build list of VRFs to be deployed +- name: SETUP.5 - SCALE - [set_fact] Build list of VRFs to be deployed set_fact: vrfs_list: "{{ vrfs_list|default([]) + [{ 'vrf_name': 'TEST_VRF%03d' | format(item), 'deploy': 'no', 'vrf_id': (item | int + 50000) | int, 'vlan_id': (item | int + 2000) | int, 'attach': leaf_vrf_attach }] }}" loop: '{{ range(0, 800) | list }}' -- name: Push all VRFs to DCNM +- name: TEST.1 - SCALE - [merged] Push all VRFs to the controller cisco.dcnm.dcnm_vrf: - fabric: '{{ test_fabric }}' + fabric: '{{ fabric_1 }}' state: merged config: '{{ vrfs_list }}' register: result -- name: SCALE - Clean up existing vrfs - cisco.dcnm.dcnm_vrf: &conf - fabric: "{{ test_fabric }}" +- name: TEST.1a - SCALE - [wait_for] Wait 60 seconds + wait_for: + timeout: 60 + tags: sanity + +- name: TEST.2 - SCALE - [deleted] Delete all VRFs + cisco.dcnm.dcnm_vrf: &conf2 + fabric: "{{ fabric_1 }}" state: deleted + register: result_2 -- name: SCALE - conf - Idempotence - cisco.dcnm.dcnm_vrf: *conf - register: result +- name: TEST.2a - SCALE - [debug] print result_2 + ansible.builtin.debug: + var: result_2 - assert: that: - - 'result.changed == false' - - 'result.response|length == 0' - - 'result.diff|length == 0' + - 'result_2.changed == true' + +- name: TEST.2b - SCALE - [deleted] conf2 - Idempotence + cisco.dcnm.dcnm_vrf: *conf2 + register: result_2b + +- name: TEST.2c - SCALE - [debug] print result_2b + ansible.builtin.debug: + var: result_2b + +- assert: + that: + - 'result_2b.changed == false' + - 'result_2b.response|length == 0' + - 
'result_2b.diff|length == 0'
 
 ################################################
 ####                 CLEAN-UP                ###
 ################################################
-
-- name: SCALE - Clean up any existing vrfs
-  cisco.dcnm.dcnm_vrf:
-    fabric: "{{ test_fabric }}"
-    state: deleted
+# No CLEANUP required
diff --git a/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/vrf_lite.yaml b/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/vrf_lite.yaml
index 6ad41f906..38bb637fe 100644
--- a/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/vrf_lite.yaml
+++ b/tests/integration/targets/dcnm_vrf/tests/dcnm/self-contained-tests/vrf_lite.yaml
@@ -1,13 +1,46 @@
+##############################################
+##              REQUIRED VARS               ##
+##############################################
+# fabric_1
+# A VXLAN_EVPN fabric
+#
+# switch_1
+#
+# - A vrf-lite capable switch
+#
+# switch_2
+#
+# - A vrf-lite capable switch
+#
+# switch_3
+#
+# - A vrf capable switch
+# - switch_3 does not require any interfaces
+#
+# interface_1a
+#
+# - 1st interface on switch_1
+#
+# interface_2a
+#
+# - 1st interface on switch_2
+#
+# interface_2b
+#
+# - 2nd interface on switch_2
+#
+##############################################
+
 ##############################################
 ##                  SETUP                   ##
 ##############################################
 
 - set_fact:
-    rest_path: "/rest/control/fabrics/{{ test_fabric }}"
+    rest_path: "/rest/control/fabrics/{{ fabric_1 }}"
   when: controller_version == "11"
 
 - set_fact:
-    rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ test_fabric }}"
+    rest_path: "/appcenter/cisco/ndfc/api/v1/lan-fabric/rest/control/fabrics/{{ fabric_1 }}"
   when: controller_version >= "12"
 
 - name: MERGED - Verify if fabric is deployed.
@@ -22,7 +55,7 @@
 
 - name: MERGED - Clean up any existing vrfs
   cisco.dcnm.dcnm_vrf:
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: deleted
 
 - name: VRF LITE - sleep for 40 seconds for DCNM to completely update the state
@@ -38,7 +71,7 @@
 
 - name: VRF LITE- Create, Attach and Deploy new VRF - VLAN/VRF LITE EXTENSION Provided by the User in one switch
   cisco.dcnm.dcnm_vrf: &conf1
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: merged
     config:
     - vrf_name: ansible-vrf-int1
@@ -47,22 +80,22 @@
       vrf_extension_template: Default_VRF_Extension_Universal
       vlan_id: 500
       attach:
-      - ip_address: "{{ ansible_switch1 }}"
-      - ip_address: "{{ ansible_switch2 }}"
+      - ip_address: "{{ switch_2 }}"
         vrf_lite:
           - peer_vrf: ansible-vrf-int1 # optional
-            interface: "{{ ansible_int1 }}" # mandatory
+            interface: "{{ interface_2a }}" # mandatory
             ipv4_addr: 10.33.0.2/24 # optional
             neighbor_ipv4: 10.33.0.1 # optional
             ipv6_addr: 2010::10:34:0:7/64 # optional
             neighbor_ipv6: 2010::10:34:0:3 # optional
             dot1q: 2 # dot1q can be got from dcnm
+      - ip_address: "{{ switch_3 }}"
       deploy: true
   register: result
 
 - name: Query fabric state until vrfStatus transitions to DEPLOYED state
   cisco.dcnm.dcnm_vrf:
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: query
   register: query_result
   until:
@@ -80,10 +113,10 @@
     - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"'
     - 'result.diff[0].attach[0].deploy == true'
    - 'result.diff[0].attach[1].deploy == true'
-    - '"{{ ansible_switch1 }}" in result.diff[0].attach[0].ip_address'
-    - '"{{ ansible_switch2 }}" in result.diff[0].attach[1].ip_address'
+    - '"{{ switch_3 }}" in result.diff[0].attach[0].ip_address'
+    - '"{{ switch_2 }}" in result.diff[0].attach[1].ip_address'
     - 'result.diff[0].vrf_name == "ansible-vrf-int1"'
-    - '"{{ ansible_int1 }}" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
+    - '"{{ interface_2a }}" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
     - '"ansible-vrf-int1" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
     - '"10.33.0.2/24" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
 
@@ -98,7 +131,7 @@
 
 - name: VRF LITE- Attach and Deploy second VRF LITE EXTENSION Provided by the User in one switch
   cisco.dcnm.dcnm_vrf: &conf2
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: merged
     config:
     - vrf_name: ansible-vrf-int1
@@ -107,29 +140,29 @@
       vrf_extension_template: Default_VRF_Extension_Universal
       vlan_id: 500
       attach:
-      - ip_address: "{{ ansible_switch2 }}"
+      - ip_address: "{{ switch_2 }}"
         vrf_lite:
           - peer_vrf: ansible-vrf-int1 # optional
-            interface: "{{ ansible_int1 }}" # mandatory
+            interface: "{{ interface_2a }}" # mandatory
             ipv4_addr: 10.33.0.2/24 # optional
             neighbor_ipv4: 10.33.0.1 # optional
             ipv6_addr: 2010::10:34:0:7/64 # optional
             neighbor_ipv6: 2010::10:34:0:3 # optional
             dot1q: 2 # dot1q can be got from dcnm
           - peer_vrf: ansible-vrf-int1 # optional
-            interface: "{{ ansible_int2 }}" # mandatory
+            interface: "{{ interface_2b }}" # mandatory
             ipv4_addr: 20.33.0.2/24 # optional
             neighbor_ipv4: 20.33.0.1 # optional
             ipv6_addr: 3010::10:34:0:7/64 # optional
             neighbor_ipv6: 3010::10:34:0:3 # optional
             dot1q: 21 # dot1q can be got from dcnm
-      - ip_address: "{{ ansible_switch1 }}"
+      - ip_address: "{{ switch_3 }}"
       deploy: true
   register: result
 
 - name: Query fabric state until vrfStatus transitions to DEPLOYED state
   cisco.dcnm.dcnm_vrf:
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: query
   register: query_result
   until:
@@ -144,12 +177,12 @@
     - 'result.response[1].RETURN_CODE == 200'
     - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"'
     - 'result.diff[0].attach[0].deploy == true'
-    - '"{{ ansible_switch2 }}" in result.diff[0].attach[0].ip_address'
+    - '"{{ switch_2 }}" in result.diff[0].attach[0].ip_address'
     - 'result.diff[0].vrf_name == "ansible-vrf-int1"'
-    - '"{{ ansible_int1 }}" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
+    - '"{{ interface_2a }}" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
     - '"ansible-vrf-int1" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
     - '"10.33.0.2/24" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
-    - '"{{ ansible_int2 }}" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
+    - '"{{ interface_2b }}" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
     - '"20.33.0.2/24" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
 
 - name: VRF LITE - conf2 - Idempotence
@@ -163,7 +196,7 @@
 
 - name: VRF LITE- Replace VRF LITE Attachment and Deploy by the User in one switch
   cisco.dcnm.dcnm_vrf: &conf3
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: replaced
     config:
     - vrf_name: ansible-vrf-int1
@@ -172,22 +205,22 @@
       vrf_extension_template: Default_VRF_Extension_Universal
       vlan_id: 500
       attach:
-      - ip_address: "{{ ansible_switch1 }}"
-      - ip_address: "{{ ansible_switch2 }}"
+      - ip_address: "{{ switch_2 }}"
         vrf_lite:
           - peer_vrf: ansible-vrf-int1 # optional
-            interface: "{{ ansible_int1 }}" # mandatory
+            interface: "{{ interface_2a }}" # mandatory
             ipv4_addr: 10.33.0.2/24 # optional
             neighbor_ipv4: 10.33.0.1 # optional
             ipv6_addr: 2010::10:34:0:7/64 # optional
             neighbor_ipv6: 2010::10:34:0:3 # optional
             dot1q: 2 # dot1q can be got from dcnm
+      - ip_address: "{{ switch_3 }}"
       deploy: true
   register: result
 
 - name: Query fabric state until vrfStatus transitions to DEPLOYED state
   cisco.dcnm.dcnm_vrf:
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: query
   register: query_result
   until:
@@ -202,9 +235,9 @@
     - 'result.response[1].RETURN_CODE == 200'
     - '(result.response[0].DATA|dict2items)[0].value == "SUCCESS"'
     - 'result.diff[0].attach[0].deploy == true'
-    - '"{{ ansible_switch2 }}" in result.diff[0].attach[0].ip_address'
+    - '"{{ switch_2 }}" in result.diff[0].attach[0].ip_address'
     - 'result.diff[0].vrf_name == "ansible-vrf-int1"'
-    - '"{{ ansible_int1 }}" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
+    - '"{{ interface_2a }}" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
     - '"ansible-vrf-int1" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
     - '"10.33.0.2/24" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
 
@@ -219,7 +252,7 @@
 
 - name: VRF LITE- Override VRF and VRF LITE EXTENSION Provided by the User
   cisco.dcnm.dcnm_vrf: &conf4
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: overridden
     config:
     - vrf_name: ansible-vrf-int2
@@ -228,17 +261,17 @@
       vrf_extension_template: Default_VRF_Extension_Universal
       vlan_id: 400
       attach:
-      - ip_address: "{{ ansible_switch2 }}"
+      - ip_address: "{{ switch_2 }}"
         vrf_lite:
           - peer_vrf: ansible-vrf-int1 # optional
-            interface: "{{ ansible_int1 }}" # mandatory
+            interface: "{{ interface_2a }}" # mandatory
             ipv4_addr: 10.33.0.2/24 # optional
             neighbor_ipv4: 10.33.0.1 # optional
             ipv6_addr: 2010::10:34:0:7/64 # optional
             neighbor_ipv6: 2010::10:34:0:3 # optional
             dot1q: 2 # dot1q can be got from dcnm
           - peer_vrf: ansible-vrf-int1 # optional
-            interface: "{{ ansible_int2 }}" # mandatory
+            interface: "{{ interface_2b }}" # mandatory
             ipv4_addr: 20.33.0.2/24 # optional
             neighbor_ipv4: 20.33.0.1 # optional
             ipv6_addr: 3010::10:34:0:7/64 # optional
@@ -249,7 +282,7 @@
 
 - name: Query fabric state until vrfStatus transitions to DEPLOYED state
   cisco.dcnm.dcnm_vrf:
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: query
   register: query_result
   until:
@@ -271,15 +304,15 @@
     - 'result.diff[0].attach[0].deploy == true'
     - 'result.diff[1].attach[0].deploy == false'
     - 'result.diff[1].attach[1].deploy == false'
-    - '"{{ ansible_switch2 }}" in result.diff[0].attach[0].ip_address'
-    - '"{{ ansible_switch1 }}" in result.diff[1].attach[0].ip_address'
-    - '"{{ ansible_switch2 }}" in result.diff[1].attach[1].ip_address'
+    - '"{{ switch_2 }}" in result.diff[0].attach[0].ip_address'
+    - '"{{ switch_3 }}" in result.diff[1].attach[0].ip_address'
+    - '"{{ switch_2 }}" in result.diff[1].attach[1].ip_address'
     - 'result.diff[0].vrf_name == "ansible-vrf-int2"'
     - 'result.diff[1].vrf_name == "ansible-vrf-int1"'
-    - '"{{ ansible_int1 }}" in query_result.response[0].attach[0].switchDetailsList[0].extensionValues'
+    - '"{{ interface_2a }}" in query_result.response[0].attach[0].switchDetailsList[0].extensionValues'
     - '"ansible-vrf-int1" in query_result.response[0].attach[0].switchDetailsList[0].extensionValues'
     - '"10.33.0.2/24" in query_result.response[0].attach[0].switchDetailsList[0].extensionValues'
-    - '"{{ ansible_int2 }}" in query_result.response[0].attach[0].switchDetailsList[0].extensionValues'
+    - '"{{ interface_2b }}" in query_result.response[0].attach[0].switchDetailsList[0].extensionValues'
     - '"20.33.0.2/24" in query_result.response[0].attach[0].switchDetailsList[0].extensionValues'
 
 - name: VRF LITE - conf4 - Idempotence
@@ -293,7 +326,7 @@
 
 - name: VRF LITE - Clean up any existing vrfs
   cisco.dcnm.dcnm_vrf:
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: deleted
 
 - name: VRF LITE - sleep for 40 seconds for DCNM to completely update the state
@@ -305,7 +338,7 @@
 
 - name: VRF LITE- Create, Attach and Deploy new VRF - VLAN/VRF LITE EXTENSION Provided by the User in multiple switch
   cisco.dcnm.dcnm_vrf: &conf5
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: merged
     config:
     - vrf_name: ansible-vrf-int2
@@ -314,37 +347,37 @@
       vrf_extension_template: Default_VRF_Extension_Universal
       vlan_id: 400
       attach:
-      - ip_address: "{{ ansible_switch2 }}"
+      - ip_address: "{{ switch_1 }}"
+        vrf_lite:
+          - peer_vrf: ansible-vrf-int3 # optional
+            interface: "{{ interface_1a }}" # mandatory
+            ipv4_addr: 40.33.0.2/24 # optional
+            neighbor_ipv4: 40.33.0.1 # optional
+            ipv6_addr: 5010::10:34:0:7/64 # optional
+            neighbor_ipv6: 5010::10:34:0:3 # optional
+            dot1q: 4 # dot1q can be got from dcnm
+      - ip_address: "{{ switch_2 }}"
         vrf_lite:
           - peer_vrf: ansible-vrf-int1 # optional
-            interface: "{{ ansible_int1 }}" # mandatory
+            interface: "{{ interface_2a }}" # mandatory
             ipv4_addr: 10.33.0.2/24 # optional
             neighbor_ipv4: 10.33.0.1 # optional
             ipv6_addr: 2010::10:34:0:7/64 # optional
             neighbor_ipv6: 2010::10:34:0:3 # optional
             dot1q: 2 # dot1q can be got from dcnm
           - peer_vrf: ansible-vrf-int1 # optional
-            interface: "{{ ansible_int2 }}" # mandatory
+            interface: "{{ interface_2b }}" # mandatory
             ipv4_addr: 20.33.0.2/24 # optional
             neighbor_ipv4: 20.33.0.1 # optional
             ipv6_addr: 3010::10:34:0:7/64 # optional
             neighbor_ipv6: 3010::10:34:0:3 # optional
             dot1q: 21 # dot1q can be got from dcnm
-      - ip_address: "{{ ansible_switch3 }}"
-        vrf_lite:
-          - peer_vrf: ansible-vrf-int3 # optional
-            interface: "{{ ansible_int3 }}" # mandatory
-            ipv4_addr: 40.33.0.2/24 # optional
-            neighbor_ipv4: 40.33.0.1 # optional
-            ipv6_addr: 5010::10:34:0:7/64 # optional
-            neighbor_ipv6: 5010::10:34:0:3 # optional
-            dot1q: 4 # dot1q can be got from dcnm
       deploy: true
   register: result
 
 - name: Query fabric state until vrfStatus transitions to DEPLOYED state
   cisco.dcnm.dcnm_vrf:
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: query
   register: query_result
   until:
@@ -362,16 +395,16 @@
     - '(result.response[1].DATA|dict2items)[1].value == "SUCCESS"'
     - 'result.diff[0].attach[0].deploy == true'
     - 'result.diff[0].attach[1].deploy == true'
-    - '"{{ ansible_switch2 }}" in result.diff[0].attach[0].ip_address'
-    - '"{{ ansible_switch3 }}" in result.diff[0].attach[1].ip_address'
+    - '"{{ switch_2 }}" in result.diff[0].attach[0].ip_address'
+    - '"{{ switch_1 }}" in result.diff[0].attach[1].ip_address'
     - 'result.diff[0].vrf_name == "ansible-vrf-int2"'
-    - '"{{ ansible_int3 }}" in query_result.response[0].attach[0].switchDetailsList[0].extensionValues'
+    - '"{{ interface_1a }}" in query_result.response[0].attach[0].switchDetailsList[0].extensionValues'
     - '"ansible-vrf-int3" in query_result.response[0].attach[0].switchDetailsList[0].extensionValues'
     - '"40.33.0.2/24" in query_result.response[0].attach[0].switchDetailsList[0].extensionValues'
-    - '"{{ ansible_int1 }}" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
+    - '"{{ interface_2a }}" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
     - '"ansible-vrf-int1" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
     - '"10.33.0.2/24" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
-    - '"{{ ansible_int2 }}" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
+    - '"{{ interface_2b }}" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
     - '"20.33.0.2/24" in query_result.response[0].attach[1].switchDetailsList[0].extensionValues'
 
 - name: VRF LITE - conf5 - Idempotence
@@ -389,5 +422,5 @@
 
 - name: VRF LITE - Clean up any existing vrfs
   cisco.dcnm.dcnm_vrf:
-    fabric: "{{ test_fabric }}"
+    fabric: "{{ fabric_1 }}"
     state: deleted
diff --git a/tests/unit/modules/dcnm/fixtures/dcnm_vrf.json b/tests/unit/modules/dcnm/fixtures/dcnm_vrf.json
index 34c2f081d..08b8e2671 100644
--- a/tests/unit/modules/dcnm/fixtures/dcnm_vrf.json
+++ b/tests/unit/modules/dcnm/fixtures/dcnm_vrf.json
@@ -357,10 +357,12 @@
                 "vrfName": "test_vrf_1",
                 "lanAttachList": [
                     {
-                        "lanAttachState": "DEPLOYED"
+                        "lanAttachState": "DEPLOYED",
+                        "isLanAttached": false
                     },
                     {
-                        "lanAttachState": "DEPLOYED"
+                        "lanAttachState": "DEPLOYED",
+                        "isLanAttached": false
                     }
                 ]
             }
@@ -393,10 +395,12 @@
                 "vrfName": "test_vrf_1",
                 "lanAttachList": [
                     {
-                        "lanAttachState": "NA"
+                        "lanAttachState": "NA",
+                        "isLanAttached": false
                     },
                     {
-                        "lanAttachState": "NA"
+                        "lanAttachState": "NA",
+                        "isLanAttached": false
                     }
                 ]
             }
@@ -1097,6 +1101,36 @@
             }
         ]
     },
+    "mock_pools_top_down_vrf_vlan": {
+        "ERROR": "",
+        "RETURN_CODE": 200,
+        "MESSAGE":"OK",
+        "DATA": [
+            {
+                "allocatedFlag": true,
+                "allocatedIp": "201",
+                "allocatedOn": 1734051260507,
+                "allocatedScopeValue": "FDO211218HH",
+                "entityName": "VRF_1",
+                "entityType": "Device",
+                "hierarchicalKey": "0",
+                "id": 36407,
+                "ipAddress": "172.22.150.104",
+                "resourcePool": {
+                    "dynamicSubnetRange": null,
+                    "fabricName": "f1",
+                    "hierarchicalKey": "f1",
+                    "id": 0,
+                    "overlapAllowed": false,
+                    "poolName": "TOP_DOWN_VRF_VLAN",
+                    "poolType": "ID_POOL",
+                    "targetSubnet": 0,
+                    "vrfName": "VRF_1"
+                },
+                "switchName": "cvd-1313-leaf"
+            }
+        ]
+    },
     "mock_vrf_lite_obj": {
         "RETURN_CODE":200,
         "METHOD":"GET",
diff --git a/tests/unit/modules/dcnm/test_dcnm_vrf.py b/tests/unit/modules/dcnm/test_dcnm_vrf.py
index 580ab5ad8..372b090ee 100644
--- a/tests/unit/modules/dcnm/test_dcnm_vrf.py
+++ b/tests/unit/modules/dcnm/test_dcnm_vrf.py
@@ -17,14 +17,14 @@
 
 __metaclass__ = type
 
+import copy
 from unittest.mock import patch
 
-# from units.compat.mock import patch
-
 from ansible_collections.cisco.dcnm.plugins.modules import dcnm_vrf
 
-from .dcnm_module import TestDcnmModule, set_module_args, loadPlaybookData
-import copy
+from .dcnm_module import TestDcnmModule, loadPlaybookData, set_module_args
+
+# from units.compat.mock import patch
 
 
 class TestDcnmVrfModule(TestDcnmModule):
@@ -127,6 +127,7 @@ def init_data(self):
             self.test_data.get("mock_vrf_attach_lite_object")
         )
         self.mock_vrf_lite_obj = copy.deepcopy(self.test_data.get("mock_vrf_lite_obj"))
+        self.mock_pools_top_down_vrf_vlan = copy.deepcopy(self.test_data.get("mock_pools_top_down_vrf_vlan"))
 
     def setUp(self):
         super(TestDcnmVrfModule, self).setUp()
@@ -437,6 +438,7 @@ def load_fixtures(self, response=None, device=""):
                 self.mock_vrf_attach_object_del_not_ready,
                 self.mock_vrf_attach_object_del_ready,
                 self.delete_success_resp,
+                self.mock_pools_top_down_vrf_vlan,
                 self.blank_data,
                 self.attach_success_resp2,
                 self.deploy_success_resp,
@@ -472,6 +474,7 @@ def load_fixtures(self, response=None, device=""):
                 self.mock_vrf_attach_object_del_not_ready,
                 self.mock_vrf_attach_object_del_ready,
                 self.delete_success_resp,
+                self.mock_pools_top_down_vrf_vlan,
             ]
 
         elif "delete_std_lite" in self._testMethodName:
@@ -519,6 +522,7 @@ def load_fixtures(self, response=None, device=""):
                 obj1,
                 obj2,
                 self.delete_success_resp,
+                self.mock_pools_top_down_vrf_vlan,
             ]
 
         elif "query" in self._testMethodName:
@@ -587,7 +591,7 @@ def test_dcnm_vrf_blank_fabric(self):
         result = self.execute_module(changed=False, failed=True)
         self.assertEqual(
             result.get("msg"),
-            "Fabric test_fabric missing on DCNM or does not have any switches",
+            "Fabric test_fabric missing on the controller or does not have any switches",
         )
 
     def test_dcnm_vrf_get_have_failure(self):
@@ -595,7 +599,7 @@ def test_dcnm_vrf_get_have_failure(self):
             dict(state="merged", fabric="test_fabric", config=self.playbook_config)
         )
         result = self.execute_module(changed=False, failed=True)
-        self.assertEqual(result.get("msg"), "Fabric test_fabric not present on DCNM")
+        self.assertEqual(result.get("msg"), "Fabric test_fabric not present on the controller")
 
     def test_dcnm_vrf_merged_redeploy(self):
         set_module_args(
@@ -707,7 +711,7 @@ def test_dcnm_vrf_merged_with_incorrect_vrfid(self):
         result = self.execute_module(changed=False, failed=True)
         self.assertEqual(
             result.get("msg"),
-            "vrf_id for vrf:test_vrf_1 cant be updated to a different value",
+            "DcnmVrf.diff_for_create: vrf_id for vrf test_vrf_1 cannot be updated to a different value",
         )
 
     def test_dcnm_vrf_merged_lite_invalidrole(self):
@@ -719,10 +723,10 @@ def test_dcnm_vrf_merged_lite_invalidrole(self):
             )
         )
         result = self.execute_module(changed=False, failed=True)
-        self.assertEqual(
-            result["msg"],
-            "VRF LITE cannot be attached to switch 10.10.10.225 with role leaf",
-        )
+        msg = "DcnmVrf.update_attach_params_extension_values: "
+        msg += "VRF LITE cannot be attached to switch 10.10.10.225 "
+        msg += "with role leaf"
+        self.assertEqual(result["msg"], msg)
 
     def test_dcnm_vrf_merged_with_update(self):
         set_module_args(
@@ -1025,10 +1029,10 @@ def test_dcnm_vrf_override_with_deletions(self):
         self.assertEqual(result["response"][1]["DATA"]["status"], "")
         self.assertEqual(result["response"][1]["RETURN_CODE"], self.SUCCESS_RETURN_CODE)
         self.assertEqual(
-            result["response"][4]["DATA"]["test-vrf-2--XYZKSJHSMK2(leaf2)"], "SUCCESS"
+            result["response"][5]["DATA"]["test-vrf-2--XYZKSJHSMK2(leaf2)"], "SUCCESS"
         )
         self.assertEqual(
-            result["response"][4]["DATA"]["test-vrf-2--XYZKSJHSMK3(leaf3)"], "SUCCESS"
+            result["response"][5]["DATA"]["test-vrf-2--XYZKSJHSMK3(leaf3)"], "SUCCESS"
         )
 
     def test_dcnm_vrf_lite_override_with_deletions(self):
@@ -1144,9 +1148,8 @@ def test_dcnm_vrf_delete_failure(self):
             dict(state="deleted", fabric="test_fabric", config=self.playbook_config)
         )
         result = self.execute_module(changed=False, failed=True)
-        self.assertEqual(
-            result["msg"]["response"][2], "Deletion of vrfs test_vrf_1 has failed"
-        )
+        msg = "DcnmVrf.push_diff_delete: Deletion of vrfs test_vrf_1 has failed"
+        self.assertEqual(result["msg"]["response"][2], msg)
 
     def test_dcnm_vrf_query(self):
         set_module_args(
@@ -1270,16 +1273,16 @@ def test_dcnm_vrf_validation(self):
             )
         )
         result = self.execute_module(changed=False, failed=True)
-        self.assertEqual(
-            result["msg"], "ip_address is mandatory under attach parameters"
-        )
+        msg = "DcnmVrf.validate_input: "
+        msg += "vrf_name is mandatory under vrf parameters,"
+        msg += "ip_address is mandatory under attach parameters"
+        self.assertEqual(result["msg"], msg)
 
     def test_dcnm_vrf_validation_no_config(self):
         set_module_args(dict(state="merged", fabric="test_fabric", config=[]))
         result = self.execute_module(changed=False, failed=True)
-        self.assertEqual(
-            result["msg"], "config: element is mandatory for this state merged"
-        )
+        msg = "DcnmVrf.validate_input: config element is mandatory for merged state"
+        self.assertEqual(result["msg"], msg)
 
     def test_dcnm_vrf_12check_mode(self):
         self.version = 12