Merge pull request #176 from CiscoDevNet/2.3.0
Release 2.3.0
mikewiebe authored Oct 28, 2022
2 parents 5e35e07 + 3010c90 commit 2db873e
Showing 7 changed files with 282 additions and 2 deletions.
7 changes: 7 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,12 @@
All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](http://semver.org/).

## [2.3.0] - 2022-10-28

### Added

* Added the ability to configure the `multicast_group_address` parameter in the `dcnm_network` module

## [2.2.0] - 2022-10-14

### Added
@@ -188,6 +194,7 @@ The Ansible Cisco Data Center Network Manager (DCNM) collection includes modules
* cisco.dcnm.dcnm_network - Add and remove Networks from a DCNM managed VXLAN fabric.
* cisco.dcnm.dcnm_interface - DCNM Ansible Module for managing interfaces.
[2.3.0]: https://github.com/CiscoDevNet/ansible-dcnm/compare/2.2.0...2.3.0
[2.2.0]: https://github.com/CiscoDevNet/ansible-dcnm/compare/2.1.1...2.2.0
[2.1.1]: https://github.com/CiscoDevNet/ansible-dcnm/compare/2.1.0...2.1.1
[2.1.0]: https://github.com/CiscoDevNet/ansible-dcnm/compare/2.0.1...2.1.0
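For reference, a minimal playbook task showing how the new option can be set. This is a sketch, not part of this commit; the fabric, network, and VRF names and the numeric IDs below are placeholders, while the option names themselves match the module parameters added in this release.

- name: Configure a network with a static multicast group (sketch)
  cisco.dcnm.dcnm_network:
    fabric: example_fabric            # placeholder fabric name
    state: merged
    config:
      - net_name: example-net         # placeholder network name
        vrf_name: example-vrf         # placeholder VRF name
        net_id: 30010
        vlan_id: 2301
        gw_ip_subnet: '192.168.10.1/24'
        multicast_group_address: '224.5.5.5'   # new in 2.3.0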
18 changes: 17 additions & 1 deletion docs/cisco.dcnm.dcnm_network_module.rst
@@ -354,6 +354,22 @@ Parameters
<div>Configured MTU value should be in range 68-9216</div>
</td>
</tr>
<tr>
<td class="elbow-placeholder"></td>
<td colspan="2">
<div class="ansibleOptionAnchor" id="parameter-"></div>
<b>multicast_group_address</b>
<a class="ansibleOptionLink" href="#parameter-" title="Permalink to this option"></a>
<div style="font-size: small">
<span style="color: purple">string</span>
</div>
</td>
<td>
</td>
<td>
<div>The multicast IP address for the network</div>
</td>
</tr>
<tr>
<td class="elbow-placeholder"></td>
<td colspan="2">
@@ -538,7 +554,7 @@ Parameters
Examples
--------

.. code-block:: yaml
.. code-block:: yaml+jinja

# This module supports the following states:
#
2 changes: 1 addition & 1 deletion galaxy.yml
@@ -1,7 +1,7 @@
---
namespace: cisco
name: dcnm
version: 2.2.0
version: 2.3.0
readme: README.md
authors:
- Shrishail Kariyappanavar <nkshrishail>
32 changes: 32 additions & 0 deletions plugins/modules/dcnm_network.py
@@ -162,6 +162,11 @@
- Configured ID value should be in range 0-1023
type: int
required: false
multicast_group_address:
description:
- The multicast IP address for the network
type: str
required: false
attach:
description:
- List of network attachment details
@@ -681,6 +686,7 @@ def diff_for_create(self, want, have):
dhcp2_vrf_changed = False
dhcp3_vrf_changed = False
dhcp_loopback_changed = False
multicast_group_address_changed = False

if want.get("networkId") and want["networkId"] != have["networkId"]:
self.module.fail_json(
@@ -729,6 +735,8 @@ def diff_for_create(self, want, have):
dhcp3_vrf_have = json_to_dict_have.get("vrfDhcp3", "")
dhcp_loopback_want = json_to_dict_want.get("loopbackId", "")
dhcp_loopback_have = json_to_dict_have.get("loopbackId", "")
multicast_group_address_want = json_to_dict_want.get("mcastGroup", "")
multicast_group_address_have = json_to_dict_have.get("mcastGroup", "")
if vlanId_have != "":
vlanId_have = int(vlanId_have)
tag_want = json_to_dict_want.get("tag", "")
@@ -766,6 +774,7 @@ def diff_for_create(self, want, have):
or dhcp2_vrf_have != dhcp2_vrf_want
or dhcp3_vrf_have != dhcp3_vrf_want
or dhcp_loopback_have != dhcp_loopback_want
or multicast_group_address_have != multicast_group_address_want
):
# The network updates with missing networkId will have to use existing
# networkId from the instance of the same network on DCNM.
@@ -801,6 +810,8 @@ def diff_for_create(self, want, have):
dhcp3_vrf_changed = True
if dhcp_loopback_have != dhcp_loopback_want:
dhcp_loopback_changed = True
if multicast_group_address_have != multicast_group_address_want:
multicast_group_address_changed = True

want.update({"networkId": have["networkId"]})
create = want
@@ -855,6 +866,8 @@ def diff_for_create(self, want, have):
dhcp3_vrf_changed = True
if dhcp_loopback_have != dhcp_loopback_want:
dhcp_loopback_changed = True
if multicast_group_address_have != multicast_group_address_want:
multicast_group_address_changed = True

want.update({"networkId": have["networkId"]})
create = want
@@ -876,6 +889,7 @@ def diff_for_create(self, want, have):
dhcp2_vrf_changed,
dhcp3_vrf_changed,
dhcp_loopback_changed,
multicast_group_address_changed,
)

def update_create_params(self, net):
@@ -928,6 +942,7 @@ def update_create_params(self, net):
"vrfDhcp2": net.get("dhcp_srvr2_vrf", ""),
"vrfDhcp3": net.get("dhcp_srvr3_vrf", ""),
"loopbackId": net.get("dhcp_loopback_id", ""),
"mcastGroup": net.get("multicast_group_address", ""),
}

if template_conf["vlanId"] is None:
@@ -948,6 +963,8 @@ def update_create_params(self, net):
template_conf["vrfDhcp3"] = ""
if template_conf["loopbackId"] is None:
template_conf["loopbackId"] = ""
if template_conf["mcastGroup"] is None:
template_conf["mcastGroup"] = ""

net_upd.update({"networkTemplateConfig": json.dumps(template_conf)})

@@ -1031,6 +1048,7 @@ def get_have(self):
"vrfDhcp2": json_to_dict.get("vrfDhcp2", ""),
"vrfDhcp3": json_to_dict.get("vrfDhcp3", ""),
"loopbackId": json_to_dict.get("loopbackId", ""),
"mcastGroup": json_to_dict.get("mcastGroup", ""),
}

net.update({"networkTemplateConfig": json.dumps(t_conf)})
@@ -1066,6 +1084,7 @@ def get_have(self):
"vrfDhcp2": json_to_dict.get("vrfDhcp2", ""),
"vrfDhcp3": json_to_dict.get("vrfDhcp3", ""),
"loopbackId": json_to_dict.get("loopbackId", ""),
"mcastGroup": json_to_dict.get("mcastGroup", ""),
}

l2net.update({"networkTemplateConfig": json.dumps(t_conf)})
@@ -1493,6 +1512,7 @@ def get_diff_merge(self, replace=False):
dhcp2_vrf_changed = {}
dhcp3_vrf_changed = {}
dhcp_loopback_changed = {}
multicast_group_address_changed = {}

for want_c in self.want_create:
found = False
@@ -1517,6 +1537,7 @@ def get_diff_merge(self, replace=False):
dhcp2_vrf_chg,
dhcp3_vrf_chg,
dhcp_loopbk_chg,
mcast_grp_chg,
) = self.diff_for_create(want_c, have_c)
gw_changed.update({want_c["networkName"]: gw_chg})
tg_changed.update({want_c["networkName"]: tg_chg})
@@ -1534,6 +1555,9 @@ def get_diff_merge(self, replace=False):
dhcp_loopback_changed.update(
{want_c["networkName"]: dhcp_loopbk_chg}
)
multicast_group_address_changed.update(
{want_c["networkName"]: mcast_grp_chg}
)
if diff:
diff_create_update.append(diff)
break
@@ -1648,6 +1672,7 @@ def get_diff_merge(self, replace=False):
or dhcp2_vrf_changed.get(want_a["networkName"], False)
or dhcp3_vrf_changed.get(want_a["networkName"], False)
or dhcp_loopback_changed.get(want_a["networkName"], False)
or multicast_group_address_changed.get(want_a["networkName"], False)
):
dep_net = want_a["networkName"]

@@ -1735,6 +1760,7 @@ def format_diff(self):
found_c.update({"dhcp_srvr2_vrf": json_to_dict.get("vrfDhcp2", "")})
found_c.update({"dhcp_srvr3_vrf": json_to_dict.get("vrfDhcp3", "")})
found_c.update({"dhcp_loopback_id": json_to_dict.get("loopbackId", "")})
found_c.update({"multicast_group_address": json_to_dict.get("mcastGroup", "")})
found_c.update({"attach": []})

del found_c["fabric"]
@@ -2052,6 +2078,7 @@ def push_to_remote(self, is_rollback=False):
"vrfDhcp2": json_to_dict.get("vrfDhcp2", ""),
"vrfDhcp3": json_to_dict.get("vrfDhcp3", ""),
"loopbackId": json_to_dict.get("loopbackId", ""),
"mcastGroup": json_to_dict.get("mcastGroup", ""),
}

net.update({"networkTemplateConfig": json.dumps(t_conf)})
@@ -2145,6 +2172,7 @@ def validate_input(self):
dhcp_srvr2_vrf=dict(type="str", length_max=32),
dhcp_srvr3_vrf=dict(type="str", length_max=32),
dhcp_loopback_id=dict(type="int", range_min=0, range_max=1023),
multicast_group_address=dict(type="ipv4", default=""),
)
att_spec = dict(
ip_address=dict(required=True, type="str"),
@@ -2208,6 +2236,7 @@ def validate_input(self):
dhcp_srvr2_vrf=dict(type="str", length_max=32),
dhcp_srvr3_vrf=dict(type="str", length_max=32),
dhcp_loopback_id=dict(type="int", range_min=0, range_max=1023),
multicast_group_address=dict(type="ipv4", default=""),
)
att_spec = dict(
ip_address=dict(required=True, type="str"),
@@ -2459,6 +2488,9 @@ def dcnm_update_network_information(self, want, have, cfg):
if cfg.get("dhcp_loopback_id", None) is None:
json_to_dict_want["loopbackId"] = json_to_dict_have["loopbackId"]

if cfg.get("multicast_group_address", None) is None:
json_to_dict_want["mcastGroup"] = json_to_dict_have["mcastGroup"]

want.update({"networkTemplateConfig": json.dumps(json_to_dict_want)})

def update_want(self):
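The module changes above all follow the same want/have comparison pattern, now extended to mcastGroup. As a reduced sketch (not code from this commit), the comparison performed inside diff_for_create amounts to the following; the helper name mcast_group_changed is illustrative, while the networkTemplateConfig key, the mcastGroup key, and the empty-string default mirror the module code shown above.

import json

def mcast_group_changed(want, have):
    # Both the desired (want) and deployed (have) network records carry the
    # template settings as a JSON-encoded string, so decode them first.
    want_cfg = json.loads(want["networkTemplateConfig"])
    have_cfg = json.loads(have["networkTemplateConfig"])
    # Missing values fall back to "", mirroring the module's defaults.
    mcast_want = want_cfg.get("mcastGroup", "")
    mcast_have = have_cfg.get("mcastGroup", "")
    return mcast_want != mcast_have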
@@ -0,0 +1,44 @@
- name: Setup - Remove all existing networks
cisco.dcnm.dcnm_network:
fabric: "{{ ansible_it_fabric }}"
state: deleted

- name: Test mcast parameters for state merged
cisco.dcnm.dcnm_network:
fabric: "{{ ansible_it_fabric }}"
state: merged
config:
- net_name: ansible-net13
vrf_name: ansible-vrf-int1
net_id: 7009
vlan_id: 3505
gw_ip_subnet: '152.168.30.1/24'
mtu_l3intf: 7600
arp_suppress: False
int_desc: 'test interface'
is_l2only: False
vlan_name: testvlan
multicast_group_address: '224.5.5.5'
attach:
- ip_address: "{{ ansible_switch1 }}"
ports: []
deploy: True
register: result

- assert:
that:
- 'result.changed == true'

- name: Query fabric state until networkStatus transitions to DEPLOYED state
cisco.dcnm.dcnm_network:
fabric: "{{ ansible_it_fabric }}"
state: query
register: result
until:
- "result.response[0].parent.networkStatus is search('DEPLOYED')"
retries: 30
delay: 2

- assert:
that:
- "result.response[0].parent.networkTemplateConfig.mcastGroup is search('224.5.5.5')"
@@ -0,0 +1,76 @@
- name: Setup - Remove all existing networks
cisco.dcnm.dcnm_network:
fabric: "{{ ansible_it_fabric }}"
state: deleted

- name: Create network with initial DEFAULT mcast parameter value
cisco.dcnm.dcnm_network:
fabric: "{{ ansible_it_fabric }}"
state: merged
config:
- net_name: ansible-net13
vrf_name: ansible-vrf-int1
net_id: 7009
vlan_id: 3505
gw_ip_subnet: '152.168.30.1/24'
mtu_l3intf: 7600
arp_suppress: False
int_desc: 'test interface'
is_l2only: False
vlan_name: testvlan
dhcp_loopback_id: 0
attach:
- ip_address: "{{ ansible_switch1 }}"
ports: []
deploy: True
register: result

- name: Query fabric state until networkStatus transitions to DEPLOYED state
cisco.dcnm.dcnm_network:
fabric: "{{ ansible_it_fabric }}"
state: query
register: result
until:
- "result.response[0].parent.networkStatus is search('DEPLOYED')"
retries: 30
delay: 2

- assert:
that:
- "result.response[0].parent.networkTemplateConfig.mcastGroup is search('239.1.1.1')"

- name: Change mcast parameter values
cisco.dcnm.dcnm_network:
fabric: "{{ ansible_it_fabric }}"
state: merged
config:
- net_name: ansible-net13
vrf_name: ansible-vrf-int1
net_id: 7009
vlan_id: 3505
gw_ip_subnet: '152.168.30.1/24'
mtu_l3intf: 7600
arp_suppress: False
int_desc: 'test interface'
is_l2only: False
vlan_name: testvlan
multicast_group_address: '230.55.24.155'
attach:
- ip_address: "{{ ansible_switch1 }}"
ports: []
deploy: True
register: result

- name: Query fabric state until networkStatus transitions to DEPLOYED state
cisco.dcnm.dcnm_network:
fabric: "{{ ansible_it_fabric }}"
state: query
register: result
until:
- "result.response[0].parent.networkStatus is search('DEPLOYED')"
retries: 30
delay: 2

- assert:
that:
- "result.response[0].parent.networkTemplateConfig.mcastGroup is search('230.55.24.155')"