diff -Nru neutron-14.0.2/AUTHORS neutron-14.0.3/AUTHORS --- neutron-14.0.2/AUTHORS 2019-07-01 02:56:19.000000000 +0000 +++ neutron-14.0.3/AUTHORS 2019-10-22 19:48:02.000000000 +0000 @@ -9,6 +9,7 @@ Abishek Subramanian Adam Gandelman Adam Harwell +Adam Spiers Adelina Tuvenie Adin Scannell Adit Sarfaty @@ -427,6 +428,7 @@ KAWAI Hiroaki KIYOHIRO ADACHI Kahou Lei +Kailun Qin Kailun Qin Kaiwei Fan Kanzhe Jiang @@ -503,6 +505,7 @@ Marga Millet Margaret Frances Mark Doffman +Mark Goddard Mark McClain Mark McClain Mark McClain @@ -689,6 +692,7 @@ Rosario Di Somma Rossella Sblendido Rossella Sblendido +RoyKing Rudrajit Tapadar Rui Zang Russell Bryant @@ -741,6 +745,7 @@ Sean Mooney Sean Mooney Sean Redmond +Sebastian Lohff Senhua Huang Serge Maskalik Sergey Belous @@ -895,6 +900,7 @@ Yalei Wang YanXingan Yang JianFeng +Yang Li Yang Youseok Yang Yu Yang Yu @@ -1151,6 +1157,7 @@ zhangzs zhhuabj zhiyuan_cai +zhouhenglc zhsun zhufl zoukeke@cmss.chinamobile.com diff -Nru neutron-14.0.2/ChangeLog neutron-14.0.3/ChangeLog --- neutron-14.0.2/ChangeLog 2019-07-01 02:56:18.000000000 +0000 +++ neutron-14.0.3/ChangeLog 2019-10-22 19:48:01.000000000 +0000 @@ -1,6 +1,59 @@ CHANGES ======= +14.0.3 +------ + +* switch to the newly created opensuse-15 nodeset +* Handle ports assigned to routers without routerports +* fixed\_configured=True when Add/Remove port IPs +* OVS flows for custom ethertypes must be on EGRESS +* DVR: Modify DVR flows to allow ARP requests to hit ARP Responder table +* DVR: Cleanup ml2 dvr portbindings on migration +* Change ip\_lib decorators order +* Avoid unnecessary operation of ovsdb and flows +* Fix creation of vlan network with segmentation\_id set to 0 +* Add info log about ready DHCP config for ports +* Increase timeouts for OVSDB in functional tests +* Check the namespace is ready in test\_mtu\_update tests +* Fix bulk port functioning with requested security groups +* Create \_mech\_context before delete to avoid race +* Disable "of\_inactivity\_probe" in fullstack tests +* Do not use privsep context when listing the namespaces in fullstack +* Delay HA router transition from "backup" to "master" +* Refactor the L3 agent batch notifier +* Veth pair "IFLA\_LINK" populated since kernel 4.15.0-60-generic +* ML2 plugin: extract and postpone limit in port query +* Use created subnet in port generator in "test\_port\_ip\_update\_revises" +* Increase TestDhcpAgentHA.agent\_down\_time to 30 seconds +* Increase number of retries in \_process\_trunk\_subport\_bindings +* Initialize phys bridges before setup\_rpc +* Populate binding levels when concurrent ops fail +* Make sure the port still in port map when prepare\_port\_filter +* Fix sort issue in test\_dhcp\_agent\_scheduler.test\_filter\_bindings +* [DVR] Add lock during creation of FIP agent gateway port +* Clear skb mark on encapsulating packets +* fix update port bug +* Check for agent restarted after checking for DVR port +* Fix default RPC worker count +* Retry trunk status updates failing with StaleDataError +* Don't crash ovs agent during reconfigure of phys bridges +* fix NetworkSegmentRange OVO entry point +* Use --bind-dynamic with dnsmasq instead of --bind-interfaces +* Yield control to other greenthreads while processing trusted ports +* Ignore first local port update notification +* Fix list security groups performance with RBAC +* Limit max ports per rpc for dhcp\_ready\_on\_ports() +* Fix bulk port binding +* Refactor qos\_plugin.\_extend\_port\_resource\_request +* Add qos\_network\_policy\_id to Port OVO +* Don't match 
input interface in POSTROUTING table +* Add custom ethertype processing +* Treat networks shared by RBAC in same way as shared with all tenants +* Import "Manage Networking service quotas" admin guide +* Turn CIDR in query filter into proper subnet +* Stop OVS agent before starting it again + 14.0.2 ------ diff -Nru neutron-14.0.2/debian/changelog neutron-14.0.3/debian/changelog --- neutron-14.0.2/debian/changelog 2019-07-03 19:37:10.000000000 +0000 +++ neutron-14.0.3/debian/changelog 2019-12-13 12:59:21.000000000 +0000 @@ -1,8 +1,14 @@ -neutron (2:14.0.2-0ubuntu1~cloud0) bionic-stein; urgency=medium +neutron (2:14.0.3-0ubuntu1~cloud0) bionic-stein; urgency=medium * New upstream release for the Ubuntu Cloud Archive. - -- Openstack Ubuntu Testing Bot Wed, 03 Jul 2019 19:37:10 +0000 + -- Openstack Ubuntu Testing Bot Fri, 13 Dec 2019 12:59:21 +0000 + +neutron (2:14.0.3-0ubuntu1) disco; urgency=medium + + * New stable point release for OpenStack Stein (LP: #1853319). + + -- Sahid Orentino Ferdjaoui Tue, 03 Dec 2019 11:46:34 +0100 neutron (2:14.0.2-0ubuntu1) disco; urgency=medium diff -Nru neutron-14.0.2/doc/source/admin/ops-quotas.rst neutron-14.0.3/doc/source/admin/ops-quotas.rst --- neutron-14.0.2/doc/source/admin/ops-quotas.rst 1970-01-01 00:00:00.000000000 +0000 +++ neutron-14.0.3/doc/source/admin/ops-quotas.rst 2019-10-22 19:46:03.000000000 +0000 @@ -0,0 +1,355 @@ +================================ +Manage Networking service quotas +================================ + +A quota limits the number of available resources. A default +quota might be enforced for all projects. When you try to create +more resources than the quota allows, an error occurs: + +.. code-block:: console + + $ openstack network create test_net + Quota exceeded for resources: ['network'] + +Per-project quota configuration is also supported by the quota +extension API. See :ref:`cfg_quotas_per_tenant` for details. + +Basic quota configuration +~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the Networking default quota mechanism, all projects have +the same quota values, such as the number of resources that a +project can create. + +The quota value is defined in the OpenStack Networking +``/etc/neutron/neutron.conf`` configuration file. This example shows the +default quota values: + +.. code-block:: ini + + [quotas] + # number of networks allowed per tenant, and minus means unlimited + quota_network = 10 + + # number of subnets allowed per tenant, and minus means unlimited + quota_subnet = 10 + + # number of ports allowed per tenant, and minus means unlimited + quota_port = 50 + + # default driver to use for quota checks + quota_driver = neutron.quota.ConfDriver + +OpenStack Networking also supports quotas for L3 resources: +router and floating IP. Add these lines to the +``quotas`` section in the ``/etc/neutron/neutron.conf`` file: + +.. code-block:: ini + + [quotas] + # number of routers allowed per tenant, and minus means unlimited + quota_router = 10 + + # number of floating IPs allowed per tenant, and minus means unlimited + quota_floatingip = 50 + +OpenStack Networking also supports quotas for security group +resources: number of security groups and the number of rules for +each security group. Add these lines to the +``quotas`` section in the ``/etc/neutron/neutron.conf`` file: + +.. code-block:: ini + + [quotas] + # number of security groups per tenant, and minus means unlimited + quota_security_group = 10 + + # number of security rules allowed per tenant, and minus means unlimited + quota_security_group_rule = 100 + +.. 
_cfg_quotas_per_tenant:
+
+Configure per-project quotas
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+OpenStack Networking also supports per-project quota limits via the
+quota extension API.
+
+.. todo:: This document needs to be migrated to using ``openstack`` commands
+          rather than the deprecated ``neutron`` commands.
+
+Use these commands to manage per-project quotas:
+
+neutron quota-delete
+    Delete defined quotas for a specified project
+
+neutron quota-list
+    Lists defined quotas for all projects
+
+neutron quota-show
+    Shows quotas for a specified project
+
+neutron quota-default-show
+    Show default quotas for a specified project
+
+neutron quota-update
+    Updates quotas for a specified project
+
+Only users with the ``admin`` role can change a quota value. By default,
+the default set of quotas is enforced for all projects, so no
+:command:`quota-create` command exists.
+
+#. Configure Networking to show per-project quotas.
+
+   Set the ``quota_driver`` option in the ``/etc/neutron/neutron.conf`` file.
+
+   .. code-block:: ini
+
+      quota_driver = neutron.db.quota_db.DbQuotaDriver
+
+   When you set this option, the output for Networking commands shows ``quotas``.
+
+#. List Networking extensions.
+
+   To list the Networking extensions, run this command:
+
+   .. code-block:: console
+
+      $ openstack extension list --network
+
+   The command shows the ``quotas`` extension, which provides
+   per-project quota management support.
+
+   .. note::
+
+      Many of the extensions shown below are supported in the Mitaka release and later.
+
+   .. code-block:: console
+
+      +------------------------+------------------------+--------------------------+
+      | Name                   | Alias                  | Description              |
+      +------------------------+------------------------+--------------------------+
+      | ...                    | ...                    | ...                      |
+      | Quota management       | quotas                 | Expose functions for     |
+      | support                |                        | quotas management per    |
+      |                        |                        | tenant                   |
+      | ...                    | ...                    | ...                      |
+      +------------------------+------------------------+--------------------------+
+
+#. Show information for the quotas extension.
+
+   To show information for the ``quotas`` extension, run this command:
+
+   .. code-block:: console
+
+      $ neutron ext-show quotas
+      +-------------+--------------------------------------------------------------+
+      | Field       | Value                                                        |
+      +-------------+--------------------------------------------------------------+
+      | alias       | quotas                                                       |
+      | description | Expose functions for quotas management per tenant            |
+      | links       |                                                              |
+      | name        | Quota management support                                     |
+      | namespace   | https://docs.openstack.org/network/ext/quotas-sets/api/v2.0  |
+      | updated     | 2012-07-29T10:00:00-00:00                                    |
+      +-------------+--------------------------------------------------------------+
+
+   .. note::
+
+      Only some plug-ins support per-project quotas.
+      Specifically, Open vSwitch, Linux Bridge, and VMware NSX
+      support them, but new versions of other plug-ins might
+      bring additional functionality. See the documentation for
+      each plug-in.
+
+#. List projects that have per-project quota support.
+
+   The :command:`neutron quota-list` command lists projects for which the
+   per-project quota is enabled. The command does not list projects with
+   default quota support. You must be an administrative user to run this
+   command:
+
+   .. code-block:: console
+
+      $ neutron quota-list
+      +------------+---------+------+--------+--------+----------------------------------+
+      | floatingip | network | port | router | subnet | tenant_id                        |
+      +------------+---------+------+--------+--------+----------------------------------+
+      | 20         | 5       | 20   | 10     | 5      | 6f88036c45344d9999a1f971e4882723 |
+      | 25         | 10      | 30   | 10     | 10     | bff5c9455ee24231b5bc713c1b96d422 |
+      +------------+---------+------+--------+--------+----------------------------------+
+
+#. Show per-project quota values.
+
+   The :command:`neutron quota-show` command reports the current
+   set of quota limits for the specified project.
+   Non-administrative users can run this command without the
+   ``--tenant_id`` parameter. If per-project quota limits are
+   not enabled for the project, the command shows the default
+   set of quotas.
+
+   .. note::
+
+      Additional quotas added in the Mitaka release include ``security_group``,
+      ``security_group_rule``, ``subnet``, and ``subnetpool``.
+
+   .. code-block:: console
+
+      $ neutron quota-show --tenant_id 6f88036c45344d9999a1f971e4882723
+      +---------------------+-------+
+      | Field               | Value |
+      +---------------------+-------+
+      | floatingip          | 50    |
+      | network             | 10    |
+      | port                | 50    |
+      | rbac_policy         | 10    |
+      | router              | 10    |
+      | security_group      | 10    |
+      | security_group_rule | 100   |
+      | subnet              | 10    |
+      | subnetpool          | -1    |
+      +---------------------+-------+
+
+   The following command shows the output for a
+   non-administrative user.
+
+   .. code-block:: console
+
+      $ neutron quota-show
+      +---------------------+-------+
+      | Field               | Value |
+      +---------------------+-------+
+      | floatingip          | 50    |
+      | network             | 10    |
+      | port                | 50    |
+      | rbac_policy         | 10    |
+      | router              | 10    |
+      | security_group      | 10    |
+      | security_group_rule | 100   |
+      | subnet              | 10    |
+      | subnetpool          | -1    |
+      +---------------------+-------+
+
+#. Update quota values for a specified project.
+
+   Use the :command:`neutron quota-update` command to
+   update a quota for a specified project.
+
+   .. code-block:: console
+
+      $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 --network 5
+      +---------------------+-------+
+      | Field               | Value |
+      +---------------------+-------+
+      | floatingip          | 50    |
+      | network             | 5     |
+      | port                | 50    |
+      | rbac_policy         | 10    |
+      | router              | 10    |
+      | security_group      | 10    |
+      | security_group_rule | 100   |
+      | subnet              | 10    |
+      | subnetpool          | -1    |
+      +---------------------+-------+
+
+   You can update quotas for multiple resources through one
+   command.
+
+   .. code-block:: console
+
+      $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 --subnet 5 --port 20
+      +---------------------+-------+
+      | Field               | Value |
+      +---------------------+-------+
+      | floatingip          | 50    |
+      | network             | 5     |
+      | port                | 20    |
+      | rbac_policy         | 10    |
+      | router              | 10    |
+      | security_group      | 10    |
+      | security_group_rule | 100   |
+      | subnet              | 5     |
+      | subnetpool          | -1    |
+      +---------------------+-------+
+
+   To update the limits for an L3 resource such as a router
+   or floating IP, you must define new values for the quotas
+   after the ``--`` directive.
+
+   This example updates the limit of the number of floating
+   IPs for the specified project.
+
+   .. code-block:: console
+
+      $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 --floatingip 20
+      +---------------------+-------+
+      | Field               | Value |
+      +---------------------+-------+
+      | floatingip          | 20    |
+      | network             | 5     |
+      | port                | 20    |
+      | rbac_policy         | 10    |
+      | router              | 10    |
+      | security_group      | 10    |
+      | security_group_rule | 100   |
+      | subnet              | 5     |
+      | subnetpool          | -1    |
+      +---------------------+-------+
+
+   You can update the limits of multiple resources,
+   including L2 resources and L3 resources, in one
+   command:
+
+   .. code-block:: console
+
+      $ neutron quota-update --tenant_id 6f88036c45344d9999a1f971e4882723 \
+        --network 3 --subnet 3 --port 3 --floatingip 3 --router 3
+      +---------------------+-------+
+      | Field               | Value |
+      +---------------------+-------+
+      | floatingip          | 3     |
+      | network             | 3     |
+      | port                | 3     |
+      | rbac_policy         | 10    |
+      | router              | 3     |
+      | security_group      | 10    |
+      | security_group_rule | 100   |
+      | subnet              | 3     |
+      | subnetpool          | -1    |
+      +---------------------+-------+
+
+#. Delete per-project quota values.
+
+   To clear per-project quota limits, use the
+   :command:`neutron quota-delete` command.
+
+   .. code-block:: console
+
+      $ neutron quota-delete --tenant_id 6f88036c45344d9999a1f971e4882723
+      Deleted quota: 6f88036c45344d9999a1f971e4882723
+
+   After you run this command, you can see that quota
+   values for the project are reset to the default values.
+
+   .. code-block:: console
+
+      $ openstack quota show 6f88036c45344d9999a1f971e4882723
+      +---------------------+-------+
+      | Field               | Value |
+      +---------------------+-------+
+      | floatingip          | 50    |
+      | network             | 10    |
+      | port                | 50    |
+      | rbac_policy         | 10    |
+      | router              | 10    |
+      | security_group      | 10    |
+      | security_group_rule | 100   |
+      | subnet              | 10    |
+      | subnetpool          | -1    |
+      +---------------------+-------+
+
+.. note::
+
+   Listing default quotas with the OpenStack command line client will
+   provide all quotas for networking and other services. Previously,
+   the :command:`neutron quota-show --tenant_id` would list only networking
+   quotas.
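As a supplement to the CLI examples above, the same per-project quota operations can be scripted. A minimal sketch using openstacksdk, assuming openstacksdk is installed, a cloud named "mycloud" is configured in clouds.yaml (both assumptions, not part of this guide), and reusing the project ID from the examples:

    # sketch: manage Networking quotas programmatically with openstacksdk
    import openstack

    conn = openstack.connect(cloud='mycloud')  # assumes a clouds.yaml entry
    project_id = '6f88036c45344d9999a1f971e4882723'

    # read the effective network quota set for the project
    quota = conn.network.get_quota(project_id)
    print(quota.networks, quota.ports, quota.floating_ips)

    # update selected limits; resources not named keep their current values
    conn.network.update_quota(project_id, networks=5, ports=20)

    # delete the per-project overrides, resetting the project to defaults
    conn.network.delete_quota(project_id)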
diff -Nru neutron-14.0.2/doc/source/admin/ops.rst neutron-14.0.3/doc/source/admin/ops.rst
--- neutron-14.0.2/doc/source/admin/ops.rst	2019-07-01 02:54:56.000000000 +0000
+++ neutron-14.0.3/doc/source/admin/ops.rst	2019-10-22 19:46:03.000000000 +0000
@@ -10,3 +10,4 @@
    ops-ip-availability
    ops-resource-tags
    ops-resource-purge
+   ops-quotas
diff -Nru neutron-14.0.2/neutron/agent/common/ovs_lib.py neutron-14.0.3/neutron/agent/common/ovs_lib.py
--- neutron-14.0.2/neutron/agent/common/ovs_lib.py	2019-07-01 02:54:56.000000000 +0000
+++ neutron-14.0.3/neutron/agent/common/ovs_lib.py	2019-10-22 19:46:04.000000000 +0000
@@ -508,6 +508,7 @@
         options['local_ip'] = local_ip
         options['in_key'] = 'flow'
         options['out_key'] = 'flow'
+        options['egress_pkt_mark'] = '0'
         if tunnel_csum:
             options['csum'] = str(tunnel_csum).lower()
         if tos:
diff -Nru neutron-14.0.2/neutron/agent/dhcp/agent.py neutron-14.0.3/neutron/agent/dhcp/agent.py
--- neutron-14.0.2/neutron/agent/dhcp/agent.py	2019-07-01 02:54:56.000000000 +0000
+++ neutron-14.0.3/neutron/agent/dhcp/agent.py	2019-10-22 19:46:04.000000000 +0000
@@ -51,6 +51,8 @@
 DHCP_PROCESS_GREENLET_MAX = 32
 DHCP_PROCESS_GREENLET_MIN = 8
 
+DHCP_READY_PORTS_SYNC_MAX = 64
+
 
 def _sync_lock(f):
     """Decorator to block all operations for a global sync call."""
@@ -243,10 +245,15 @@
             # this is just watching a set so we can do it really frequently
             eventlet.sleep(0.1)
             if self.dhcp_ready_ports:
-                ports_to_send = self.dhcp_ready_ports
-                self.dhcp_ready_ports = set()
+                ports_to_send = set()
+                for port_count in range(min(len(self.dhcp_ready_ports),
+                                            DHCP_READY_PORTS_SYNC_MAX)):
+                    ports_to_send.add(self.dhcp_ready_ports.pop())
+
                 try:
                     self.plugin_rpc.dhcp_ready_on_ports(ports_to_send)
+                    LOG.info("DHCP configuration for ports %s is completed",
+                             ports_to_send)
                     continue
                 except oslo_messaging.MessagingTimeout:
                     LOG.error("Timeout notifying server of ports ready. "
diff -Nru neutron-14.0.2/neutron/agent/l3/agent.py neutron-14.0.3/neutron/agent/l3/agent.py
--- neutron-14.0.2/neutron/agent/l3/agent.py	2019-07-01 02:54:56.000000000 +0000
+++ neutron-14.0.3/neutron/agent/l3/agent.py	2019-10-22 19:46:04.000000000 +0000
@@ -466,6 +466,15 @@
         return True
 
     def _router_removed(self, ri, router_id):
+        """Delete the router and stop the auxiliary processes
+
+        This stops the auxiliary processes (keepalived,
+        keepalived-state-change, radvd, etc.) and deletes the router ports
+        and the namespace. The "router_info" cache is also updated at the
+        beginning of the process, to prevent any other concurrent process
+        from handling the router being deleted. If an exception is raised,
+        the "router_info" cache is restored.
+        """
         if ri is None:
             LOG.warning("Info for router %s was not found. "
                         "Performing router cleanup", router_id)
@@ -477,8 +486,12 @@
                 self.context,
                 states=(ri,),
                 resource_id=router_id))
-        ri.delete()
         del self.router_info[router_id]
+        try:
+            ri.delete()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self.router_info[router_id] = ri
 
         registry.notify(resources.ROUTER, events.AFTER_DELETE, self, router=ri)
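The _router_removed() hunk above pops the router from the "router_info" cache before deleting it and puts the entry back if the deletion fails. A standalone sketch of that pop-then-restore pattern with oslo.utils (the _Router class is a hypothetical stand-in, not neutron's RouterInfo):

    from oslo_utils import excutils


    class _Router(object):
        """Hypothetical stand-in for an object with cleanup to run."""
        def delete(self):
            pass  # namespace/process cleanup would happen here


    class RouterCache(object):
        """Sketch: drop the cache entry first, restore it if cleanup fails."""

        def __init__(self):
            self.router_info = {}

        def remove(self, router_id):
            # pop first so no concurrent operation handles a half-deleted
            # router
            ri = self.router_info.pop(router_id)
            try:
                ri.delete()
            except Exception:
                with excutils.save_and_reraise_exception():
                    # restore the entry so a later retry can still find it
                    self.router_info[router_id] = ri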
diff -Nru neutron-14.0.2/neutron/agent/l3/ha.py neutron-14.0.3/neutron/agent/l3/ha.py
--- neutron-14.0.2/neutron/agent/l3/ha.py	2019-07-01 02:54:56.000000000 +0000
+++ neutron-14.0.3/neutron/agent/l3/ha.py	2019-10-22 19:46:04.000000000 +0000
@@ -14,6 +14,7 @@
 # under the License.
 
 import os
+import threading
 
 import eventlet
 from oslo_log import log as logging
@@ -83,6 +84,8 @@
         self.state_change_notifier = batch_notifier.BatchNotifier(
             self._calculate_batch_duration(), self.notify_server)
         eventlet.spawn(self._start_keepalived_notifications_server)
+        self._transition_states = {}
+        self._transition_state_mutex = threading.Lock()
 
     def _get_router_info(self, router_id):
         try:
@@ -112,7 +115,44 @@
         # default 2 seconds.
         return self.conf.ha_vrrp_advert_int
 
+    def _update_transition_state(self, router_id, new_state=None):
+        with self._transition_state_mutex:
+            transition_state = self._transition_states.get(router_id)
+            if new_state:
+                self._transition_states[router_id] = new_state
+            else:
+                self._transition_states.pop(router_id, None)
+            return transition_state
+
     def enqueue_state_change(self, router_id, state):
+        """Inform the server about the new router state
+
+        This function will also update the metadata proxy, the radvd daemon,
+        process the prefix delegation and notify the L3 extensions. If the
+        HA router changes to "master", this transition will be delayed for at
+        least "ha_vrrp_advert_int" seconds. When the "master" router
+        transitions to "backup", "keepalived" will set the rest of the HA
+        routers to "master" until it decides which one should be the only
+        "master". A transition from "backup" to "master" and then back to
+        "backup" should not be registered in the Neutron server.
+
+        :param router_id: router ID
+        :param state: ['master', 'backup']
+        """
+        if not self._update_transition_state(router_id, state):
+            eventlet.spawn_n(self._enqueue_state_change, router_id, state)
+            eventlet.sleep(0)
+
+    def _enqueue_state_change(self, router_id, state):
+        # NOTE(ralonsoh): move 'master' and 'backup' constants to n-lib
+        if state == 'master':
+            eventlet.sleep(self.conf.ha_vrrp_advert_int)
+        if self._update_transition_state(router_id) != state:
+            # If the current "transition state" is not the initial "state"
+            # sent to update the router, the actual router state is the same
+            # as the "transition state" (e.g.: backup-->master-->backup).
+            return
+
         state_change_data = {"router_id": router_id, "state": state}
         LOG.info('Router %(router_id)s transitioned to %(state)s',
                  state_change_data)
@@ -125,6 +165,7 @@
         # configuration to keepalived-state-change in order to remove the
         # dependency that currently exists on l3-agent running for the IPv6
         # failover.
+ ri.ha_state = state self._configure_ipv6_params(ri, state) if self.conf.enable_metadata_proxy: self._update_metadata_proxy(ri, router_id, state) @@ -182,9 +223,6 @@ ri.disable_radvd() def notify_server(self, batched_events): - eventlet.spawn_n(self._notify_server, batched_events) - - def _notify_server(self, batched_events): translated_states = dict((router_id, TRANSLATION_MAP[state]) for router_id, state in batched_events) LOG.debug('Updating server with HA routers states %s', diff -Nru neutron-14.0.2/neutron/agent/l3/ha_router.py neutron-14.0.3/neutron/agent/l3/ha_router.py --- neutron-14.0.2/neutron/agent/l3/ha_router.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/agent/l3/ha_router.py 2019-10-22 19:46:04.000000000 +0000 @@ -69,6 +69,8 @@ self.ha_port = None self.keepalived_manager = None self.state_change_callback = state_change_callback + self._ha_state = None + self._ha_state_path = None def create_router_namespace_object( self, router_id, agent_conf, iface_driver, use_ipv6): @@ -76,6 +78,13 @@ router_id, agent_conf, iface_driver, use_ipv6) @property + def ha_state_path(self): + if not self._ha_state_path and self.keepalived_manager: + self._ha_state_path = (self.keepalived_manager. + get_full_config_file_path('state')) + return self._ha_state_path + + @property def ha_priority(self): return self.router.get('priority', keepalived.HA_DEFAULT_PRIORITY) @@ -85,22 +94,20 @@ @property def ha_state(self): - state = None - ha_state_path = self.keepalived_manager.get_full_config_file_path( - 'state') + if self._ha_state: + return self._ha_state try: - with open(ha_state_path, 'r') as f: - state = f.read() + with open(self.ha_state_path, 'r') as f: + self._ha_state = f.read() except (OSError, IOError): LOG.debug('Error while reading HA state for %s', self.router_id) - return state or 'unknown' + return self._ha_state or 'unknown' @ha_state.setter def ha_state(self, new_state): - ha_state_path = self.keepalived_manager.get_full_config_file_path( - 'state') + self._ha_state = new_state try: - with open(ha_state_path, 'w') as f: + with open(self.ha_state_path, 'w') as f: f.write(new_state) except (OSError, IOError): LOG.error('Error while writing HA state for %s', diff -Nru neutron-14.0.2/neutron/agent/l3/router_info.py neutron-14.0.3/neutron/agent/l3/router_info.py --- neutron-14.0.2/neutron/agent/l3/router_info.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/agent/l3/router_info.py 2019-10-22 19:46:04.000000000 +0000 @@ -846,9 +846,8 @@ def _prevent_snat_for_internal_traffic_rule(self, interface_name): return ( - 'POSTROUTING', '! -i %(interface_name)s ' - '! -o %(interface_name)s -m conntrack ! ' - '--ctstate DNAT -j ACCEPT' % + 'POSTROUTING', '! -o %(interface_name)s -m conntrack ' + '! 
--ctstate DNAT -j ACCEPT' % {'interface_name': interface_name}) def external_gateway_nat_fip_rules(self, ex_gw_ip, interface_name): diff -Nru neutron-14.0.2/neutron/agent/linux/dhcp.py neutron-14.0.3/neutron/agent/linux/dhcp.py --- neutron-14.0.2/neutron/agent/linux/dhcp.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/agent/linux/dhcp.py 2019-10-22 19:46:04.000000000 +0000 @@ -353,14 +353,10 @@ '--dhcp-match=set:ipxe,175', '--dhcp-userclass=set:ipxe6,iPXE', '--local-service', + '--bind-dynamic', ] - if self.device_manager.driver.bridged: + if not self.device_manager.driver.bridged: cmd += [ - '--bind-interfaces', - ] - else: - cmd += [ - '--bind-dynamic', '--bridge-interface=%s,tap*' % self.interface_name, ] diff -Nru neutron-14.0.2/neutron/agent/linux/ip_lib.py neutron-14.0.3/neutron/agent/linux/ip_lib.py --- neutron-14.0.2/neutron/agent/linux/ip_lib.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/agent/linux/ip_lib.py 2019-10-22 19:46:04.000000000 +0000 @@ -993,14 +993,24 @@ return netns.listnetns(**kwargs) -def network_namespace_exists(namespace, **kwargs): +def network_namespace_exists(namespace, try_is_ready=False, **kwargs): """Check if a network namespace exists. :param namespace: The name of the namespace to check + :param try_is_ready: Try to open the namespace to know if the namespace + is ready to be operated. :param kwargs: Callers add any filters they use as kwargs """ - output = list_network_namespaces(**kwargs) - return namespace in output + if not try_is_ready: + output = list_network_namespaces(**kwargs) + return namespace in output + + try: + privileged.open_namespace(namespace) + return True + except (RuntimeError, OSError): + pass + return False def ensure_device_is_ready(device_name, namespace=None): diff -Nru neutron-14.0.2/neutron/agent/linux/openvswitch_firewall/firewall.py neutron-14.0.3/neutron/agent/linux/openvswitch_firewall/firewall.py --- neutron-14.0.2/neutron/agent/linux/openvswitch_firewall/firewall.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/agent/linux/openvswitch_firewall/firewall.py 2019-10-22 19:46:04.000000000 +0000 @@ -17,11 +17,13 @@ import contextlib import copy +import eventlet import netaddr from neutron_lib.callbacks import events as callbacks_events from neutron_lib.callbacks import registry as callbacks_registry from neutron_lib.callbacks import resources as callbacks_resources from neutron_lib import constants as lib_const +from oslo_config import cfg from oslo_log import log as logging from oslo_utils import netutils @@ -396,6 +398,7 @@ applied """ + self.permitted_ethertypes = cfg.CONF.SECURITYGROUP.permitted_ethertypes self.int_br = self.initialize_bridge(integration_bridge) self.sg_port_map = SGPortMap() self.conj_ip_manager = ConjIPFlowManager(self) @@ -533,7 +536,9 @@ if of_port.ofport != ovs_port.ofport: self.sg_port_map.remove_port(of_port) of_port = OFPort(port, ovs_port, of_port.vlan_tag) - self.sg_port_map.update_port(of_port, port) + self.sg_port_map.create_port(of_port, port) + else: + self.sg_port_map.update_port(of_port, port) return of_port @@ -662,6 +667,8 @@ """Pass packets from these ports directly to ingress pipeline.""" for port_id in port_ids: self._initialize_egress_no_port_security(port_id) + # yield to let other greenthreads proceed + eventlet.sleep(0) def remove_trusted_ports(self, port_ids): for port_id in port_ids: @@ -883,6 +890,27 @@ actions='resubmit(,%d)' % ovs_consts.DROPPED_TRAFFIC_TABLE ) + # Allow custom ethertypes + for permitted_ethertype in 
self.permitted_ethertypes: + if permitted_ethertype[:2] == '0x': + try: + hex_ethertype = hex(int(permitted_ethertype, base=16)) + action = ('resubmit(,%d)' % + ovs_consts.ACCEPTED_EGRESS_TRAFFIC_NORMAL_TABLE) + self._add_flow( + table=ovs_consts.BASE_EGRESS_TABLE, + priority=95, + dl_type=hex_ethertype, + reg_port=port.ofport, + actions=action + ) + continue + except ValueError: + pass + LOG.warning("Custom ethertype %(permitted_ethertype)s is not " + "a hexadecimal number.", + {'permitted_ethertype': permitted_ethertype}) + # Drop all remaining egress connections self._add_flow( table=ovs_consts.BASE_EGRESS_TABLE, @@ -998,6 +1026,7 @@ reg_port=port.ofport, actions='output:{:d}'.format(port.ofport) ) + self._initialize_ingress_ipv6_icmp(port) # DHCP offers diff -Nru neutron-14.0.2/neutron/agent/resource_cache.py neutron-14.0.3/neutron/agent/resource_cache.py --- neutron-14.0.2/neutron/agent/resource_cache.py 2019-07-01 02:54:48.000000000 +0000 +++ neutron-14.0.3/neutron/agent/resource_cache.py 2019-10-22 19:46:03.000000000 +0000 @@ -49,7 +49,7 @@ def start_watcher(self): self._watcher = RemoteResourceWatcher(self) - def get_resource_by_id(self, rtype, obj_id): + def get_resource_by_id(self, rtype, obj_id, agent_restarted=False): """Returns None if it doesn't exist.""" if obj_id in self._deleted_ids_by_type[rtype]: return None @@ -57,10 +57,12 @@ if cached_item: return cached_item # try server in case object existed before agent start - self._flood_cache_for_query(rtype, id=(obj_id, )) + self._flood_cache_for_query(rtype, id=(obj_id, ), + agent_restarted=agent_restarted) return self._type_cache(rtype).get(obj_id) - def _flood_cache_for_query(self, rtype, **filter_kwargs): + def _flood_cache_for_query(self, rtype, agent_restarted=False, + **filter_kwargs): """Load info from server for first query. Queries the server if this is the first time a given query for @@ -81,7 +83,8 @@ # been updated already and pushed to us in another thread. LOG.debug("Ignoring stale update for %s: %s", rtype, resource) continue - self.record_resource_update(context, rtype, resource) + self.record_resource_update(context, rtype, resource, + agent_restarted=agent_restarted) LOG.debug("%s resources returned for queries %s", len(resources), query_ids) self._satisfied_server_queries.update(query_ids) @@ -159,7 +162,8 @@ return True return False - def record_resource_update(self, context, rtype, resource): + def record_resource_update(self, context, rtype, resource, + agent_restarted=False): """Takes in an OVO and generates an event on relevant changes. A change is deemed to be relevant if it is not stale and if any @@ -190,7 +194,8 @@ registry.notify(rtype, events.AFTER_UPDATE, self, context=context, changed_fields=changed_fields, existing=existing, updated=resource, - resource_id=resource.id) + resource_id=resource.id, + agent_restarted=agent_restarted) def record_resource_delete(self, context, rtype, resource_id): # deletions are final, record them so we never diff -Nru neutron-14.0.2/neutron/agent/rpc.py neutron-14.0.3/neutron/agent/rpc.py --- neutron-14.0.2/neutron/agent/rpc.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/agent/rpc.py 2019-10-22 19:46:04.000000000 +0000 @@ -129,7 +129,8 @@ devices=devices, agent_id=agent_id, host=host) def get_devices_details_list_and_failed_devices(self, context, devices, - agent_id, host=None): + agent_id, host=None, + **kwargs): """Get devices details and the list of devices that failed. This method returns the devices details. 
If an error is thrown when @@ -228,6 +229,7 @@ the payloads the handlers are expecting (an ID). """ rtype = rtype.lower() # all legacy handlers don't camelcase + agent_restarted = kwargs.pop("agent_restarted", None) method, host_with_activation, host_with_deactivation = ( self._get_method_host(rtype, event, **kwargs)) if not hasattr(self._legacy_interface, method): @@ -244,6 +246,9 @@ else: payload = {rtype: {'id': resource_id}, '%s_id' % rtype: resource_id} + if method == "port_update" and agent_restarted is not None: + # Mark ovs-agent restart for local port_update + payload["agent_restarted"] = agent_restarted getattr(self._legacy_interface, method)(context, **payload) def _get_method_host(self, rtype, event, **kwargs): @@ -288,20 +293,23 @@ return method, host_with_activation, host_with_deactivation def get_devices_details_list_and_failed_devices(self, context, devices, - agent_id, host=None): + agent_id, host=None, + agent_restarted=False): result = {'devices': [], 'failed_devices': []} for device in devices: try: result['devices'].append( - self.get_device_details(context, device, agent_id, host)) + self.get_device_details(context, device, agent_id, host, + agent_restarted)) except Exception: LOG.exception("Failed to get details for device %s", device) result['failed_devices'].append(device) return result - def get_device_details(self, context, device, agent_id, host=None): + def get_device_details(self, context, device, agent_id, host=None, + agent_restarted=False): port_obj = self.remote_resource_cache.get_resource_by_id( - resources.PORT, device) + resources.PORT, device, agent_restarted) if not port_obj: LOG.debug("Device %s does not exist in cache.", device) return {'device': device} diff -Nru neutron-14.0.2/neutron/conf/agent/securitygroups_rpc.py neutron-14.0.3/neutron/conf/agent/securitygroups_rpc.py --- neutron-14.0.2/neutron/conf/agent/securitygroups_rpc.py 2019-07-01 02:54:48.000000000 +0000 +++ neutron-14.0.3/neutron/conf/agent/securitygroups_rpc.py 2019-10-22 19:46:03.000000000 +0000 @@ -36,7 +36,13 @@ default=True, help=_('Use ipset to speed-up the iptables based security groups. ' 'Enabling ipset support requires that ipset is installed on L2 ' - 'agent node.')) + 'agent node.')), + cfg.ListOpt( + 'permitted_ethertypes', + default=[], + help=_('Comma-separated list of ethertypes to be permitted, in ' + 'hexadecimal (starting with "0x"). 
For example, "0x4008" ' + 'to permit InfiniBand.')) ] diff -Nru neutron-14.0.2/neutron/db/db_base_plugin_common.py neutron-14.0.3/neutron/db/db_base_plugin_common.py --- neutron-14.0.2/neutron/db/db_base_plugin_common.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/db/db_base_plugin_common.py 2019-10-22 19:46:04.000000000 +0000 @@ -215,7 +215,7 @@ "admin_state_up": port["admin_state_up"], "status": port["status"], "fixed_ips": [{'subnet_id': ip["subnet_id"], - 'ip_address': ip["ip_address"]} + 'ip_address': str(ip["ip_address"])} for ip in port["fixed_ips"]], "device_id": port["device_id"], "device_owner": port["device_owner"]} @@ -287,6 +287,10 @@ page_reverse=False): pager = base_obj.Pager(sorts, limit, page_reverse, marker) filters = filters or {} + # turn the CIDRs into a proper subnets + if filters.get('cidr'): + filters.update( + {'cidr': [netaddr.IPNetwork(x).cidr for x in filters['cidr']]}) # TODO(ihrachys) remove explicit reader usage when subnet OVO switches # to engine facade by default with db_api.CONTEXT_READER.using(context): diff -Nru neutron-14.0.2/neutron/db/ipam_pluggable_backend.py neutron-14.0.3/neutron/db/ipam_pluggable_backend.py --- neutron-14.0.2/neutron/db/ipam_pluggable_backend.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/db/ipam_pluggable_backend.py 2019-10-22 19:46:04.000000000 +0000 @@ -345,7 +345,7 @@ try: subnets = self._ipam_get_subnets( context, network_id=port['network_id'], host=host, - service_type=port.get('device_owner')) + service_type=port.get('device_owner'), fixed_configured=True) except ipam_exc.DeferIpam: subnets = [] diff -Nru neutron-14.0.2/neutron/db/l3_db.py neutron-14.0.3/neutron/db/l3_db.py --- neutron-14.0.2/neutron/db/l3_db.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/db/l3_db.py 2019-10-22 19:46:04.000000000 +0000 @@ -985,21 +985,13 @@ def _remove_interface_by_port(self, context, router_id, port_id, subnet_id, owner): - obj = l3_obj.RouterPort.get_object( - context, - port_id=port_id, - router_id=router_id, - port_type=owner - ) - if obj: - try: - port = self._core_plugin.get_port(context, obj.port_id) - except n_exc.PortNotFound: - raise l3_exc.RouterInterfaceNotFound( - router_id=router_id, port_id=port_id) - else: + ports = port_obj.Port.get_ports_by_router_and_port( + context, router_id, owner, port_id) + if len(ports) < 1: raise l3_exc.RouterInterfaceNotFound( router_id=router_id, port_id=port_id) + + port = ports[0] port_subnet_ids = [fixed_ip['subnet_id'] for fixed_ip in port['fixed_ips']] if subnet_id and subnet_id not in port_subnet_ids: @@ -1012,46 +1004,41 @@ context, router_id, port_subnet_id) self._core_plugin.delete_port(context, port['id'], l3_port_check=False) - return (port, subnets) + return port, subnets def _remove_interface_by_subnet(self, context, router_id, subnet_id, owner): self._confirm_router_interface_not_in_use( context, router_id, subnet_id) subnet = self._core_plugin.get_subnet(context, subnet_id) + ports = port_obj.Port.get_ports_by_router_and_network( + context, router_id, owner, subnet['network_id']) - try: - ports = port_obj.Port.get_ports_by_router( - context, router_id, owner, subnet) - - for p in ports: - try: - p = self._core_plugin.get_port(context, p.id) - except n_exc.PortNotFound: - continue - port_subnets = [fip['subnet_id'] for fip in p['fixed_ips']] - if subnet_id in port_subnets and len(port_subnets) > 1: - # multiple prefix port - delete prefix from port - fixed_ips = [dict(fip) for fip in p['fixed_ips'] - if fip['subnet_id'] != 
subnet_id] - self._core_plugin.update_port( - context, p['id'], {'port': {'fixed_ips': fixed_ips}}) - return (p, [subnet]) - elif subnet_id in port_subnets: - # only one subnet on port - delete the port - self._core_plugin.delete_port(context, p['id'], - l3_port_check=False) - return (p, [subnet]) - except exc.NoResultFound: - pass + for p in ports: + try: + p = self._core_plugin.get_port(context, p.id) + except n_exc.PortNotFound: + continue + port_subnets = [fip['subnet_id'] for fip in p['fixed_ips']] + if subnet_id in port_subnets and len(port_subnets) > 1: + # multiple prefix port - delete prefix from port + fixed_ips = [dict(fip) for fip in p['fixed_ips'] + if fip['subnet_id'] != subnet_id] + self._core_plugin.update_port( + context, p['id'], {'port': {'fixed_ips': fixed_ips}}) + return (p, [subnet]) + elif subnet_id in port_subnets: + # only one subnet on port - delete the port + self._core_plugin.delete_port(context, p['id'], + l3_port_check=False) + return (p, [subnet]) raise l3_exc.RouterInterfaceNotFoundForSubnet( router_id=router_id, subnet_id=subnet_id) @db_api.retry_if_session_inactive() def remove_router_interface(self, context, router_id, interface_info): - remove_by_port, remove_by_subnet = ( - self._validate_interface_info(interface_info, for_removal=True) - ) + remove_by_port, _ = self._validate_interface_info(interface_info, + for_removal=True) port_id = interface_info.get('port_id') subnet_id = interface_info.get('subnet_id') device_owner = self._get_device_owner(context, router_id) @@ -1059,9 +1046,6 @@ port, subnets = self._remove_interface_by_port(context, router_id, port_id, subnet_id, device_owner) - # remove_by_subnet is not used here, because the validation logic of - # _validate_interface_info ensures that at least one of remote_by_* - # is True. 
         else:
             port, subnets = self._remove_interface_by_subnet(
                 context, router_id, subnet_id, device_owner)
@@ -1418,8 +1402,10 @@
                 floatingip_id=fip_id,
                 floatingip_db=floatingip_db)
 
-        self._core_plugin.update_port(context.elevated(), external_port['id'],
-                                      {'port': {'device_id': fip_id}})
+        self._core_plugin.update_port(
+            context.elevated(), external_port['id'],
+            {'port': {'device_id': fip_id,
+                      'project_id': fip['tenant_id']}})
         registry.notify(resources.FLOATING_IP,
                         events.AFTER_UPDATE,
                         self._update_fip_assoc,
diff -Nru neutron-14.0.2/neutron/db/l3_dvr_db.py neutron-14.0.3/neutron/db/l3_dvr_db.py
--- neutron-14.0.2/neutron/db/l3_dvr_db.py	2019-07-01 02:54:56.000000000 +0000
+++ neutron-14.0.3/neutron/db/l3_dvr_db.py	2019-10-22 19:46:04.000000000 +0000
@@ -30,6 +30,7 @@
 from neutron_lib.plugins import constants as plugin_constants
 from neutron_lib.plugins import directory
 from neutron_lib.plugins import utils as plugin_utils
+from oslo_concurrency import lockutils
 from oslo_config import cfg
 from oslo_log import helpers as log_helper
 from oslo_log import log as logging
@@ -156,6 +157,19 @@
 
     @registry.receives(resources.ROUTER, [events.AFTER_UPDATE],
                        priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE)
+    def _delete_distributed_port_bindings_after_change(self, resource, event,
+                                                       trigger, context,
+                                                       router_id, router,
+                                                       request_attrs,
+                                                       router_db, **kwargs):
+        old_router = kwargs['old_router']
+        if (old_router and old_router['distributed'] and not
+                router['distributed']):
+            self._core_plugin.delete_distributed_port_bindings_by_router_id(
+                context.elevated(), router_db['id'])
+
+    @registry.receives(resources.ROUTER, [events.AFTER_UPDATE],
+                       priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE)
     def _delete_snat_interfaces_after_change(self, resource, event, trigger,
                                              context, router_id, router,
                                              request_attrs, router_db,
@@ -966,13 +980,9 @@
 
     def create_fip_agent_gw_port_if_not_exists(
             self, context, network_id, host):
-        """Function to return the FIP Agent GW port.
-
-        This function will create a FIP Agent GW port
-        if required. If the port already exists, it
-        will return the existing port and will not
-        create a new one.
-        """
+        # TODO(slaweq): add a proper constraint at the database level to
+        # avoid creating duplicated Floating IP gateway ports for the same
+        # network and the same L3 agent. When this is done, we can get rid
+        # of this lock.
         try:
             l3_agent_db = self._get_agent_by_type_and_host(
                 context, const.AGENT_TYPE_L3, host)
@@ -984,32 +994,48 @@
         l3_agent_mode = self._get_agent_mode(l3_agent_db)
         if l3_agent_mode == const.L3_AGENT_MODE_DVR_NO_EXTERNAL:
             return
-        if l3_agent_db:
-            LOG.debug("Agent ID exists: %s", l3_agent_db['id'])
-            agent_port = self._get_agent_gw_ports_exist_for_network(
-                context, network_id, host, l3_agent_db['id'])
+        if not l3_agent_db:
+            return
+        lock_name = 'fip-gw-lock-' + network_id + '-' + host
+        with lockutils.lock(lock_name, external=True):
+            return self._create_fip_agent_gw_port_if_not_exists(
+                context, network_id, host, l3_agent_db)
+
+    def _create_fip_agent_gw_port_if_not_exists(self, context, network_id,
+                                                host, l3_agent_db):
+        """Function to return the FIP Agent GW port.
+
+        This function will create a FIP Agent GW port
+        if required. If the port already exists, it
+        will return the existing port and will not
+        create a new one.
+ """ + LOG.debug("Agent ID exists: %s", l3_agent_db['id']) + agent_port = self._get_agent_gw_ports_exist_for_network( + context, network_id, host, l3_agent_db['id']) + if not agent_port: + LOG.info("Floating IP Agent Gateway port for network %s " + "does not exist on host %s. Creating one.", + network_id, host) + port_data = {'tenant_id': '', + 'network_id': network_id, + 'device_id': l3_agent_db['id'], + 'device_owner': const.DEVICE_OWNER_AGENT_GW, + portbindings.HOST_ID: host, + 'admin_state_up': True, + 'name': ''} + agent_port = plugin_utils.create_port( + self._core_plugin, context, {'port': port_data}) if not agent_port: - LOG.info("Floating IP Agent Gateway port does not exist, " - "creating one") - port_data = {'tenant_id': '', - 'network_id': network_id, - 'device_id': l3_agent_db['id'], - 'device_owner': const.DEVICE_OWNER_AGENT_GW, - portbindings.HOST_ID: host, - 'admin_state_up': True, - 'name': ''} - agent_port = plugin_utils.create_port( - self._core_plugin, context, {'port': port_data}) - if not agent_port: - msg = _("Unable to create Floating IP Agent Gateway port") - raise n_exc.BadRequest(resource='router', msg=msg) - LOG.debug("Floating IP Agent Gateway port %(gw)s created " - "for the destination host: %(dest_host)s", - {'gw': agent_port, - 'dest_host': host}) + msg = _("Unable to create Floating IP Agent Gateway port") + raise n_exc.BadRequest(resource='router', msg=msg) + LOG.debug("Floating IP Agent Gateway port %(gw)s created " + "for the destination host: %(dest_host)s", + {'gw': agent_port, + 'dest_host': host}) - self._populate_mtu_and_subnets_for_ports(context, [agent_port]) - return agent_port + self._populate_mtu_and_subnets_for_ports(context, [agent_port]) + return agent_port def _generate_arp_table_and_notify_agent( self, context, fixed_ip, mac_address, notifier): diff -Nru neutron-14.0.2/neutron/db/qos/models.py neutron-14.0.3/neutron/db/qos/models.py --- neutron-14.0.2/neutron/db/qos/models.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/db/qos/models.py 2019-10-22 19:46:03.000000000 +0000 @@ -54,6 +54,12 @@ models_v2.Network, load_on_pending=True, backref=sa.orm.backref("qos_policy_binding", uselist=False, cascade='delete', lazy='joined')) + port = sa.orm.relationship( + models_v2.Port, + primaryjoin='QosNetworkPolicyBinding.network_id == Port.network_id', + foreign_keys=network_id, + backref=sa.orm.backref('qos_network_policy_binding', uselist=False, + viewonly=True, lazy='joined')) class QosFIPPolicyBinding(model_base.BASEV2): diff -Nru neutron-14.0.2/neutron/db/securitygroups_db.py neutron-14.0.3/neutron/db/securitygroups_db.py --- neutron-14.0.2/neutron/db/securitygroups_db.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/db/securitygroups_db.py 2019-10-22 19:46:04.000000000 +0000 @@ -771,7 +771,8 @@ tenant_id = kwargs['original_' + resource]['tenant_id'] else: tenant_id = kwargs[resource]['tenant_id'] - self._ensure_default_security_group(context, tenant_id) + if tenant_id: + self._ensure_default_security_group(context, tenant_id) def _ensure_default_security_group(self, context, tenant_id): """Create a default security group if one doesn't exist. 
diff -Nru neutron-14.0.2/neutron/notifiers/batch_notifier.py neutron-14.0.3/neutron/notifiers/batch_notifier.py --- neutron-14.0.2/neutron/notifiers/batch_notifier.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/notifiers/batch_notifier.py 2019-10-22 19:46:04.000000000 +0000 @@ -10,17 +10,17 @@ # License for the specific language governing permissions and limitations # under the License. +import threading + import eventlet -from neutron_lib.utils import runtime -from oslo_utils import uuidutils class BatchNotifier(object): def __init__(self, batch_interval, callback): - self.pending_events = [] + self._pending_events = eventlet.Queue() self.callback = callback self.batch_interval = batch_interval - self._lock_identifier = 'notifier-%s' % uuidutils.generate_uuid() + self._mutex = threading.Lock() def queue_event(self, event): """Called to queue sending an event with the next batch of events. @@ -33,32 +33,35 @@ This replaces the loopingcall with a mechanism that creates a short-lived thread on demand whenever an event is queued. That thread - will wait for a lock, send all queued events and then sleep for - 'batch_interval' seconds to allow other events to queue up. - - This effectively acts as a rate limiter to only allow 1 batch per - 'batch_interval' seconds. + will check if the lock is released, send all queued events and then + sleep for 'batch_interval' seconds. If at the end of this sleep time, + other threads have added new events to the event queue, the same thread + will process them. + + At the same time, other threads will be able to add new events to the + queue and will spawn new "synced_send" threads to process them. But if + the mutex is locked, the spawned thread will end immediately. :param event: the event that occurred. 
""" if not event: return - self.pending_events.append(event) + self._pending_events.put(event) - @runtime.synchronized(self._lock_identifier) def synced_send(): - self._notify() - # sleeping after send while holding the lock allows subsequent - # events to batch up - eventlet.sleep(self.batch_interval) + if not self._mutex.locked(): + with self._mutex: + while not self._pending_events.empty(): + self._notify() + # sleeping after send while holding the lock allows + # subsequent events to batch up + eventlet.sleep(self.batch_interval) eventlet.spawn_n(synced_send) def _notify(self): - if not self.pending_events: - return - - batched_events = self.pending_events - self.pending_events = [] + batched_events = [] + while not self._pending_events.empty(): + batched_events.append(self._pending_events.get()) self.callback(batched_events) diff -Nru neutron-14.0.2/neutron/objects/ports.py neutron-14.0.3/neutron/objects/ports.py --- neutron-14.0.2/neutron/objects/ports.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/objects/ports.py 2019-10-22 19:46:04.000000000 +0000 @@ -14,6 +14,7 @@ import netaddr from neutron_lib import constants +from oslo_log import log as logging from oslo_utils import versionutils from oslo_versionedobjects import fields as obj_fields @@ -28,6 +29,8 @@ from neutron.objects.qos import binding from neutron.plugins.ml2 import models as ml2_models +LOG = logging.getLogger(__name__) + class PortBindingBase(base.NeutronDbObject): @@ -265,7 +268,8 @@ # Version 1.2: Added segment_id to binding_levels # Version 1.3: distributed_binding -> distributed_bindings # Version 1.4: Attribute binding becomes ListOfObjectsField - VERSION = '1.4' + # Version 1.5: Added qos_network_policy_id field + VERSION = '1.5' db_model = models_v2.Port @@ -309,6 +313,8 @@ default=None, ), 'qos_policy_id': common_types.UUIDField(nullable=True, default=None), + 'qos_network_policy_id': common_types.UUIDField(nullable=True, + default=None), 'binding_levels': obj_fields.ListOfObjectsField( 'PortBindingLevel', nullable=True @@ -332,6 +338,7 @@ 'dns', 'fixed_ips', 'qos_policy_id', + 'qos_network_policy_id', 'security', 'security_group_ids', ] @@ -460,16 +467,18 @@ } else: self.security_group_ids = set() - self.obj_reset_changes(['security_group_ids']) + fields_to_change = ['security_group_ids'] # extract qos policy binding if db_obj.get('qos_policy_binding'): - self.qos_policy_id = ( - db_obj.qos_policy_binding.policy_id - ) - else: - self.qos_policy_id = None - self.obj_reset_changes(['qos_policy_id']) + self.qos_policy_id = db_obj.qos_policy_binding.policy_id + fields_to_change.append('qos_policy_id') + if db_obj.get('qos_network_policy_binding'): + self.qos_network_policy_id = ( + db_obj.qos_network_policy_binding.policy_id) + fields_to_change.append('qos_network_policy_binding') + + self.obj_reset_changes(fields_to_change) def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) @@ -497,17 +506,69 @@ constants.ACTIVE): primitive['binding'] = a_binding break + if _target_version < (1, 5): + primitive.pop('qos_network_policy_id', None) @classmethod - def get_ports_by_router(cls, context, router_id, owner, subnet): - rport_qry = context.session.query(models_v2.Port).join( - l3.RouterPort) - ports = rport_qry.filter( - l3.RouterPort.router_id == router_id, - l3.RouterPort.port_type == owner, - models_v2.Port.network_id == subnet['network_id'] - ) - return [cls._load_object(context, db_obj) for db_obj in ports.all()] + def 
get_ports_by_router_and_network(cls, context, router_id, owner,
+                                        network_id):
+        """Returns port objects filtered by router ID, owner and network ID"""
+        rports_filter = (models_v2.Port.network_id == network_id, )
+        router_filter = (models_v2.Port.network_id == network_id, )
+        return cls._get_ports_by_router(context, router_id, owner,
+                                        rports_filter, router_filter)
+
+    @classmethod
+    def get_ports_by_router_and_port(cls, context, router_id, owner, port_id):
+        """Returns port objects filtered by router ID, owner and port ID"""
+        rports_filter = (l3.RouterPort.port_id == port_id, )
+        router_filter = (models_v2.Port.id == port_id, )
+        return cls._get_ports_by_router(context, router_id, owner,
+                                        rports_filter, router_filter)
+
+    @classmethod
+    def _get_ports_by_router(cls, context, router_id, owner, rports_filter,
+                             router_filter):
+        """Returns port objects filtered by router ID and owner
+
+        The method will receive extra filters depending on the caller (filter
+        by network or filter by port).
+
+        The ports are retrieved using:
+        - The RouterPort records. Each time a port is assigned to a router,
+          a new RouterPort record is added to the DB.
+        - The port owner and device_id information.
+
+        Both searches should return the same result. If not, a warning message
+        is logged and the port list to be returned is completed with the
+        missing ones.
+        """
+        rports_filter += (l3.RouterPort.router_id == router_id,
+                          l3.RouterPort.port_type == owner)
+        router_filter += (models_v2.Port.device_id == router_id,
+                          models_v2.Port.device_owner == owner)
+
+        ports = context.session.query(models_v2.Port).join(
+            l3.RouterPort).filter(*rports_filter)
+        ports_rports = [cls._load_object(context, db_obj)
+                        for db_obj in ports.all()]
+
+        ports = context.session.query(models_v2.Port).filter(*router_filter)
+        ports_router = [cls._load_object(context, db_obj)
+                        for db_obj in ports.all()]
+
+        ports_rports_ids = {p.id for p in ports_rports}
+        ports_router_ids = {p.id for p in ports_router}
+        missing_port_ids = ports_router_ids - ports_rports_ids
+        if missing_port_ids:
+            LOG.warning('The following ports, assigned to router '
+                        '%(router_id)s, do not have a "routerport" register: '
+                        '%(port_ids)s', {'router_id': router_id,
+                                         'port_ids': missing_port_ids})
+            port_objs = [p for p in ports_router if p.id in missing_port_ids]
+            ports_rports += port_objs
+
+        return ports_rports
 
     @classmethod
     def get_ports_ids_by_security_groups(cls, context, security_group_ids,
diff -Nru neutron-14.0.2/neutron/objects/rbac_db.py neutron-14.0.3/neutron/objects/rbac_db.py
--- neutron-14.0.2/neutron/objects/rbac_db.py	2019-07-01 02:54:56.000000000 +0000
+++ neutron-14.0.3/neutron/objects/rbac_db.py	2019-10-22 19:46:04.000000000 +0000
@@ -89,35 +89,6 @@
                 context.tenant_id))
 
     @classmethod
-    def get_object(cls, context, **kwargs):
-        # We want to get the policy regardless of its tenant id. We'll make
-        # sure the tenant has permission to access the policy later on.
- admin_context = context.elevated() - with cls.db_context_reader(admin_context): - objs = super(RbacNeutronDbObjectMixin, - cls).get_objects(admin_context, _pager, - validate_filters, **kwargs) - result = [] - for obj in objs: - if not cls.is_accessible(context, obj): - continue - result.append(obj) - return result - - @classmethod def _get_db_obj_rbac_entries(cls, context, rbac_obj_id, rbac_action): rbac_db_model = cls.rbac_db_cls.db_model return db_utils.model_query(context, rbac_db_model).filter( diff -Nru neutron-14.0.2/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py neutron-14.0.3/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py --- neutron-14.0.2/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py 2019-10-22 19:46:03.000000000 +0000 @@ -44,7 +44,8 @@ # Various tables for DVR use of integration bridge flows DVR_TO_SRC_MAC = 1 DVR_TO_SRC_MAC_VLAN = 2 - +ARP_DVR_MAC_TO_DST_MAC = 3 +ARP_DVR_MAC_TO_DST_MAC_VLAN = 4 CANARY_TABLE = 23 # Table for ARP poison/spoofing prevention rules diff -Nru neutron-14.0.2/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py neutron-14.0.3/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py --- neutron-14.0.2/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py 2019-10-22 19:46:03.000000000 +0000 @@ -18,6 +18,8 @@ from os_ken.lib.packet import icmpv6 from os_ken.lib.packet import in_proto +from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants + class OVSDVRProcessMixin(object): """Common logic for br-tun and br-phys' DVR_PROCESS tables. 
@@ -37,7 +39,7 @@
         (_dp, ofp, ofpp) = self._get_dp()
         match = self._dvr_process_ipv4_match(ofp, ofpp, vlan_tag=vlan_tag,
                                              gateway_ip=gateway_ip)
-        self.install_drop(table_id=self.dvr_process_table_id,
+        self.install_drop(table_id=constants.FLOOD_TO_TUN,
                           priority=3, match=match)
 
@@ -45,7 +47,7 @@
         (_dp, ofp, ofpp) = self._get_dp()
         match = self._dvr_process_ipv4_match(ofp, ofpp, vlan_tag=vlan_tag,
                                              gateway_ip=gateway_ip)
-        self.uninstall_flows(table_id=self.dvr_process_table_id,
+        self.uninstall_flows(table_id=constants.FLOOD_TO_TUN,
                              match=match)
 
     @staticmethod
@@ -61,14 +63,14 @@
         (_dp, ofp, ofpp) = self._get_dp()
         match = self._dvr_process_ipv6_match(ofp, ofpp, vlan_tag=vlan_tag,
                                              gateway_mac=gateway_mac)
-        self.install_drop(table_id=self.dvr_process_table_id, priority=3,
+        self.install_drop(table_id=constants.FLOOD_TO_TUN, priority=3,
                           match=match)
 
     def delete_dvr_process_ipv6(self, vlan_tag, gateway_mac):
         (_dp, ofp, ofpp) = self._get_dp()
         match = self._dvr_process_ipv6_match(ofp, ofpp, vlan_tag=vlan_tag,
                                              gateway_mac=gateway_mac)
-        self.uninstall_flows(table_id=self.dvr_process_table_id,
+        self.uninstall_flows(table_id=constants.FLOOD_TO_TUN,
                              match=match)
 
     @staticmethod
diff -Nru neutron-14.0.2/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py neutron-14.0.3/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py
--- neutron-14.0.2/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py	2019-07-01 02:54:56.000000000 +0000
+++ neutron-14.0.3/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py	2019-10-22 19:46:04.000000000 +0000
@@ -96,6 +96,42 @@
         self.uninstall_flows(match=match)
 
     @staticmethod
+    def _arp_dvr_dst_mac_match(ofp, ofpp, vlan, dvr_mac):
+        # If eth_dst is equal to the dvr mac of this host, then
+        # flag it as matched.
+        return ofpp.OFPMatch(vlan_vid=vlan | ofp.OFPVID_PRESENT,
+                             eth_dst=dvr_mac)
+
+    @staticmethod
+    def _dvr_dst_mac_table_id(network_type):
+        if network_type == p_const.TYPE_VLAN:
+            return constants.ARP_DVR_MAC_TO_DST_MAC_VLAN
+        else:
+            return constants.ARP_DVR_MAC_TO_DST_MAC
+
+    def install_dvr_dst_mac_for_arp(self, network_type,
+                                    vlan_tag, gateway_mac, dvr_mac, rtr_port):
+        table_id = self._dvr_dst_mac_table_id(network_type)
+        # Match the destination MAC with the DVR MAC
+        (_dp, ofp, ofpp) = self._get_dp()
+        match = self._arp_dvr_dst_mac_match(ofp, ofpp, vlan_tag, dvr_mac)
+        # The incoming packet arrives with the DVR host MAC as the
+        # destination MAC from the ARP Responder, and with the source MAC
+        # of the port that replied from the ARP Responder. So just remove
+        # the DVR host MAC from 'eth_dst' and replace it with the gateway
+        # MAC. The packet should end up in the right table to reach the
+        # router interface.
+        actions = [
+            ofpp.OFPActionSetField(eth_dst=gateway_mac),
+            ofpp.OFPActionPopVlan(),
+            ofpp.OFPActionOutput(rtr_port, 0)
+        ]
+        self.install_apply_actions(table_id=table_id,
+                                   priority=5,
+                                   match=match,
+                                   actions=actions)
+
+    @staticmethod
     def _dvr_to_src_mac_match(ofp, ofpp, vlan_tag, dst_mac):
         return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT,
                              eth_dst=dst_mac)
@@ -165,6 +201,37 @@
         self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
                              in_port=port, eth_src=mac)
 
+    def delete_dvr_dst_mac_for_arp(self, network_type,
+                                   vlan_tag, gateway_mac, dvr_mac, rtr_port):
+        # Remove the flow from the same per-network-type table that
+        # install_dvr_dst_mac_for_arp() populated.
+        table_id = self._dvr_dst_mac_table_id(network_type)
+        (_dp, ofp, ofpp) = self._get_dp()
+        match = self._arp_dvr_dst_mac_match(ofp, ofpp, vlan_tag, dvr_mac)
+        self.uninstall_flows(
+            strict=True, priority=5, table_id=table_id, match=match)
+
+    def add_dvr_gateway_mac_arp_vlan(self, mac, port):
+        self.install_goto(table_id=constants.LOCAL_SWITCHING,
+                          priority=5,
+                          in_port=port,
+                          eth_dst=mac,
+                          dest_table_id=constants.ARP_DVR_MAC_TO_DST_MAC_VLAN)
+
+    def remove_dvr_gateway_mac_arp_vlan(self, mac, port):
+        self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
+                             eth_dst=mac)
+
+    def add_dvr_gateway_mac_arp_tun(self, mac, port):
+        self.install_goto(table_id=constants.LOCAL_SWITCHING,
+                          priority=5,
+                          in_port=port,
+                          eth_dst=mac,
+                          dest_table_id=constants.ARP_DVR_MAC_TO_DST_MAC)
+
+    def remove_dvr_gateway_mac_arp_tun(self, mac, port):
+        self.uninstall_flows(table_id=constants.LOCAL_SWITCHING,
+                             eth_dst=mac)
+
     @staticmethod
     def _arp_reply_match(ofp, ofpp, port):
         return ofpp.OFPMatch(in_port=port,
diff -Nru neutron-14.0.2/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py neutron-14.0.3/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py
--- neutron-14.0.2/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py	2019-07-01 02:54:56.000000000 +0000
+++ neutron-14.0.3/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py	2019-10-22 19:46:03.000000000 +0000
@@ -119,7 +119,8 @@
                  patch_int_ofport=constants.OFPORT_INVALID,
                  patch_tun_ofport=constants.OFPORT_INVALID,
                  host=None, enable_tunneling=False,
-                 enable_distributed_routing=False):
+                 enable_distributed_routing=False,
+                 arp_responder_enabled=False):
         self.context = context
         self.plugin_rpc = plugin_rpc
         self.host = host
@@ -133,6 +134,7 @@
                                    patch_int_ofport, patch_tun_ofport)
         self.reset_dvr_parameters()
         self.dvr_mac_address = None
+        self.arp_responder_enabled = arp_responder_enabled
         if self.enable_distributed_routing:
             self.get_dvr_mac_address()
 
@@ -262,6 +264,10 @@
             phys_br.add_dvr_mac_vlan(mac=mac,
                                      port=self.phys_ofports[physical_network])
 
+    def _add_arp_dvr_mac_for_phys_br(self, physical_network, mac):
+        self.int_br.add_dvr_gateway_mac_arp_vlan(
+            mac=mac, port=self.int_ofports[physical_network])
+
     def _remove_dvr_mac_for_phys_br(self, physical_network, mac):
         # REVISIT(yamamoto): match in_port as well?
         self.int_br.remove_dvr_mac_vlan(mac=mac)
@@ -273,6 +279,10 @@
         self.int_br.add_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
         self.tun_br.add_dvr_mac_tun(mac=mac, port=self.patch_int_ofport)
 
+    def _add_arp_dvr_mac_for_tun_br(self, mac):
+        self.int_br.add_dvr_gateway_mac_arp_tun(
+            mac=mac, port=self.patch_tun_ofport)
+
     def _remove_dvr_mac_for_tun_br(self, mac):
         self.int_br.remove_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
         # REVISIT(yamamoto): match in_port as well?
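Taken together, the br-int helpers above implement a two-step redirect for ARP replies produced by the local ARP responder: a goto flow in LOCAL_SWITCHING steers frames addressed to this host's DVR MAC into the new ARP table, where a second flow rewrites the destination to the gateway MAC, pops the VLAN tag and outputs to the router port. A rough sketch of the call sequence for a VLAN network, assuming an `int_br` object exposing the methods added above; `wire_arp_redirect_vlan` and the parameter values are illustrative only:

    def wire_arp_redirect_vlan(int_br, patch_phys_ofport, vlan_tag,
                               gateway_mac, dvr_mac, rtr_port):
        # Step 1: LOCAL_SWITCHING -> ARP_DVR_MAC_TO_DST_MAC_VLAN for frames
        # arriving from the physical bridge with eth_dst == the host DVR MAC.
        int_br.add_dvr_gateway_mac_arp_vlan(mac=dvr_mac,
                                            port=patch_phys_ofport)
        # Step 2: rewrite eth_dst to the gateway MAC, pop the VLAN tag and
        # output straight to the router port.
        int_br.install_dvr_dst_mac_for_arp('vlan', vlan_tag=vlan_tag,
                                           gateway_mac=gateway_mac,
                                           dvr_mac=dvr_mac,
                                           rtr_port=rtr_port)

In the agent these calls are made from _add_arp_dvr_mac_for_phys_br() and from the subnet-processing path shown below, and only when arp_responder_enabled is set.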
@@ -286,6 +296,13 @@ LOG.debug("Added DVR MAC flow for %s", mac) self.registered_dvr_macs.add(mac) + def _add_dvr_mac_for_arp(self, mac): + for physical_network in self.bridge_mappings: + self._add_arp_dvr_mac_for_phys_br(physical_network, mac) + if self.enable_tunneling: + self._add_arp_dvr_mac_for_tun_br(mac) + LOG.debug("Added ARP DVR MAC flow for %s", mac) + def _remove_dvr_mac(self, mac): for physical_network in self.bridge_mappings: self._remove_dvr_mac_for_phys_br(physical_network, mac) @@ -301,6 +318,8 @@ c_mac = netaddr.EUI(mac['mac_address'], dialect=netaddr.mac_unix_expanded) if c_mac == self.dvr_mac_address: + self._add_dvr_mac_for_arp(c_mac) + LOG.debug("Added the DVR MAC rule for ARP %s", c_mac) continue self._add_dvr_mac(c_mac) @@ -406,6 +425,15 @@ gateway_mac=subnet_info['gateway_mac'], dst_mac=comp_ovsport.get_mac(), dst_port=comp_ovsport.get_ofport()) + # Add the following flow rule only when ARP RESPONDER is + # enabled + if self.arp_responder_enabled: + self.int_br.install_dvr_dst_mac_for_arp( + lvm.network_type, + vlan_tag=lvm.vlan, + gateway_mac=port.vif_mac, + dvr_mac=self.dvr_mac_address, + rtr_port=port.ofport) if lvm.network_type == n_const.TYPE_VLAN: # TODO(vivek) remove the IPv6 related flows once SNAT is not @@ -600,7 +628,16 @@ network_type=network_type, vlan_tag=vlan_to_use, dst_mac=comp_port.get_mac()) ldm.remove_all_compute_ofports() - + # If ARP Responder enabled, remove the rule that redirects + # the dvr_mac_address destination to the router port, since + # the router port is removed or unbound. + if self.arp_responder_enabled: + self.int_br.delete_dvr_dst_mac_for_arp( + network_type=network_type, + vlan_tag=vlan_to_use, + gateway_mac=port.vif_mac, + dvr_mac=self.dvr_mac_address, + rtr_port=port.ofport) if ldm.get_csnat_ofport() == constants.OFPORT_INVALID: # if there is no csnat port for this subnet, remove # this subnet from local_dvr_map, as no dvr (or) csnat diff -Nru neutron-14.0.2/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py neutron-14.0.3/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py --- neutron-14.0.2/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py 2019-10-22 19:46:04.000000000 +0000 @@ -186,7 +186,8 @@ self.network_ports = collections.defaultdict(set) # keeps association between ports and ofports to detect ofport change self.vifname_to_ofport_map = {} - self.setup_rpc() + # Stores newly created bridges + self.added_bridges = list() self.bridge_mappings = self._parse_bridge_mappings( ovs_conf.bridge_mappings) self.rp_bandwidths = place_utils.parse_rp_bandwidths( @@ -227,6 +228,8 @@ self.setup_tunnel_br(ovs_conf.tunnel_bridge) self.setup_tunnel_br_flows() + self.setup_rpc() + self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent( self.context, self.dvr_plugin_rpc, @@ -240,7 +243,8 @@ self.patch_tun_ofport, host, self.enable_tunneling, - self.enable_distributed_routing) + self.enable_distributed_routing, + self.arp_responder_enabled) if self.enable_distributed_routing: self.dvr_agent.setup_dvr_flows() @@ -438,11 +442,19 @@ def port_update(self, context, **kwargs): port = kwargs.get('port') + agent_restarted = kwargs.pop("agent_restarted", False) # Put the port identifier in the updated_ports set. 
         # Even if full port details might be provided to this call,
         # they are not used since there is no guarantee the notifications
         # are processed in the same order as the relevant API requests
-        self.updated_ports.add(port['id'])
+        if not agent_restarted:
+            # When ovs-agent is just restarted, the first RPC loop will
+            # process all the ports as 'added'. And all of these ports will
+            # send a port_update notification after that processing. This
+            # will cause all these ports to be processed again in the next
+            # RPC loop as 'updated'. So here we just ignore such local update
+            # notifications.
+            self.updated_ports.add(port['id'])
 
     def port_delete(self, context, **kwargs):
         port_id = kwargs.get('port_id')
@@ -897,14 +909,17 @@
                 cur_info = info_by_port[port.port_name]
             except KeyError:
                 continue
+            str_vlan = str(lvm.vlan)
             other_config = cur_info['other_config']
             if (cur_info['tag'] != lvm.vlan or
-                    other_config.get('tag') != lvm.vlan):
-                other_config['tag'] = str(lvm.vlan)
+                    other_config.get('tag') != str_vlan):
+                other_config['tag'] = str_vlan
                 self.int_br.set_db_attribute(
                     "Port", port.port_name, "other_config", other_config)
             # Uninitialized port has tag set to []
             if cur_info['tag']:
+                LOG.warning("Uninstall flows of ofport %s due to "
+                            "local vlan change.", port.ofport)
                 self.int_br.uninstall_flows(in_port=port.ofport)
 
     def _bind_devices(self, need_binding_ports):
@@ -1152,6 +1167,21 @@
             self.arp_responder_enabled)
 
     def _reconfigure_physical_bridges(self, bridges):
+        try:
+            sync = self._do_reconfigure_physical_bridges(bridges)
+            self.added_bridges = []
+        except RuntimeError:
+            # If there was an error and the bridges weren't properly
+            # reconfigured, there is no need to do a full sync once again. It
+            # will be done when the reconfiguration of the physical bridges
+            # finishes without errors.
+            sync = False
+            self.added_bridges = bridges
+            LOG.warning("RuntimeError during setup of physical bridges: %s",
+                        bridges)
+        return sync
+
+    def _do_reconfigure_physical_bridges(self, bridges):
         sync = False
         bridge_mappings = {}
         for bridge in bridges:
@@ -1633,12 +1663,14 @@
         skipped_devices = []
         need_binding_devices = []
         binding_no_activated_devices = set()
+        agent_restarted = self.iter_num == 0
         devices_details_list = (
             self.plugin_rpc.get_devices_details_list_and_failed_devices(
                 self.context,
                 devices,
                 self.agent_id,
-                self.conf.host))
+                self.conf.host,
+                agent_restarted))
         failed_devices = set(devices_details_list.get('failed_devices'))
 
         devices = devices_details_list.get('devices')
@@ -2183,8 +2215,9 @@
                 self.loop_count_and_wait(start, port_stats)
                 continue
             # Check if any physical bridge wasn't recreated recently
+            added_bridges = idl_monitor.bridges_added + self.added_bridges
             bridges_recreated = self._reconfigure_physical_bridges(
-                idl_monitor.bridges_added)
+                added_bridges)
             sync |= bridges_recreated
             # Notify the plugin of tunnel IP
             if self.enable_tunneling and tunnel_sync:
diff -Nru neutron-14.0.2/neutron/plugins/ml2/drivers/type_vlan.py neutron-14.0.3/neutron/plugins/ml2/drivers/type_vlan.py
--- neutron-14.0.2/neutron/plugins/ml2/drivers/type_vlan.py	2019-07-01 02:54:49.000000000 +0000
+++ neutron-14.0.3/neutron/plugins/ml2/drivers/type_vlan.py	2019-10-22 19:46:04.000000000 +0000
@@ -241,7 +241,7 @@
             msg = (_("physical_network '%s' unknown "
                      "for VLAN provider network") % physical_network)
             raise exc.InvalidInput(error_message=msg)
-        if segmentation_id:
+        if segmentation_id is not None:
             if not plugin_utils.is_valid_vlan_tag(segmentation_id):
                 msg = (_("segmentation_id out of range (%(min)s through "
                          "%(max)s)") %
@@ -254,7 +254,7 @@
"to be specified when creating a provider " "network") % physical_network) raise exc.InvalidInput(error_message=msg) - elif segmentation_id: + elif segmentation_id is not None: msg = _("segmentation_id requires physical_network for VLAN " "provider network") raise exc.InvalidInput(error_message=msg) diff -Nru neutron-14.0.2/neutron/plugins/ml2/plugin.py neutron-14.0.3/neutron/plugins/ml2/plugin.py --- neutron-14.0.2/neutron/plugins/ml2/plugin.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/plugins/ml2/plugin.py 2019-10-22 19:46:04.000000000 +0000 @@ -439,7 +439,7 @@ binding.host = host changes = True - vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE) + vnic_type = attrs.get(portbindings.VNIC_TYPE) if attrs else None if (validators.is_attr_set(vnic_type) and binding.vnic_type != vnic_type): binding.vnic_type = vnic_type @@ -694,6 +694,22 @@ # Call the mechanism driver precommit methods, commit # the results, and call the postcommit methods. self.mechanism_manager.update_port_precommit(cur_context) + else: + # Try to populate the PortContext with the current binding + # levels so that the RPC notification won't get suppressed. + # This is to avoid leaving ports stuck in a DOWN state. + # For more information see bug: + # https://bugs.launchpad.net/neutron/+bug/1755810 + LOG.warning("Concurrent port binding operations failed on " + "port %s", port_id) + levels = db.get_binding_level_objs(plugin_context, port_id, + cur_binding.host) + for level in levels: + cur_context._push_binding_level(level) + # refresh context with a snapshot of the current binding state + cur_context._binding = driver_context.InstanceSnapshot( + cur_binding) + if commit: # Continue, using the port state as of the transaction that # just finished, whether that transaction committed new @@ -1141,7 +1157,11 @@ # called inside of a transaction. return super(Ml2Plugin, self).delete_network(context, id) - @registry.receives(resources.NETWORK, [events.PRECOMMIT_DELETE]) + # NOTE(mgoddard): Use a priority of zero to ensure this handler runs before + # other precommit handlers. This is necessary to ensure we avoid another + # handler deleting a subresource of the network, e.g. segments. + @registry.receives(resources.NETWORK, [events.PRECOMMIT_DELETE], + priority=0) def _network_delete_precommit_handler(self, rtype, event, trigger, context, network_id, **kwargs): network = self.get_network(context, network_id) @@ -1252,7 +1272,10 @@ # called inside of a transaction. return super(Ml2Plugin, self).delete_subnet(context, id) - @registry.receives(resources.SUBNET, [events.PRECOMMIT_DELETE]) + # NOTE(mgoddard): Use a priority of zero to ensure this handler runs before + # other precommit handlers. This is necessary to ensure we avoid another + # handler deleting a subresource of the subnet. 
+ @registry.receives(resources.SUBNET, [events.PRECOMMIT_DELETE], priority=0) def _subnet_delete_precommit_handler(self, rtype, event, trigger, context, subnet_id, **kwargs): subnet_obj = self._get_subnet_object(context, subnet_id) @@ -1386,6 +1409,11 @@ return bound_context.current + def _ensure_security_groups_on_port(self, context, port_dict): + port_compat = {'port': port_dict} + sgids = self._get_security_groups_on_port(context, port_compat) + self._process_port_create_security_group(context, port_dict, sgids) + @utils.transaction_guard @db_api.retry_if_session_inactive() def create_port_bulk(self, context, ports): @@ -1401,11 +1429,18 @@ for port in port_list: # Set up the port request dict pdata = port.get('port') + project_id = pdata.get('project_id') or pdata.get('tenant_id') + security_group_ids = pdata.get('security_groups') + if security_group_ids is const.ATTR_NOT_SPECIFIED: + security_group_ids = None + else: + security_group_ids = set(security_group_ids) if pdata.get('device_owner'): self._enforce_device_owner_not_router_intf_or_device_id( context, pdata.get('device_owner'), - pdata.get('device_id'), pdata.get('tenant_id')) - bulk_port_data = dict(project_id=pdata.get('project_id'), + pdata.get('device_id'), project_id) + bulk_port_data = dict( + project_id=project_id, name=pdata.get('name'), network_id=pdata.get('network_id'), admin_state_up=pdata.get('admin_state_up'), @@ -1413,7 +1448,7 @@ const.PORT_STATUS_ACTIVE), device_id=pdata.get('device_id'), device_owner=pdata.get('device_owner'), - security_groups=pdata.get('security_groups'), + security_group_ids=security_group_ids, description=pdata.get('description')) # Ensure that the networks exist. @@ -1434,6 +1469,7 @@ mac=raw_mac_address) eui_mac_address = netaddr.EUI(raw_mac_address, dialect=eui48.mac_unix_expanded) + port['port']['mac_address'] = str(eui_mac_address) # Create the Port object db_port_obj = ports_obj.Port(context, @@ -1441,40 +1477,59 @@ id=uuidutils.generate_uuid(), **bulk_port_data) db_port_obj.create() - port_dict = self._make_port_dict(db_port_obj, - process_extensions=False) - port_compat = {'port': port_dict} # Call IPAM to allocate IP addresses try: # TODO(njohnston): IPAM allocation needs to be revamped to # be bulk-friendly. - self.ipam.allocate_ips_for_port_and_store( - context, db_port_obj, db_port_obj['id']) + ips = self.ipam.allocate_ips_for_port_and_store( + context, port, db_port_obj['id']) + ipam_fixed_ips = [] + for ip in ips: + fixed_ip = ports_obj.IPAllocation( + port_id=db_port_obj['id'], + subnet_id=ip['subnet_id'], + network_id=network_id, + ip_address=ip['ip_address']) + ipam_fixed_ips.append(fixed_ip) + + db_port_obj['fixed_ips'] = ipam_fixed_ips db_port_obj['ip_allocation'] = (ipalloc_apidef. IP_ALLOCATION_IMMEDIATE) except ipam_exc.DeferIpam: db_port_obj['ip_allocation'] = (ipalloc_apidef. IP_ALLOCATION_DEFERRED) + fixed_ips = pdata.get('fixed_ips') if validators.is_attr_set(fixed_ips) and not fixed_ips: # [] was passed explicitly as fixed_ips: unaddressed port. db_port_obj['ip_allocation'] = (ipalloc_apidef. 
                                                 IP_ALLOCATION_NONE)
 
+                # Make port dict
+                port_dict = self._make_port_dict(db_port_obj,
+                                                 process_extensions=False)
+                port_dict[portbindings.HOST_ID] = pdata.get(
+                    portbindings.HOST_ID)
+                port_compat = {'port': port_dict}
+
                 # Activities immediately post-port-creation
                 self.extension_manager.process_create_port(context, port_dict,
                                                            db_port_obj)
 
                 self._portsec_ext_port_create_processing(context, port_dict,
                                                          port_compat)
 
-                # sgids must be got after portsec checked with security group
-                sgids = self._get_security_groups_on_port(context, port_compat)
-                self._process_port_create_security_group(context, port_dict,
-                                                         sgids)
+                # Ensure the default security group is assigned, unless one was
+                # specifically requested
+                if security_group_ids is None:
+                    self._ensure_security_groups_on_port(context, port_dict)
 
                 # process port binding
                 binding = db.add_port_binding(context, port_dict['id'])
+                binding_host = pdata.get(
+                    portbindings.HOST_ID, const.ATTR_NOT_SPECIFIED)
+                if binding_host != const.ATTR_NOT_SPECIFIED:
+                    binding["host"] = binding_host
                 mech_context = driver_context.PortContext(self, context,
                                                           port_dict, network,
                                                           binding, None)
@@ -1511,6 +1566,13 @@
         # Perform actions after the transaction is committed
         completed_ports = []
         for port in port_data:
+            # Ensure security groups are assigned to the port, if
+            # specifically requested
+            port_dict = port['port_dict']
+            if port_dict.get('security_group_ids') is not None:
+                with db_api.CONTEXT_WRITER.using(context):
+                    self._ensure_security_groups_on_port(context, port_dict)
+
             resource_extend.apply_funcs('ports',
                                         port['port_dict'],
                                         port['port_obj'].db_obj)
@@ -1725,6 +1787,13 @@
         # merge into session to reflect changes
         binding.persist_state_to_session(plugin_context.session)
 
+    def delete_distributed_port_bindings_by_router_id(self, context,
+                                                      router_id):
+        for binding in (context.session.query(models.DistributedPortBinding).
+                        filter_by(router_id=router_id)):
+            db.clear_binding_levels(context, binding.port_id, binding.host)
+            context.session.delete(binding)
+
     @utils.transaction_guard
     @db_api.retry_if_session_inactive()
     def update_distributed_port_binding(self, context, id, port):
@@ -2153,6 +2222,7 @@
     def _get_ports_query(self, context, filters=None, *args, **kwargs):
         filters = filters or {}
         security_groups = filters.pop("security_groups", None)
+        limit = kwargs.pop('limit', None)
         if security_groups:
             port_bindings = self._get_port_security_group_bindings(
                 context, filters={'security_group_id':
@@ -2172,6 +2242,8 @@
                 models_v2.IPAllocation.ip_address.like('%%%s%%' % ip))
                 for ip in ip_addresses_s])
             query = query.filter(substr_filter)
+        if limit:
+            query = query.limit(limit)
         return query
 
     def filter_hosts_with_network_access(
diff -Nru neutron-14.0.2/neutron/plugins/ml2/rpc.py neutron-14.0.3/neutron/plugins/ml2/rpc.py
--- neutron-14.0.2/neutron/plugins/ml2/rpc.py	2019-07-01 02:54:56.000000000 +0000
+++ neutron-14.0.3/neutron/plugins/ml2/rpc.py	2019-10-22 19:46:04.000000000 +0000
@@ -330,11 +330,13 @@
         # and so we don't need to update it again here. But l2pop did not
         # handle DVR ports while restarting neutron-*-agent, so we need to
         # handle it here.
-        if agent_restarted is None:
-            agent_restarted = l2pop_driver.obj.agent_restarted(port_context)
-        if (port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE and
-                not agent_restarted):
-            return
+        if port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
+            if agent_restarted is None:
+                agent_restarted = l2pop_driver.obj.agent_restarted(
+                    port_context)
+            if not agent_restarted:
+                return
+
         port = port_context.current
         if (port['device_owner'] != n_const.DEVICE_OWNER_DVR_INTERFACE and
             status == n_const.PORT_STATUS_ACTIVE and
@@ -366,9 +368,9 @@
                     rpc_context,
                     device=device,
                     **kwargs)
-            except Exception:
+            except Exception as e:
                 failed_devices_up.append(device)
-                LOG.error("Failed to update device %s up", device)
+                LOG.error("Failed to update device %s up: %s", device, e)
             else:
                 devices_up.append(device)
 
@@ -380,9 +382,9 @@
                     rpc_context,
                     device=device,
                     **kwargs)
-            except Exception:
+            except Exception as e:
                 failed_devices_down.append(device)
-                LOG.error("Failed to update device %s down: %s", device, e)
+                LOG.error("Failed to update device %s down: %s", device, e)
             else:
                 devices_down.append(dev)
 
diff -Nru neutron-14.0.2/neutron/policy.py neutron-14.0.3/neutron/policy.py
--- neutron-14.0.2/neutron/policy.py	2019-07-01 02:54:56.000000000 +0000
+++ neutron-14.0.3/neutron/policy.py	2019-10-22 19:46:04.000000000 +0000
@@ -380,9 +380,11 @@
                       "%(target_dict)s",
                       {'field': self.field,
                        'target_dict': target_dict})
             return
+        project_id = target_dict.get('project_id')
+        ctx = (context.Context(tenant_id=project_id) if project_id
+               else context.get_admin_context())
         plugin = directory.get_plugin()
-        network = plugin.get_network(
-            context.get_admin_context(), target_network_id)
+        network = plugin.get_network(ctx, target_network_id)
         target_value = network.get(self.field)
         if target_value is None:
             LOG.debug("Unable to find requested field: %(field)s in target: "
diff -Nru neutron-14.0.2/neutron/privileged/agent/linux/ip_lib.py neutron-14.0.3/neutron/privileged/agent/linux/ip_lib.py
--- neutron-14.0.2/neutron/privileged/agent/linux/ip_lib.py	2019-07-01 02:54:56.000000000 +0000
+++ neutron-14.0.3/neutron/privileged/agent/linux/ip_lib.py	2019-10-22 19:46:04.000000000 +0000
@@ -11,6 +11,7 @@
 #    under the License.
 
 import errno
+import functools
 import socket
 
 from neutron_lib import constants
@@ -23,6 +24,7 @@
 from pyroute2.netlink.rtnl import ndmsg
 from pyroute2 import NetlinkError
 from pyroute2 import netns
+import six
 
 from neutron._i18n import _
 from neutron import privileged
@@ -31,6 +33,25 @@
 _IP_VERSION_FAMILY_MAP = {4: socket.AF_INET, 6: socket.AF_INET6}
 
 
+def _sync(input_func):
+    # NOTE(ralonsoh): this is needed because PY2 functools.update_wrapper do
+    # not handle correctly partial functions (nested decorators). This could be
+    # removed once we abandon support for PY2.
+    if six.PY2 and isinstance(input_func, functools.partial):
+        for asig in functools.WRAPPER_ASSIGNMENTS:
+            setattr(input_func, asig, '')
+
+    # NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in
+    # threads we need to lock this function to workaround this issue.
+    # For details please check https://bugs.launchpad.net/neutron/+bug/1811515
+    @six.wraps(input_func)
+    @lockutils.synchronized("privileged-ip-lib")
+    def sync_inner(*args, **kwargs):
+        return input_func(*args, **kwargs)
+
+    return sync_inner
+
+
 def _get_scope_name(scope):
     """Return the name of the scope (given as a number), or the scope number
     if the name is unknown.
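The point of the `_sync` refactor above is ordering: as the call sites below show, `@_sync` is now applied on top of `@privileged.default.entrypoint`, so the lock is acquired in the calling process before the request crosses into the privsep daemon, rather than inside the daemon thread. A self-contained sketch of the same pattern with stand-in decorators (plain threading, no neutron or privsep dependencies; all names are illustrative):

    import threading

    _LOCK = threading.Lock()

    def sync(func):
        # Outer decorator: serializes callers before the privileged hop.
        def inner(*args, **kwargs):
            with _LOCK:
                return func(*args, **kwargs)
        return inner

    def entrypoint(func):
        # Stand-in for privileged.default.entrypoint.
        def inner(*args, **kwargs):
            return func(*args, **kwargs)
        return inner

    @sync        # applied last: the lock wraps the whole privileged call
    @entrypoint
    def get_routing_table(ip_version, namespace=None):
        return []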
@@ -113,11 +134,8 @@ 'scope': scope} +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def get_routing_table(ip_version, namespace=None): """Return a list of dictionaries, each representing a route. @@ -176,6 +194,14 @@ return pyroute2.IPRoute() +@_sync +@privileged.default.entrypoint +def open_namespace(namespace): + """Open namespace to test if the namespace is ready to be manipulated""" + with pyroute2.NetNS(namespace, flags=0): + pass + + def _translate_ip_device_exception(e, device=None, namespace=None): if e.code == errno.ENODEV: raise NetworkInterfaceNotFound(device=device, namespace=namespace) @@ -234,11 +260,8 @@ raise +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def add_ip_address(ip_version, ip, prefixlen, device, namespace, scope, broadcast=None): family = _IP_VERSION_FAMILY_MAP[ip_version] @@ -257,11 +280,8 @@ raise +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def delete_ip_address(ip_version, ip, prefixlen, device, namespace): family = _IP_VERSION_FAMILY_MAP[ip_version] try: @@ -281,11 +301,8 @@ raise +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def flush_ip_addresses(ip_version, device, namespace): family = _IP_VERSION_FAMILY_MAP[ip_version] try: @@ -298,11 +315,8 @@ raise +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def create_interface(ifname, namespace, kind, **kwargs): ifname = ifname[:constants.DEVICE_NAME_MAX_LEN] try: @@ -322,20 +336,14 @@ raise +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def delete_interface(ifname, namespace, **kwargs): _run_iproute_link("del", ifname, namespace, **kwargs) +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. 
-# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def interface_exists(ifname, namespace): try: idx = get_link_id(ifname, namespace) @@ -348,31 +356,22 @@ raise +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def set_link_flags(device, namespace, flags): link = _run_iproute_link("get", device, namespace)[0] new_flags = flags | link['flags'] return _run_iproute_link("set", device, namespace, flags=new_flags) +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def set_link_attribute(device, namespace, **attributes): return _run_iproute_link("set", device, namespace, **attributes) +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def get_link_attributes(device, namespace): link = _run_iproute_link("get", device, namespace)[0] return { @@ -387,11 +386,8 @@ } +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def add_neigh_entry(ip_version, ip_address, mac_address, device, namespace, **kwargs): """Add a neighbour entry. @@ -412,11 +408,8 @@ **kwargs) +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def delete_neigh_entry(ip_version, ip_address, mac_address, device, namespace, **kwargs): """Delete a neighbour entry. @@ -442,11 +435,8 @@ raise +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def dump_neigh_entries(ip_version, device, namespace, **kwargs): """Dump all neighbour entries. @@ -539,11 +529,8 @@ return value +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def get_link_devices(namespace, **kwargs): """List interfaces in a namespace @@ -573,11 +560,8 @@ return device_names +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. 
-# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def get_ip_addresses(namespace, **kwargs): """List of IP addresses in a namespace @@ -592,11 +576,8 @@ raise +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def list_ip_rules(namespace, ip_version, match=None, **kwargs): """List all IP rules""" try: @@ -615,11 +596,8 @@ raise +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def add_ip_rule(namespace, **kwargs): """Add a new IP rule""" try: @@ -635,11 +613,8 @@ raise +@_sync @privileged.default.entrypoint -# NOTE(slaweq): Because of issue with pyroute2.NetNS objects running in threads -# we need to lock this function to workaround this issue. -# For details please check https://bugs.launchpad.net/neutron/+bug/1811515 -@lockutils.synchronized("privileged-ip-lib") def delete_ip_rule(namespace, **kwargs): """Delete an IP rule""" try: diff -Nru neutron-14.0.2/neutron/service.py neutron-14.0.3/neutron/service.py --- neutron-14.0.2/neutron/service.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/service.py 2019-10-22 19:46:03.000000000 +0000 @@ -176,7 +176,7 @@ workers = cfg.CONF.rpc_workers if workers is None: # By default, half as many rpc workers as api workers - workers = int(_get_worker_count() / 2) + workers = int(_get_api_workers() / 2) if workers < 1: workers = 1 diff -Nru neutron-14.0.2/neutron/services/qos/qos_plugin.py neutron-14.0.3/neutron/services/qos/qos_plugin.py --- neutron-14.0.2/neutron/services/qos/qos_plugin.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/services/qos/qos_plugin.py 2019-10-22 19:46:04.000000000 +0000 @@ -85,31 +85,23 @@ @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_resource_request(port_res, port_db): """Add resource request to a port.""" - port_res['resource_request'] = None - qos_policy = policy_object.QosPolicy.get_port_policy( - context.get_admin_context(), port_res['id']) - # Note(lajoskatona): QosPolicyPortBinding is not ready for some - # reasons, so let's try and fetch the QoS policy directly if there is a - # qos_policy_id in port_res. - if (not qos_policy and 'qos_policy_id' in port_res and - port_res['qos_policy_id']): - qos_policy = policy_object.QosPolicy.get_policy_obj( - context.get_admin_context(), port_res['qos_policy_id'] - ) - - # Note(lajoskatona): handle the case when the port inherits qos-policy - # from the network. 
- if not qos_policy: - net = network_object.Network.get_object( - context.get_admin_context(), id=port_res['network_id']) - if net and net.qos_policy_id: - qos_policy = policy_object.QosPolicy.get_network_policy( - context.get_admin_context(), net.id) + if isinstance(port_db, ports_object.Port): + qos_id = port_db.qos_policy_id or port_db.qos_network_policy_id + else: + qos_id = None + if port_db.get('qos_policy_binding'): + qos_id = port_db.qos_policy_binding.policy_id + elif port_db.get('qos_network_policy_binding'): + qos_id = port_db.qos_network_policy_binding.policy_id - if not qos_policy: + port_res['resource_request'] = None + if not qos_id: return port_res + qos_policy = policy_object.QosPolicy.get_object( + context.get_admin_context(), id=qos_id) resources = {} + # NOTE(ralonsoh): we should move this translation dict to n-lib. rule_direction_class = { nl_constants.INGRESS_DIRECTION: pl_constants.CLASS_NET_BW_INGRESS_KBPS, @@ -122,6 +114,10 @@ if not resources: return port_res + # NOTE(ralonsoh): we should not rely on the current execution order of + # the port extending functions. Although here we have + # port_res[VNIC_TYPE], we should retrieve this value from the port DB + # object instead. vnic_trait = pl_utils.vnic_type_trait( port_res[portbindings.VNIC_TYPE]) @@ -129,19 +125,16 @@ # support will be available. See Placement spec: # https://review.openstack.org/565730 first_segment = network_object.NetworkSegment.get_objects( - context.get_admin_context(), - network_id=port_res['network_id'])[0] + context.get_admin_context(), network_id=port_db.network_id)[0] if not first_segment or not first_segment.physical_network: return port_res physnet_trait = pl_utils.physnet_trait( first_segment.physical_network) - resource_request = { + port_res['resource_request'] = { 'required': [physnet_trait, vnic_trait], - 'resources': resources - } - port_res['resource_request'] = resource_request + 'resources': resources} return port_res def _get_ports_with_policy(self, context, policy): diff -Nru neutron-14.0.2/neutron/services/trunk/rpc/server.py neutron-14.0.3/neutron/services/trunk/rpc/server.py --- neutron-14.0.2/neutron/services/trunk/rpc/server.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/services/trunk/rpc/server.py 2019-10-22 19:46:04.000000000 +0000 @@ -21,6 +21,7 @@ from oslo_log import helpers as log_helpers from oslo_log import log as logging import oslo_messaging +from sqlalchemy.orm import exc from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks.producer import registry @@ -115,11 +116,22 @@ trunk_port = self.core_plugin.get_port(context, trunk_port_id) trunk_host = trunk_port.get(portbindings.HOST_ID) - # NOTE(status_police) Set the trunk in BUILD state before processing - # subport bindings. The trunk will stay in BUILD state until an - # attempt has been made to bind all subports passed here and the - # agent acknowledges the operation was successful. - trunk.update(status=trunk_consts.BUILD_STATUS) + for try_cnt in range(db_api.MAX_RETRIES): + try: + # NOTE(status_police) Set the trunk in BUILD state before + # processing subport bindings. The trunk will stay in BUILD + # state until an attempt has been made to bind all subports + # passed here and the agent acknowledges the operation was + # successful. 
+ trunk.update(status=trunk_consts.BUILD_STATUS) + break + except exc.StaleDataError as e: + if try_cnt < db_api.MAX_RETRIES - 1: + LOG.debug("Got StaleDataError exception: %s", e) + continue + else: + # re-raise when all tries failed + raise for port_id in port_ids: try: diff -Nru neutron-14.0.2/neutron/tests/fullstack/base.py neutron-14.0.3/neutron/tests/fullstack/base.py --- neutron-14.0.2/neutron/tests/fullstack/base.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/fullstack/base.py 2019-10-22 19:46:04.000000000 +0000 @@ -19,6 +19,7 @@ from oslo_config import cfg from oslo_log import log as logging +from neutron.agent.linux import ip_lib from neutron.common import utils as common_utils from neutron.conf.agent import common as config from neutron.tests import base as tests_base @@ -150,3 +151,8 @@ "Port", vm.port.name, "tag", network.get("provider:segmentation_id")) return vm + + def assert_namespace_exists(self, ns_name): + common_utils.wait_until_true( + lambda: ip_lib.network_namespace_exists(ns_name, + try_is_ready=True)) diff -Nru neutron-14.0.2/neutron/tests/fullstack/resources/config.py neutron-14.0.3/neutron/tests/fullstack/resources/config.py --- neutron-14.0.2/neutron/tests/fullstack/resources/config.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/fullstack/resources/config.py 2019-10-22 19:46:04.000000000 +0000 @@ -83,7 +83,8 @@ 'lock_path': '$state_path/lock', }, 'agent': { - 'report_interval': str(env_desc.agent_down_time / 2.0) + 'report_interval': str(env_desc.agent_down_time / 2.0), + 'log_agent_heartbeats': 'True', }, }) policy_file = self._generate_policy_json() @@ -172,7 +173,8 @@ 'local_ip': local_ip, 'integration_bridge': self._generate_integration_bridge(), 'of_interface': host_desc.of_interface, - 'bridge_mappings': '%s:%s' % (PHYSICAL_NETWORK_NAME, ext_dev) + 'bridge_mappings': '%s:%s' % (PHYSICAL_NETWORK_NAME, ext_dev), + 'of_inactivity_probe': '0', }, 'securitygroup': { 'firewall_driver': host_desc.firewall_driver, @@ -180,7 +182,8 @@ 'agent': { 'l2_population': str(self.env_desc.l2_pop), 'arp_responder': str(self.env_desc.arp_responder), - 'debug_iptables_rules': str(env_desc.debug_iptables) + 'debug_iptables_rules': str(env_desc.debug_iptables), + 'use_helper_for_ns_read': 'False', } }) @@ -291,7 +294,8 @@ 'firewall_driver': host_desc.firewall_driver, }, 'AGENT': { - 'debug_iptables_rules': str(env_desc.debug_iptables) + 'debug_iptables_rules': str(env_desc.debug_iptables), + 'use_helper_for_ns_read': 'False', } }) if env_desc.qos: @@ -338,6 +342,9 @@ 'debug': 'True', 'test_namespace_suffix': self._generate_namespace_suffix(), }) + self.config.update({ + 'agent': {'use_helper_for_ns_read': 'False'} + }) if host_desc.availability_zone: self.config['agent'].update({ 'availability_zone': host_desc.availability_zone @@ -376,8 +383,11 @@ 'dhcp_confs': self._generate_dhcp_path(), 'test_namespace_suffix': self._generate_namespace_suffix() }) + self.config.update({ + 'AGENT': {'use_helper_for_ns_read': 'False'} + }) if host_desc.availability_zone: - self.config['agent'].update({ + self.config['AGENT'].update({ 'availability_zone': host_desc.availability_zone }) diff -Nru neutron-14.0.2/neutron/tests/fullstack/test_dhcp_agent.py neutron-14.0.3/neutron/tests/fullstack/test_dhcp_agent.py --- neutron-14.0.2/neutron/tests/fullstack/test_dhcp_agent.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/fullstack/test_dhcp_agent.py 2019-10-22 19:46:03.000000000 +0000 @@ -107,8 +107,9 @@ namespace = 
dhcp_agent._get_namespace_name( self.network['id'], suffix=self.environment.hosts[0].dhcp_agent.get_namespace_suffix()) - ip = ip_lib.IPWrapper(namespace) + self.assert_namespace_exists(namespace) + ip = ip_lib.IPWrapper(namespace) devices = ip.get_devices() self.assertEqual(1, len(devices)) @@ -124,7 +125,7 @@ class TestDhcpAgentHA(BaseDhcpAgentTest): number_of_hosts = 2 - agent_down_time = 10 + agent_down_time = 30 def _wait_until_network_rescheduled(self, old_agent): def _agent_rescheduled(): diff -Nru neutron-14.0.2/neutron/tests/fullstack/test_l3_agent.py neutron-14.0.3/neutron/tests/fullstack/test_l3_agent.py --- neutron-14.0.2/neutron/tests/fullstack/test_l3_agent.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/fullstack/test_l3_agent.py 2019-10-22 19:46:04.000000000 +0000 @@ -130,10 +130,6 @@ def _get_namespace(self, router_id): return namespaces.build_ns_name(namespaces.NS_PREFIX, router_id) - def _assert_namespace_exists(self, ns_name): - common_utils.wait_until_true( - lambda: ip_lib.network_namespace_exists(ns_name)) - def test_namespace_exists(self): tenant_id = uuidutils.generate_uuid() @@ -146,7 +142,7 @@ namespace = "%s@%s" % ( self._get_namespace(router['id']), self.environment.hosts[0].l3_agent.get_namespace_suffix(), ) - self._assert_namespace_exists(namespace) + self.assert_namespace_exists(namespace) def test_mtu_update(self): tenant_id = uuidutils.generate_uuid() @@ -160,7 +156,7 @@ namespace = "%s@%s" % ( self._get_namespace(router['id']), self.environment.hosts[0].l3_agent.get_namespace_suffix(), ) - self._assert_namespace_exists(namespace) + self.assert_namespace_exists(namespace) ip = ip_lib.IPWrapper(namespace) common_utils.wait_until_true(lambda: ip.get_devices()) diff -Nru neutron-14.0.2/neutron/tests/functional/agent/l2/base.py neutron-14.0.3/neutron/tests/functional/agent/l2/base.py --- neutron-14.0.2/neutron/tests/functional/agent/l2/base.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/functional/agent/l2/base.py 2019-10-22 19:46:04.000000000 +0000 @@ -287,7 +287,8 @@ 'failed_devices_down': []} def setup_agent_rpc_mocks(self, agent, unplug_ports): - def mock_device_details(context, devices, agent_id, host=None): + def mock_device_details(context, devices, agent_id, host=None, + agent_restarted=False): details = [] for port in self.ports: if port['id'] in devices: @@ -379,6 +380,7 @@ self.ports = port_dicts self.agent = self.create_agent(create_tunnels=create_tunnels, ancillary_bridge=ancillary_bridge) + self.agent.iter_num += 1 self.polling_manager = self.start_agent(self.agent, ports=self.ports) self.network = network or self._create_test_network_dict() if trigger_resync: diff -Nru neutron-14.0.2/neutron/tests/functional/agent/l3/framework.py neutron-14.0.3/neutron/tests/functional/agent/l3/framework.py --- neutron-14.0.2/neutron/tests/functional/agent/l3/framework.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/functional/agent/l3/framework.py 2019-10-22 19:46:04.000000000 +0000 @@ -133,6 +133,12 @@ enable_pf_floating_ip), qos_policy_id=qos_policy_id) + def change_router_state(self, router_id, state): + ri = self.agent.router_info.get(router_id) + if not ri: + self.fail('Router %s is not present in the L3 agent' % router_id) + ri.ha_state = state + def _test_conntrack_disassociate_fip(self, ha): '''Test that conntrack immediately drops stateful connection that uses floating IP once it's disassociated. @@ -494,7 +500,8 @@ # so there's no need to check that explicitly. 
self.assertFalse(self._namespace_exists(router.ns_name)) common_utils.wait_until_true( - lambda: not self._metadata_proxy_exists(self.agent.conf, router)) + lambda: not self._metadata_proxy_exists(self.agent.conf, router), + timeout=10) def _assert_snat_chains(self, router): self.assertFalse(router.iptables_manager.is_chain_empty( diff -Nru neutron-14.0.2/neutron/tests/functional/agent/l3/test_ha_router.py neutron-14.0.3/neutron/tests/functional/agent/l3/test_ha_router.py --- neutron-14.0.2/neutron/tests/functional/agent/l3/test_ha_router.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/tests/functional/agent/l3/test_ha_router.py 2019-10-22 19:46:04.000000000 +0000 @@ -37,7 +37,8 @@ def test_keepalived_state_change_notification(self): enqueue_mock = mock.patch.object( - self.agent, 'enqueue_state_change').start() + self.agent, 'enqueue_state_change', + side_effect=self.change_router_state).start() router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) common_utils.wait_until_true(lambda: router.ha_state == 'master') diff -Nru neutron-14.0.2/neutron/tests/functional/agent/linux/test_ip_lib.py neutron-14.0.3/neutron/tests/functional/agent/linux/test_ip_lib.py --- neutron-14.0.2/neutron/tests/functional/agent/linux/test_ip_lib.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/functional/agent/linux/test_ip_lib.py 2019-10-22 19:46:04.000000000 +0000 @@ -21,6 +21,7 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils +from oslo_utils import uuidutils import testtools from neutron.agent.linux import ip_lib @@ -634,3 +635,29 @@ self.assertFalse(failed) self.assertEqual(expected, observed) + + +class NamespaceTestCase(functional_base.BaseSudoTestCase): + + def setUp(self): + super(NamespaceTestCase, self).setUp() + self.namespace = 'test_ns_' + uuidutils.generate_uuid() + ip_lib.create_network_namespace(self.namespace) + self.addCleanup(self._delete_namespace) + + def _delete_namespace(self): + ip_lib.delete_network_namespace(self.namespace) + + def test_network_namespace_exists_ns_exists(self): + self.assertTrue(ip_lib.network_namespace_exists(self.namespace)) + + def test_network_namespace_exists_ns_doesnt_exists(self): + self.assertFalse(ip_lib.network_namespace_exists('another_ns')) + + def test_network_namespace_exists_ns_exists_try_is_ready(self): + self.assertTrue(ip_lib.network_namespace_exists(self.namespace, + try_is_ready=True)) + + def test_network_namespace_exists_ns_doesnt_exists_try_is_ready(self): + self.assertFalse(ip_lib.network_namespace_exists('another_ns', + try_is_ready=True)) diff -Nru neutron-14.0.2/neutron/tests/functional/agent/test_l2_ovs_agent.py neutron-14.0.3/neutron/tests/functional/agent/test_l2_ovs_agent.py --- neutron-14.0.2/neutron/tests/functional/agent/test_l2_ovs_agent.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/functional/agent/test_l2_ovs_agent.py 2019-10-22 19:46:04.000000000 +0000 @@ -243,6 +243,7 @@ patch_int_ofport_before = self.agent.patch_int_ofport patch_tun_ofport_before = self.agent.patch_tun_ofport + self.stop_agent(self.agent, self.agent_thread) self.setup_agent_and_ports(port_dicts=[], create_tunnels=True) self.assertEqual(patch_int_ofport_before, self.agent.patch_int_ofport) self.assertEqual(patch_tun_ofport_before, self.agent.patch_tun_ofport) @@ -254,6 +255,7 @@ patch_int_ofport_before = self.agent.int_ofports['physnet'] patch_phys_ofport_before = 
self.agent.phys_ofports['physnet'] + self.stop_agent(self.agent, self.agent_thread) self.setup_agent_and_ports(port_dicts=[]) self.assertEqual(patch_int_ofport_before, self.agent.int_ofports['physnet']) diff -Nru neutron-14.0.2/neutron/tests/functional/base.py neutron-14.0.3/neutron/tests/functional/base.py --- neutron-14.0.2/neutron/tests/functional/base.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/functional/base.py 2019-10-22 19:46:04.000000000 +0000 @@ -16,10 +16,12 @@ import os import warnings +import mock from oslo_config import cfg from neutron.agent.linux import utils from neutron.conf.agent import common as config +from neutron.conf.agent import ovs_conf from neutron.tests import base from neutron.tests.common import base as common_base from neutron.tests.common import helpers @@ -29,6 +31,14 @@ 'dsvm-functional-logs') +def config_decorator(method_to_decorate, config_tuples): + def wrapper(*args, **kwargs): + method_to_decorate(*args, **kwargs) + for config_tuple in config_tuples: + cfg.CONF.set_override(*config_tuple) + return wrapper + + class BaseLoggingTestCase(base.BaseTestCase): def setUp(self): super(BaseLoggingTestCase, self).setUp() @@ -66,6 +76,7 @@ self.skipTest('Testing with sudo is not enabled') self.setup_rootwrap() config.setup_privsep() + self._override_default_config() @common_base.no_skip_on_missing_deps def check_command(self, cmd, error_text, skip_msg, run_as_root=False): @@ -75,3 +86,13 @@ if error_text in str(e): self.skipTest(skip_msg) raise + + @staticmethod + def _override_default_config(): + # NOTE(ralonsoh): once https://review.openstack.org/#/c/641681/ is + # merged, we should increase the default value of those new parameters. + ovs_agent_opts = [('ovsdb_timeout', 30, 'OVS')] + ovs_agent_decorator = config_decorator( + ovs_conf.register_ovs_agent_opts, ovs_agent_opts) + mock.patch.object(ovs_conf, 'register_ovs_agent_opts', + new=ovs_agent_decorator).start() diff -Nru neutron-14.0.2/neutron/tests/functional/pecan_wsgi/test_controllers.py neutron-14.0.3/neutron/tests/functional/pecan_wsgi/test_controllers.py --- neutron-14.0.2/neutron/tests/functional/pecan_wsgi/test_controllers.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/tests/functional/pecan_wsgi/test_controllers.py 2019-10-22 19:46:03.000000000 +0000 @@ -492,10 +492,45 @@ 'tenant_id': 'tenid'}] }, headers={'X-Project-Id': 'tenid'}) - self.assertEqual(response.status_int, 201) + self.assertEqual(201, response.status_int) json_body = jsonutils.loads(response.body) self.assertIn('ports', json_body) - self.assertEqual(2, len(json_body['ports'])) + ports = json_body['ports'] + self.assertEqual(2, len(ports)) + for port in ports: + self.assertEqual(1, len(port['security_groups'])) + + def test_bulk_create_with_sg(self): + sg_response = self.app.post_json( + '/v2.0/security-groups.json', + params={'security_group': { + "name": "functest", + "description": "Functional test"}}, + headers={'X-Project-Id': 'tenid'}) + self.assertEqual(201, sg_response.status_int) + sg_json_body = jsonutils.loads(sg_response.body) + self.assertIn('security_group', sg_json_body) + sg_id = sg_json_body['security_group']['id'] + + port_response = self.app.post_json( + '/v2.0/ports.json', + params={'ports': [{'network_id': self.port['network_id'], + 'admin_state_up': True, + 'security_groups': [sg_id], + 'tenant_id': 'tenid'}, + {'network_id': self.port['network_id'], + 'admin_state_up': True, + 'security_groups': [sg_id], + 'tenant_id': 'tenid'}] + }, + headers={'X-Project-Id': 
'tenid'}) + self.assertEqual(201, port_response.status_int) + json_body = jsonutils.loads(port_response.body) + self.assertIn('ports', json_body) + ports = json_body['ports'] + self.assertEqual(2, len(ports)) + for port in ports: + self.assertEqual(1, len(port['security_groups'])) def test_emulated_bulk_create(self): self.plugin._FORCE_EMULATED_BULK = True diff -Nru neutron-14.0.2/neutron/tests/functional/privileged/agent/linux/test_ip_lib.py neutron-14.0.3/neutron/tests/functional/privileged/agent/linux/test_ip_lib.py --- neutron-14.0.2/neutron/tests/functional/privileged/agent/linux/test_ip_lib.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/tests/functional/privileged/agent/linux/test_ip_lib.py 2019-10-22 19:46:04.000000000 +0000 @@ -12,6 +12,8 @@ # License for the specific language governing permissions and limitations # under the License. +import random + from oslo_utils import uuidutils import testtools @@ -183,42 +185,45 @@ self.assertEqual(sorted(interfaces_tested), sorted(self.interfaces + vxlan_interfaces)) + def _retrieve_interface(self, interface_name, namespace): + for device in priv_ip_lib.get_link_devices(namespace): + if interface_name == ip_lib.get_attr(device, 'IFLA_IFNAME'): + return device + else: + self.fail('Interface "%s" not found' % interface_name) + def test_get_devices_info_veth_different_namespaces(self): namespace2 = 'ns_test-' + uuidutils.generate_uuid() priv_ip_lib.create_netns(namespace2) self.addCleanup(self._remove_ns, namespace2) + # Create a random number of dummy interfaces in namespace2, in order + # to increase the 'veth1_2' interface index in its namespace. + for idx in range(5, random.randint(15, 20)): + priv_ip_lib.create_interface('int_%s' % idx, namespace2, 'dummy') + ip_wrapper = ip_lib.IPWrapper(self.namespace) ip_wrapper.add_veth('veth1_1', 'veth1_2', namespace2) - devices = priv_ip_lib.get_link_devices(self.namespace) - for device in devices: - name = ip_lib.get_attr(device, 'IFLA_IFNAME') - if name == 'veth1_1': - veth1_1 = device - break - else: - self.fail('Interface "veth1_1" not found') + veth1_1 = self._retrieve_interface('veth1_1', self.namespace) + veth1_2 = self._retrieve_interface('veth1_2', namespace2) ifla_linkinfo = ip_lib.get_attr(veth1_1, 'IFLA_LINKINFO') self.assertEqual(ip_lib.get_attr(ifla_linkinfo, 'IFLA_INFO_KIND'), 'veth') - self.assertIsNone(ip_lib.get_attr(veth1_1, 'IFLA_LINK')) + # NOTE(ralonsoh): since kernel_version=4.15.0-60-generic, iproute2 + # provides the veth pair index, even if the pair interface is in other + # namespace. In previous versions, the parameter 'IFLA_LINK' was not + # present. We need to handle both cases. 
+ self.assertIn(ip_lib.get_attr(veth1_1, 'IFLA_LINK'), + [None, veth1_2['index']]) def test_get_devices_info_veth_same_namespaces(self): ip_wrapper = ip_lib.IPWrapper(self.namespace) ip_wrapper.add_veth('veth1_1', 'veth1_2') - devices = priv_ip_lib.get_link_devices(self.namespace) - veth1_1 = veth1_2 = None - for device in devices: - name = ip_lib.get_attr(device, 'IFLA_IFNAME') - if name == 'veth1_1': - veth1_1 = device - elif name == 'veth1_2': - veth1_2 = device + veth1_1 = self._retrieve_interface('veth1_1', self.namespace) + veth1_2 = self._retrieve_interface('veth1_2', self.namespace) - self.assertIsNotNone(veth1_1) - self.assertIsNotNone(veth1_2) veth1_1_link = ip_lib.get_attr(veth1_1, 'IFLA_LINK') veth1_2_link = ip_lib.get_attr(veth1_2, 'IFLA_LINK') self.assertEqual(veth1_1['index'], veth1_2_link) diff -Nru neutron-14.0.2/neutron/tests/unit/agent/dhcp/test_agent.py neutron-14.0.3/neutron/tests/unit/agent/dhcp/test_agent.py --- neutron-14.0.2/neutron/tests/unit/agent/dhcp/test_agent.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/agent/dhcp/test_agent.py 2019-10-22 19:46:04.000000000 +0000 @@ -482,6 +482,32 @@ # should have been called with all ports again after the failure ready.assert_has_calls([mock.call(set(range(4)))] * 2) + def test_dhcp_ready_ports_loop_with_limit_ports_per_call(self): + dhcp = dhcp_agent.DhcpAgent(HOSTNAME) + sync_max = dhcp_agent.DHCP_READY_PORTS_SYNC_MAX + port_count = sync_max + 1 + dhcp.dhcp_ready_ports = set(range(port_count)) + + with mock.patch.object(dhcp.plugin_rpc, + 'dhcp_ready_on_ports') as ready: + # exit after 2 iterations + with mock.patch.object(dhcp_agent.eventlet, 'sleep', + side_effect=[0, 0, RuntimeError]): + with testtools.ExpectedException(RuntimeError): + dhcp._dhcp_ready_ports_loop() + + # all ports should have been processed + self.assertEqual(set(), dhcp.dhcp_ready_ports) + # two calls are expected, one with DHCP_READY_PORTS_SYNC_MAX ports, + # second one with one port + self.assertEqual(2, ready.call_count) + self.assertEqual(sync_max, len(ready.call_args_list[0][0][0])) + self.assertEqual(1, len(ready.call_args_list[1][0][0])) + # all ports need to be ready + ports_ready = (ready.call_args_list[0][0][0] | + ready.call_args_list[1][0][0]) + self.assertEqual(set(range(port_count)), ports_ready) + def test_dhcp_ready_ports_updates_after_enable_dhcp(self): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) self.assertEqual(set(), dhcp.dhcp_ready_ports) diff -Nru neutron-14.0.2/neutron/tests/unit/agent/l3/test_agent.py neutron-14.0.3/neutron/tests/unit/agent/l3/test_agent.py --- neutron-14.0.2/neutron/tests/unit/agent/l3/test_agent.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/agent/l3/test_agent.py 2019-10-22 19:46:04.000000000 +0000 @@ -228,23 +228,59 @@ # Make sure the exceptional code path has coverage agent.enqueue_state_change(non_existent_router, 'master') + def _enqueue_state_change_transitions(self, transitions, num_called): + self.conf.set_override('ha_vrrp_advert_int', 1) + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + agent._update_transition_state('router_id') + with mock.patch.object(agent, '_get_router_info', return_value=None) \ + as mock_get_router_info: + for state in transitions: + agent.enqueue_state_change('router_id', state) + eventlet.sleep(0.2) + # NOTE(ralonsoh): the wait process should be done inside the mock + # context, to allow the spawned thread to call the mocked function + # before the context ends. 
+ eventlet.sleep(self.conf.ha_vrrp_advert_int + 2) + + if num_called: + mock_get_router_info.assert_has_calls( + [mock.call('router_id') for _ in range(num_called)]) + else: + mock_get_router_info.assert_not_called() + + def test_enqueue_state_change_from_none_to_master(self): + self._enqueue_state_change_transitions(['master'], 1) + + def test_enqueue_state_change_from_none_to_backup(self): + self._enqueue_state_change_transitions(['backup'], 1) + + def test_enqueue_state_change_from_none_to_master_to_backup(self): + self._enqueue_state_change_transitions(['master', 'backup'], 0) + + def test_enqueue_state_change_from_none_to_backup_to_master(self): + self._enqueue_state_change_transitions(['backup', 'master'], 2) + def test_enqueue_state_change_metadata_disable(self): self.conf.set_override('enable_metadata_proxy', False) + self.conf.set_override('ha_vrrp_advert_int', 1) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = mock.Mock() router_info = mock.MagicMock() agent.router_info[router.id] = router_info agent._update_metadata_proxy = mock.Mock() agent.enqueue_state_change(router.id, 'master') + eventlet.sleep(self.conf.ha_vrrp_advert_int + 2) self.assertFalse(agent._update_metadata_proxy.call_count) def test_enqueue_state_change_l3_extension(self): + self.conf.set_override('ha_vrrp_advert_int', 1) agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = mock.Mock() router_info = mock.MagicMock() agent.router_info[router.id] = router_info agent.l3_ext_manager.ha_state_change = mock.Mock() agent.enqueue_state_change(router.id, 'master') + eventlet.sleep(self.conf.ha_vrrp_advert_int + 2) agent.l3_ext_manager.ha_state_change.assert_called_once_with( agent.context, {'router_id': router.id, 'state': 'master'}) diff -Nru neutron-14.0.2/neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py neutron-14.0.3/neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py --- neutron-14.0.2/neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/agent/linux/openvswitch_firewall/test_firewall.py 2019-10-22 19:46:04.000000000 +0000 @@ -474,6 +474,7 @@ self.mock_bridge.br.get_vif_port_by_id.return_value = \ fake_ovs_port port = self.firewall.get_or_create_ofport(port_dict) + self.assertIn(of_port.id, self.firewall.sg_port_map.ports.keys()) self.assertEqual(port.ofport, 2) def test_get_or_create_ofport_missing(self): diff -Nru neutron-14.0.2/neutron/tests/unit/agent/linux/test_dhcp.py neutron-14.0.3/neutron/tests/unit/agent/linux/test_dhcp.py --- neutron-14.0.2/neutron/tests/unit/agent/linux/test_dhcp.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/agent/linux/test_dhcp.py 2019-10-22 19:46:04.000000000 +0000 @@ -1252,7 +1252,8 @@ def _test_spawn(self, extra_options, network=FakeDualNetwork(), max_leases=16777216, lease_duration=86400, has_static=True, no_resolv='--no-resolv', - has_stateless=True, dhcp_t1=0, dhcp_t2=0): + has_stateless=True, dhcp_t1=0, dhcp_t2=0, + bridged=True): def mock_get_conf_file_name(kind): return '/dhcp/%s/%s' % (network.id, kind) @@ -1273,8 +1274,12 @@ '--dhcp-match=set:ipxe,175', '--dhcp-userclass=set:ipxe6,iPXE', '--local-service', - '--bind-interfaces', + '--bind-dynamic', ] + if not bridged: + expected += [ + '--bridge-interface=tap0,tap*' + ] seconds = '' if lease_duration == -1: @@ -1348,6 +1353,11 @@ def test_spawn(self): self._test_spawn(['--conf-file=', '--domain=openstacklocal']) + def test_spawn_not_bridged(self): + 
self.mock_mgr.return_value.driver.bridged = False + self._test_spawn(['--conf-file=', '--domain=openstacklocal'], + bridged=False) + def test_spawn_infinite_lease_duration(self): self.conf.set_override('dhcp_lease_duration', -1) self._test_spawn(['--conf-file=', '--domain=openstacklocal'], diff -Nru neutron-14.0.2/neutron/tests/unit/db/test_db_base_plugin_v2.py neutron-14.0.3/neutron/tests/unit/db/test_db_base_plugin_v2.py --- neutron-14.0.2/neutron/tests/unit/db/test_db_base_plugin_v2.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/db/test_db_base_plugin_v2.py 2019-10-22 19:46:04.000000000 +0000 @@ -586,6 +586,9 @@ if neutron_context: # create a specific auth context for this request req.environ['neutron.context'] = neutron_context + elif hasattr(self, 'tenant_id'): + req.environ['neutron.context'] = context.Context('', + self.tenant_id) return req.get_response(self._api_for_resource(resource)) def _show(self, resource, id, @@ -5233,6 +5236,19 @@ self._test_list_resources('subnet', [], query_params=query_params) + def test_list_subnets_filtering_by_cidr_used_on_create(self): + with self.network() as network: + with self.subnet(network=network, + gateway_ip='10.0.0.1', + cidr='10.0.0.11/24') as v1,\ + self.subnet(network=network, + gateway_ip='10.0.1.1', + cidr='10.0.1.11/24') as v2: + subnets = (v1, v2) + query_params = ('cidr=10.0.0.11/24&cidr=10.0.1.11/24') + self._test_list_resources('subnet', subnets, + query_params=query_params) + def test_list_subnets_filtering_by_unknown_filter(self): if self._skip_filter_validation: self.skipTest("Plugin does not support filter validation") diff -Nru neutron-14.0.2/neutron/tests/unit/db/test_ipam_pluggable_backend.py neutron-14.0.3/neutron/tests/unit/db/test_ipam_pluggable_backend.py --- neutron-14.0.2/neutron/tests/unit/db/test_ipam_pluggable_backend.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/db/test_ipam_pluggable_backend.py 2019-10-22 19:46:04.000000000 +0000 @@ -687,8 +687,8 @@ original_ips, new_ips, mac) mocks['driver'].get_address_request_factory.assert_called_once_with() mocks['ipam']._ipam_get_subnets.assert_called_once_with( - context, network_id=port_dict['network_id'], host=None, - service_type=port_dict['device_owner']) + context, network_id=port_dict['network_id'], fixed_configured=True, + host=None, service_type=port_dict['device_owner']) # Validate port_dict is passed into address_factory address_factory.get_request.assert_called_once_with(context, port_dict, diff -Nru neutron-14.0.2/neutron/tests/unit/db/test_l3_db.py neutron-14.0.3/neutron/tests/unit/db/test_l3_db.py --- neutron-14.0.2/neutron/tests/unit/db/test_l3_db.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/db/test_l3_db.py 2019-10-22 19:46:04.000000000 +0000 @@ -22,6 +22,7 @@ from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import l3 as l3_exc +from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from neutron_lib.plugins import utils as plugin_utils from oslo_utils import uuidutils @@ -30,8 +31,12 @@ from neutron.db import l3_db from neutron.db.models import l3 as l3_models from neutron.objects import base as base_obj +from neutron.objects import network as network_obj +from neutron.objects import ports as port_obj from neutron.objects import router as l3_obj +from neutron.objects import subnet as subnet_obj from neutron.tests import base +from neutron.tests.unit.db import 
test_db_base_plugin_v2 class TestL3_NAT_dbonly_mixin(base.BaseTestCase): @@ -372,3 +377,148 @@ self.assertRaises( n_exc.BadRequest, self.db.add_router_interface, mock.Mock(), router_db.id) + + +class FakeL3Plugin(l3_db.L3_NAT_dbonly_mixin): + pass + + +class L3TestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): + + GET_PORTS_BY_ROUTER_MSG = ( + 'The following ports, assigned to router %(router_id)s, do not have a ' + '"routerport" register: %(port_ids)s') + + def setUp(self, *args, **kwargs): + super(L3TestCase, self).setUp(plugin='ml2') + self.core_plugin = directory.get_plugin() + self.ctx = context.get_admin_context() + self.mixin = FakeL3Plugin() + directory.add_plugin(plugin_constants.L3, self.mixin) + self.network = self.create_network() + self.subnets = [] + self.subnets.append(self.create_subnet(self.network, '1.1.1.1', + '1.1.1.0/24')) + self.subnets.append(self.create_subnet(self.network, '1.1.2.1', + '1.1.2.0/24')) + router = {'router': {'name': 'foo_router', 'admin_state_up': True, + 'tenant_id': 'foo_tenant'}} + self.router = self.create_router(router) + self.ports = [] + for subnet in self.subnets: + ipa = str(netaddr.IPNetwork(subnet['subnet']['cidr']).ip + 10) + fixed_ips = [{'subnet_id': subnet['subnet']['id'], + 'ip_address': ipa}] + self.ports.append(self.create_port( + self.network['network']['id'], {'fixed_ips': fixed_ips})) + self.addCleanup(self._clean_objs) + + def _clean_objs(self): + port_obj.Port.delete_objects( + self.ctx, network_id=self.network['network']['id']) + subnet_obj.Subnet.delete_objects( + self.ctx, network_id=self.network['network']['id']) + network_obj.Network.get_object( + self.ctx, id=self.network['network']['id']).delete() + l3_obj.Router.get_object(self.ctx, id=self.router['id']).delete() + + def create_router(self, router): + with self.ctx.session.begin(subtransactions=True): + return self.mixin.create_router(self.ctx, router) + + def create_port(self, net_id, port_info): + with self.ctx.session.begin(subtransactions=True): + return self._make_port(self.fmt, net_id, **port_info) + + def create_network(self, name=None, **kwargs): + name = name or 'network1' + with self.ctx.session.begin(subtransactions=True): + return self._make_network(self.fmt, name, True, **kwargs) + + def create_subnet(self, network, gateway, cidr, **kwargs): + with self.ctx.session.begin(subtransactions=True): + return self._make_subnet(self.fmt, network, gateway, cidr, + **kwargs) + + def _add_router_interfaces(self): + return [self.mixin.add_router_interface( + self.ctx, self.router['id'], + interface_info={'port_id': port['port']['id']}) + for port in self.ports] + + def _check_routerports(self, ri_statuses): + port_ids = [] + for idx, ri_status in enumerate(ri_statuses): + rp_obj = l3_obj.RouterPort.get_object( + self.ctx, port_id=self.ports[idx]['port']['id'], + router_id=self.router['id']) + if ri_status: + self.assertEqual(self.ports[idx]['port']['id'], rp_obj.port_id) + port_ids.append(rp_obj.port_id) + else: + self.assertIsNone(rp_obj) + + _router_obj = l3_obj.Router.get_object(self.ctx, id=self.router['id']) + router_port_ids = [rp.port_id for rp in + _router_obj.db_obj.attached_ports] + self.assertEqual(sorted(port_ids), sorted(router_port_ids)) + + @mock.patch.object(port_obj, 'LOG') + def test_remove_router_interface_by_port(self, mock_log): + self._add_router_interfaces() + self._check_routerports((True, True)) + + interface_info = {'port_id': self.ports[0]['port']['id']} + self.mixin.remove_router_interface(self.ctx, self.router['id'], + 
interface_info) + mock_log.warning.assert_not_called() + self._check_routerports((False, True)) + + @mock.patch.object(port_obj, 'LOG') + def test_remove_router_interface_by_port_removed_rport(self, mock_log): + self._add_router_interfaces() + self._check_routerports((True, True)) + + rp_obj = l3_obj.RouterPort.get_object( + self.ctx, router_id=self.router['id'], + port_id=self.ports[0]['port']['id']) + rp_obj.delete() + + interface_info = {'port_id': self.ports[0]['port']['id']} + self.mixin.remove_router_interface(self.ctx, self.router['id'], + interface_info) + msg_vars = {'router_id': self.router['id'], + 'port_ids': {self.ports[0]['port']['id']}} + mock_log.warning.assert_called_once_with(self.GET_PORTS_BY_ROUTER_MSG, + msg_vars) + self._check_routerports((False, True)) + + @mock.patch.object(port_obj, 'LOG') + def test_remove_router_interface_by_subnet(self, mock_log): + self._add_router_interfaces() + self._check_routerports((True, True)) + + interface_info = {'subnet_id': self.subnets[1]['subnet']['id']} + self.mixin.remove_router_interface(self.ctx, self.router['id'], + interface_info) + mock_log.warning.assert_not_called() + self._check_routerports((True, False)) + + @mock.patch.object(port_obj, 'LOG') + def test_remove_router_interface_by_subnet_removed_rport(self, mock_log): + self._add_router_interfaces() + self._check_routerports((True, True)) + + rp_obj = l3_obj.RouterPort.get_object( + self.ctx, router_id=self.router['id'], + port_id=self.ports[0]['port']['id']) + rp_obj.delete() + + interface_info = {'subnet_id': self.subnets[0]['subnet']['id']} + self.mixin.remove_router_interface(self.ctx, self.router['id'], + interface_info) + msg_vars = {'router_id': self.router['id'], + 'port_ids': {self.ports[0]['port']['id']}} + mock_log.warning.assert_called_once_with(self.GET_PORTS_BY_ROUTER_MSG, + msg_vars) + self._check_routerports((False, True)) diff -Nru neutron-14.0.2/neutron/tests/unit/db/test_l3_dvr_db.py neutron-14.0.3/neutron/tests/unit/db/test_l3_dvr_db.py --- neutron-14.0.2/neutron/tests/unit/db/test_l3_dvr_db.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/db/test_l3_dvr_db.py 2019-10-22 19:46:04.000000000 +0000 @@ -132,6 +132,32 @@ self.assertEqual(1, self.mixin._migrate_router_ports.call_count) + + def test_update_router_db_distributed_to_centralized(self): + router = {'name': 'foo_router', 'admin_state_up': True, + 'distributed': True} + agent = {'id': _uuid(), 'host': 'xyz'} + router_db = self._create_router(router) + router_id = router_db['id'] + self.assertTrue(router_db.extra_attributes.distributed) + self.mixin._get_router = mock.Mock(return_value=router_db) + self.mixin._validate_router_migration = mock.Mock() + self.mixin._migrate_router_ports = mock.Mock() + self.mixin._core_plugin.\ + delete_distributed_port_bindings_by_router_id = mock.Mock() + self.mixin.list_l3_agents_hosting_router = mock.Mock( + return_value={'agents': [agent]}) + self.mixin._unbind_router = mock.Mock() + updated_router = self.mixin.update_router(self.ctx, router_id, + {'router': {'distributed': False}}) + # Assert that the DB value has changed + self.assertFalse(updated_router['distributed']) + self.assertEqual(1, + self.mixin._migrate_router_ports.call_count) + self.assertEqual( + 1, + self.mixin._core_plugin.
+ delete_distributed_port_bindings_by_router_id.call_count) + def _test_get_device_owner(self, is_distributed=False, expected=const.DEVICE_OWNER_ROUTER_INTF, pass_router_id=True): diff -Nru neutron-14.0.2/neutron/tests/unit/db/test_rbac_db_mixin.py neutron-14.0.3/neutron/tests/unit/db/test_rbac_db_mixin.py --- neutron-14.0.2/neutron/tests/unit/db/test_rbac_db_mixin.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/db/test_rbac_db_mixin.py 2019-10-22 19:46:04.000000000 +0000 @@ -54,6 +54,7 @@ 'admin_state_up': True, 'device_id': 'device_id', 'device_owner': 'device_owner', + 'project_id': target_tenant, 'tenant_id': target_tenant}} port = self.plugin.create_port(self.context, test_port) diff -Nru neutron-14.0.2/neutron/tests/unit/extensions/test_segment.py neutron-14.0.3/neutron/tests/unit/extensions/test_segment.py --- neutron-14.0.2/neutron/tests/unit/extensions/test_segment.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/extensions/test_segment.py 2019-10-22 19:46:04.000000000 +0000 @@ -1173,8 +1173,8 @@ self.assertEqual(segment_exc.HostConnectedToMultipleSegments.__name__, res['NeutronError']['type']) - def test_port_update_excludes_hosts_on_segments(self): - """No binding information is provided, subnets on segments""" + def test_port_update_with_fixed_ips_ok_if_no_binding_host(self): + """No binding host information is provided, subnets on segments""" with self.network() as network: segment = self._test_create_segment( network_id=network['network']['id'], @@ -1194,7 +1194,33 @@ port_req = self.new_update_request('ports', data, port_id) response = port_req.get_response(self.api) - # Gets bad request because there are no eligible subnets. + # The IP is allocated since there is no binding host info any + # subnet can be used for allocation. + self.assertEqual(webob.exc.HTTPOk.code, response.status_int) + + def test_port_update_with_fixed_ips_fail_if_host_not_on_segment(self): + """Binding information is provided, subnets on segments. Update to + subnet on different segment fails. + """ + network, segments, subnets = self._create_test_segments_with_subnets(2) + + # Setup host mappings + self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost')]) + + # Create a port and validate immediate ip allocation + res = self._create_port_and_show(network, + arg_list=(portbindings.HOST_ID,), + **{portbindings.HOST_ID: 'fakehost'}) + self._validate_immediate_ip_allocation(res['port']['id']) + + # Try requesting an new IP, but the subnet does not match host segment + port_id = res['port']['id'] + data = {'port': { + 'fixed_ips': [{'subnet_id': subnets[1]['subnet']['id']}]}} + port_req = self.new_update_request('ports', data, port_id) + response = port_req.get_response(self.api) + + # Port update fails. 
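The assertion just below confirms the 400 response. Together, these two tests pin down the rule for routed provider networks: with no binding host, any subnet on the network may satisfy a fixed-ip request, while once a host is bound the subnet must belong to a segment mapped to that host. A minimal sketch of that eligibility check, with plain dicts and a hypothetical candidate_subnets() helper standing in for Neutron's IPAM code:

    def candidate_subnets(subnets, host_segments, binding_host=None):
        """Return the subnets eligible for a fixed-ip request."""
        if binding_host is None:
            # No binding host info: any subnet can be used for allocation.
            return list(subnets)
        reachable = host_segments.get(binding_host, set())
        return [s for s in subnets if s['segment_id'] in reachable]

    subnets = [{'id': 'subnet-a', 'segment_id': 'seg-1'},
               {'id': 'subnet-b', 'segment_id': 'seg-2'}]
    mappings = {'fakehost': {'seg-1'}}  # like _setup_host_mappings above
    assert len(candidate_subnets(subnets, mappings)) == 2
    assert [s['id'] for s in candidate_subnets(
        subnets, mappings, 'fakehost')] == ['subnet-a']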
self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int) def _create_port_and_show(self, network, **kwargs): diff -Nru neutron-14.0.2/neutron/tests/unit/notifiers/test_batch_notifier.py neutron-14.0.3/neutron/tests/unit/notifiers/test_batch_notifier.py --- neutron-14.0.2/neutron/tests/unit/notifiers/test_batch_notifier.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/notifiers/test_batch_notifier.py 2019-10-22 19:46:04.000000000 +0000 @@ -16,6 +16,7 @@ import eventlet import mock +from neutron.common import utils from neutron.notifiers import batch_notifier from neutron.tests import base @@ -23,41 +24,54 @@ class TestBatchNotifier(base.BaseTestCase): def setUp(self): super(TestBatchNotifier, self).setUp() - self.notifier = batch_notifier.BatchNotifier(0.1, lambda x: x) - self.spawn_n_p = mock.patch('eventlet.spawn_n') - self.spawn_n = self.spawn_n_p.start() + self._received_events = eventlet.Queue() + self.notifier = batch_notifier.BatchNotifier(2, self._queue_events) + self.spawn_n_p = mock.patch.object(eventlet, 'spawn_n') + + def _queue_events(self, events): + for event in events: + self._received_events.put(event) def test_queue_event_no_event(self): + spawn_n = self.spawn_n_p.start() self.notifier.queue_event(None) - self.assertEqual(0, len(self.notifier.pending_events)) - self.assertEqual(0, self.spawn_n.call_count) + self.assertEqual(0, len(self.notifier._pending_events.queue)) + self.assertEqual(0, spawn_n.call_count) def test_queue_event_first_event(self): + spawn_n = self.spawn_n_p.start() self.notifier.queue_event(mock.Mock()) - self.assertEqual(1, len(self.notifier.pending_events)) - self.assertEqual(1, self.spawn_n.call_count) + self.assertEqual(1, len(self.notifier._pending_events.queue)) + self.assertEqual(1, spawn_n.call_count) - def test_queue_event_multiple_events(self): - self.spawn_n_p.stop() - c_mock = mock.patch.object(self.notifier, 'callback').start() - events = 6 - for i in range(0, events): - self.notifier.queue_event(mock.Mock()) + def test_queue_event_multiple_events_notify_method(self): + def _batch_notifier_dequeue(): + while not self.notifier._pending_events.empty(): + self.notifier._pending_events.get() + + c_mock = mock.patch.object(self.notifier, '_notify', + side_effect=_batch_notifier_dequeue).start() + events = 20 + for i in range(events): + self.notifier.queue_event('Event %s' % i) eventlet.sleep(0) # yield to let coro execute - while self.notifier.pending_events: - # wait for coroutines to finish - eventlet.sleep(0.1) + utils.wait_until_true(self.notifier._pending_events.empty, + timeout=5) + # Called twice: when the first thread calls "synced_send" and then, + # in the same loop, when self._pending_events is not empty(). All + # self.notifier.queue_event calls are done in just one + # "batch_interval" (2 secs). 
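Before the call-count assertions below, a recap of what the refactor changed: events now accumulate in a queue, and everything gathered during one interval is handed to the callback as a single ordered batch. A self-contained sketch of that pattern with a hypothetical MiniBatchNotifier (the real BatchNotifier additionally spawns an eventlet worker per interval; that part is reduced here to an explicit flush() call):

    import queue

    class MiniBatchNotifier(object):
        """Collect events, then flush them to the callback in one batch."""

        def __init__(self, callback):
            self._pending_events = queue.Queue()
            self._callback = callback

        def queue_event(self, event):
            if event is not None:
                self._pending_events.put(event)

        def flush(self):
            # In the agent this runs in a spawned worker once per
            # batch_interval; here it is driven by the caller.
            batch = []
            while not self._pending_events.empty():
                batch.append(self._pending_events.get())
            if batch:
                self._callback(batch)

    received = []
    notifier = MiniBatchNotifier(received.extend)
    for i in range(5):
        notifier.queue_event('Event %s' % i)
    notifier.flush()
    assert received == ['Event %s' % i for i in range(5)]  # order kept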
self.assertEqual(2, c_mock.call_count) - self.assertEqual(6, sum(len(c[0][0]) for c in c_mock.call_args_list)) - self.assertEqual(0, len(self.notifier.pending_events)) - def test_queue_event_call_send_events(self): - with mock.patch.object(self.notifier, - 'callback') as send_events: - self.spawn_n.side_effect = lambda func: func() - self.notifier.queue_event(mock.Mock()) - while self.notifier.pending_events: - # wait for coroutines to finish - eventlet.sleep(0.1) - self.assertTrue(send_events.called) + def test_queue_event_multiple_events_callback_method(self): + events = 20 + for i in range(events): + self.notifier.queue_event('Event %s' % i) + eventlet.sleep(0) # yield to let coro execute + + utils.wait_until_true(self.notifier._pending_events.empty, + timeout=5) + expected = ['Event %s' % i for i in range(events)] + # Check the events have been handled in the same input order. + self.assertEqual(expected, list(self._received_events.queue)) diff -Nru neutron-14.0.2/neutron/tests/unit/notifiers/test_nova.py neutron-14.0.3/neutron/tests/unit/notifiers/test_nova.py --- neutron-14.0.2/neutron/tests/unit/notifiers/test_nova.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/notifiers/test_nova.py 2019-10-22 19:46:04.000000000 +0000 @@ -294,7 +294,7 @@ self.nova_notifier.send_network_change( 'update_floatingip', original_obj, returned_obj) self.assertEqual( - 2, len(self.nova_notifier.batch_notifier.pending_events)) + 2, len(self.nova_notifier.batch_notifier._pending_events.queue)) returned_obj_non = {'floatingip': {'port_id': None}} event_dis = self.nova_notifier.create_port_changed_event( @@ -302,9 +302,10 @@ event_assoc = self.nova_notifier.create_port_changed_event( 'update_floatingip', original_obj, returned_obj) self.assertEqual( - self.nova_notifier.batch_notifier.pending_events[0], event_dis) + self.nova_notifier.batch_notifier._pending_events.get(), event_dis) self.assertEqual( - self.nova_notifier.batch_notifier.pending_events[1], event_assoc) + self.nova_notifier.batch_notifier._pending_events.get(), + event_assoc) def test_delete_port_notify(self): device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' @@ -365,6 +366,7 @@ self.nova_notifier.notify_port_active_direct(port) self.assertEqual( - 1, len(self.nova_notifier.batch_notifier.pending_events)) - self.assertEqual(expected_event, - self.nova_notifier.batch_notifier.pending_events[0]) + 1, len(self.nova_notifier.batch_notifier._pending_events.queue)) + self.assertEqual( + expected_event, + self.nova_notifier.batch_notifier._pending_events.get()) diff -Nru neutron-14.0.2/neutron/tests/unit/objects/qos/test_policy.py neutron-14.0.3/neutron/tests/unit/objects/qos/test_policy.py --- neutron-14.0.2/neutron/tests/unit/objects/qos/test_policy.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/objects/qos/test_policy.py 2019-10-22 19:46:04.000000000 +0000 @@ -112,50 +112,33 @@ # TODO(ihrachys): stop overriding those test cases, instead base test cases # should be expanded if there are missing bits there to support QoS objects def test_get_objects(self): - admin_context = self.context.elevated() - with mock.patch.object(self.context, 'elevated', - return_value=admin_context) as context_mock: - objs = self._test_class.get_objects(self.context) - context_mock.assert_called_once_with() + objs = self._test_class.get_objects(self.context) self.get_objects_mock.assert_any_call( - self._test_class, admin_context, _pager=None) + self._test_class, self.context, _pager=None) self.assertItemsEqual( 
[test_base.get_obj_persistent_fields(obj) for obj in self.objs], [test_base.get_obj_persistent_fields(obj) for obj in objs]) def test_get_objects_valid_fields(self): - admin_context = self.context.elevated() - with mock.patch.object( db_api, 'get_objects', return_value=[self.db_objs[0]]) as get_objects_mock: - - with mock.patch.object( - self.context, - 'elevated', - return_value=admin_context) as context_mock: - - objs = self._test_class.get_objects( - self.context, - **self.valid_field_filter) - context_mock.assert_called_once_with() + objs = self._test_class.get_objects( + self.context, + **self.valid_field_filter) get_objects_mock.assert_any_call( - self._test_class, admin_context, _pager=None, + self._test_class, self.context, _pager=None, **self.valid_field_filter) self._check_equal(self.objs[0], objs[0]) def test_get_object(self): - admin_context = self.context.elevated() with mock.patch.object(db_api, 'get_object', - return_value=self.db_objs[0]) as get_object_mock, \ - mock.patch.object(self.context, 'elevated', - return_value=admin_context) as context_mock: + return_value=self.db_objs[0]) as get_object_mock: obj = self._test_class.get_object(self.context, id='fake_id') self.assertTrue(self._is_test_class(obj)) self._check_equal(self.objs[0], obj) - context_mock.assert_called_once_with() get_object_mock.assert_called_once_with( - self._test_class, admin_context, id='fake_id') + self._test_class, self.context, id='fake_id') def test_to_dict_makes_primitive_field_value(self): # is_shared_with_tenant requires DB diff -Nru neutron-14.0.2/neutron/tests/unit/objects/test_base.py neutron-14.0.3/neutron/tests/unit/objects/test_base.py --- neutron-14.0.2/neutron/tests/unit/objects/test_base.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/objects/test_base.py 2019-10-22 19:46:04.000000000 +0000 @@ -1523,10 +1523,12 @@ objclass.db_model(**objclass_fields) ] - def _create_test_network(self, name='test-network1', network_id=None): + def _create_test_network(self, name='test-network1', network_id=None, + qos_policy_id=None): network_id = (uuidutils.generate_uuid() if network_id is None else network_id) - _network = net_obj.Network(self.context, name=name, id=network_id) + _network = net_obj.Network(self.context, name=name, id=network_id, + qos_policy_id=qos_policy_id) _network.create() return _network diff -Nru neutron-14.0.2/neutron/tests/unit/objects/test_objects.py neutron-14.0.3/neutron/tests/unit/objects/test_objects.py --- neutron-14.0.2/neutron/tests/unit/objects/test_objects.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/objects/test_objects.py 2019-10-22 19:46:04.000000000 +0000 @@ -63,7 +63,7 @@ 'NetworkRBAC': '1.2-192845c5ed0718e1c54fac36936fcd7d', 'NetworkSegment': '1.0-57b7f2960971e3b95ded20cbc59244a8', 'NetworkSegmentRange': '1.0-bdec1fffc9058ea676089b1f2f2b3cf3', - 'Port': '1.4-1b6183bccfc2cd210919a1a72faefce1', + 'Port': '1.5-98f35183d876c9beb188f4bf44d4d886', 'PortBinding': '1.0-3306deeaa6deb01e33af06777d48d578', 'PortBindingLevel': '1.1-50d47f63218f87581b6cd9a62db574e5', 'PortDataPlaneStatus': '1.0-25be74bda46c749653a10357676c0ab2', diff -Nru neutron-14.0.2/neutron/tests/unit/objects/test_ports.py neutron-14.0.3/neutron/tests/unit/objects/test_ports.py --- neutron-14.0.2/neutron/tests/unit/objects/test_ports.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/objects/test_ports.py 2019-10-22 19:46:04.000000000 +0000 @@ -354,6 +354,25 @@ self.context, policy_id=old_policy_id) self.assertEqual(0, 
len(qos_binding_obj)) + @mock.patch.object(policy.QosPolicy, 'unset_default') + def test_qos_network_policy_id(self, *mocks): + policy_obj = policy.QosPolicy(self.context) + policy_obj.create() + + obj = self._make_object(self.obj_fields[0]) + obj.create() + obj = ports.Port.get_object(self.context, id=obj.id) + self.assertIsNone(obj.qos_network_policy_id) + self.assertIsNone(obj.qos_policy_id) + + network = self._create_test_network(qos_policy_id=policy_obj.id) + self.update_obj_fields({'network_id': network.id}) + obj = self._make_object(self.obj_fields[1]) + obj.create() + obj = ports.Port.get_object(self.context, id=obj.id) + self.assertEqual(policy_obj.id, obj.qos_network_policy_id) + self.assertIsNone(obj.qos_policy_id) + def test_get_objects_queries_constant(self): self.skipTest( 'Port object loads segment info without relationships') @@ -461,6 +480,12 @@ port_v1_4_no_binding = port_v1_4.obj_from_primitive(primitive) port_v1_4_no_binding.obj_to_primitive(target_version='1.3') + def test_v1_5_to_v1_4_drops_qos_network_policy_id(self): + port_new = self._create_test_port() + port_v1_4 = port_new.obj_to_primitive(target_version='1.4') + self.assertNotIn('qos_network_policy_id', + port_v1_4['versioned_object.data']) + def test_get_ports_ids_by_security_groups_except_router(self): sg_id = self._create_test_security_group_id() filter_owner = constants.ROUTER_INTERFACE_OWNERS_SNAT diff -Nru neutron-14.0.2/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py neutron-14.0.3/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py --- neutron-14.0.2/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py 2019-10-22 19:46:04.000000000 +0000 @@ -69,6 +69,7 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase): _mechanism_drivers = ['openvswitch', 'fake_agent', 'l2population'] + tenant = 'tenant' def setUp(self): super(TestL2PopulationRpcTestCase, self).setUp() @@ -366,13 +367,15 @@ enable_dhcp=False) as snet: with self.port( subnet=snet, + project_id=self.tenant, device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)\ as port: port_id = port['port']['id'] plugin.update_distributed_port_binding(self.adminContext, port_id, {'port': {portbindings.HOST_ID: HOST_4, 'device_id': router['id']}}) - port = self._show('ports', port_id) + port = self._show('ports', port_id, + neutron_context=self.adminContext) self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED, port['port'][portbindings.VIF_TYPE]) self.callbacks.update_device_up( diff -Nru neutron-14.0.2/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_test_base.py neutron-14.0.3/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_test_base.py --- neutron-14.0.2/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_test_base.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_bridge_test_base.py 2019-10-22 19:46:04.000000000 +0000 @@ -17,6 +17,7 @@ import mock from oslo_utils import importutils +from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base @@ -169,7 +170,7 @@ arp_tpa=gateway_ip, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=3, - table_id=self.dvr_process_table_id), + table_id=constants.FLOOD_TO_TUN), 
active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) @@ -181,7 +182,7 @@ gateway_ip=gateway_ip) (dp, ofp, ofpp) = self._get_dp() expected = [ - call.uninstall_flows(table_id=self.dvr_process_table_id, + call.uninstall_flows(table_id=constants.FLOOD_TO_TUN, match=ofpp.OFPMatch( eth_type=self.ether_types.ETH_TYPE_ARP, arp_tpa=gateway_ip, @@ -206,7 +207,7 @@ ip_proto=self.in_proto.IPPROTO_ICMPV6, vlan_vid=vlan_tag | ofp.OFPVID_PRESENT), priority=3, - table_id=self.dvr_process_table_id), + table_id=constants.FLOOD_TO_TUN), active_bundle=None), ] self.assertEqual(expected, self.mock.mock_calls) @@ -218,7 +219,7 @@ gateway_mac=gateway_mac) (dp, ofp, ofpp) = self._get_dp() expected = [ - call.uninstall_flows(table_id=self.dvr_process_table_id, + call.uninstall_flows(table_id=constants.FLOOD_TO_TUN, match=ofpp.OFPMatch( eth_src=gateway_mac, eth_type=self.ether_types.ETH_TYPE_IPV6, diff -Nru neutron-14.0.2/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py neutron-14.0.3/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py --- neutron-14.0.2/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py 2019-10-22 19:46:04.000000000 +0000 @@ -180,6 +180,20 @@ "Port", mock.ANY, "other_config", vlan_mapping) self.assertTrue(needs_binding) + def test_setup_physical_bridges_during_agent_initialization(self): + with mock.patch.object( + self.mod_agent.OVSNeutronAgent, + 'setup_physical_bridges') as setup_physical_bridges,\ + mock.patch.object( + self.mod_agent.OVSNeutronAgent, 'setup_rpc') as setup_rpc: + setup_rpc.side_effect = oslo_messaging.MessagingException( + "Test communication failure") + try: + self._make_agent() + except oslo_messaging.MessagingException: + pass + setup_physical_bridges.assert_called_once_with(mock.ANY) + def test_datapath_type_system(self): # verify kernel datapath is default expected = constants.OVS_DATAPATH_SYSTEM @@ -685,7 +699,7 @@ def test_add_port_tag_info(self): lvm = mock.Mock() - lvm.vlan = "1" + lvm.vlan = 1 self.agent.vlan_manager.mapping["net1"] = lvm ovs_db_list = [{'name': 'tap1', 'tag': [], @@ -718,6 +732,47 @@ "other_config", {"tag": "1"})] int_br.assert_has_calls(set_db_attribute_calls, any_order=True) + def test_add_port_tag_info_with_tagged_ports(self): + lvm = mock.Mock() + lvm.vlan = 1 + self.agent.vlan_manager.mapping["net1"] = lvm + ovs_db_list1 = [{'name': 'tap1', + 'tag': 1, + 'other_config': {'segmentation_id': '1', 'tag': '1'}}] + ovs_db_list2 = [{'name': 'tap2', + 'tag': 2, + 'other_config': {'segmentation_id': '1', 'tag': '1'}}, + {'name': 'tap3', + 'tag': 1, + 'other_config': {'segmentation_id': '2', 'tag': '2'}}] + vif_port1 = mock.Mock() + vif_port1.port_name = 'tap1' + vif_port2 = mock.Mock() + vif_port2.port_name = 'tap2' + vif_port2.ofport = 7 + vif_port3 = mock.Mock() + vif_port3.port_name = 'tap3' + vif_port3.ofport = 8 + port_details1 = [{'network_id': 'net1', 'vif_port': vif_port1}] + port_details2 = [{'network_id': 'net1', 'vif_port': vif_port2}, + {'network_id': 'net1', 'vif_port': vif_port3}] + with mock.patch.object(self.agent, 'int_br') as int_br: + int_br.get_ports_attributes.return_value = ovs_db_list1 + self.agent._add_port_tag_info(port_details1) + int_br.set_db_attribute.assert_not_called() + # Reset mock to check port with changed tag + int_br.reset_mock() + 
int_br.get_ports_attributes.return_value = ovs_db_list2 + self.agent._add_port_tag_info(port_details2) + expected_calls = \ + [mock.call.set_db_attribute("Port", "tap2", + "other_config", {'segmentation_id': '1', 'tag': '1'}), + mock.call.uninstall_flows(in_port=7), + mock.call.set_db_attribute("Port", "tap3", + "other_config", {'segmentation_id': '2', 'tag': '1'}), + mock.call.uninstall_flows(in_port=8)] + int_br.assert_has_calls(expected_calls) + def test_bind_devices(self): devices_up = ['tap1'] devices_down = ['tap2'] @@ -1773,13 +1828,16 @@ self.agent.reclaim_local_vlan('net2') tun_br.delete_port.assert_called_once_with('gre-02020202') - def test_ext_br_recreated(self): + def _test_ext_br_recreated(self, setup_bridges_side_effect): bridge_mappings = {'physnet0': 'br-ex0', 'physnet1': 'br-ex1'} ex_br_mocks = [mock.Mock(br_name='br-ex0'), mock.Mock(br_name='br-ex1')] phys_bridges = {'physnet0': ex_br_mocks[0], 'physnet1': ex_br_mocks[1]}, + bridges_added = ['br-ex0'] + expected_added_bridges = ( + bridges_added if setup_bridges_side_effect else []) with mock.patch.object(self.agent, 'check_ovs_status', return_value=constants.OVS_NORMAL), \ mock.patch.object(self.agent, '_agent_has_updates', @@ -1792,12 +1850,23 @@ setup_physical_bridges, \ mock.patch.object(self.agent.ovs.ovsdb, 'idl_monitor') as \ mock_idl_monitor: - mock_idl_monitor.bridges_added = ['br-ex0'] + mock_idl_monitor.bridges_added = bridges_added + setup_physical_bridges.side_effect = setup_bridges_side_effect try: self.agent.rpc_loop(polling_manager=mock.Mock()) except TypeError: pass + # Setup bridges should be called once even if it will raise Runtime + # Error because there is raised TypeError in _agent_has_updates to stop + # agent after first loop iteration setup_physical_bridges.assert_called_once_with({'physnet0': 'br-ex0'}) + self.assertEqual(expected_added_bridges, self.agent.added_bridges) + + def test_ext_br_recreated(self): + self._test_ext_br_recreated(setup_bridges_side_effect=None) + + def test_ext_br_recreated_fail_setup_physical_bridge(self): + self._test_ext_br_recreated(setup_bridges_side_effect=RuntimeError) def test_daemon_loop_uses_polling_manager(self): ex_br_mock = mock.Mock(br_name="br-ex0") diff -Nru neutron-14.0.2/neutron/tests/unit/plugins/ml2/drivers/test_type_vlan.py neutron-14.0.3/neutron/tests/unit/plugins/ml2/drivers/test_type_vlan.py --- neutron-14.0.2/neutron/tests/unit/plugins/ml2/drivers/test_type_vlan.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/plugins/ml2/drivers/test_type_vlan.py 2019-10-22 19:46:04.000000000 +0000 @@ -100,6 +100,13 @@ segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN} self.driver.validate_provider_segment(segment) + def test_validate_provider_segment_no_phys_network_seg_id_0(self): + segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, + api.SEGMENTATION_ID: 0} + self.assertRaises(exc.InvalidInput, + self.driver.validate_provider_segment, + segment) + def test_validate_provider_segment_with_missing_physical_network(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, api.SEGMENTATION_ID: 1} @@ -117,11 +124,15 @@ def test_validate_provider_segment_with_invalid_segmentation_id(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, - api.PHYSICAL_NETWORK: PROVIDER_NET, - api.SEGMENTATION_ID: 5000} - self.assertRaises(exc.InvalidInput, - self.driver.validate_provider_segment, - segment) + api.PHYSICAL_NETWORK: PROVIDER_NET} + segmentation_ids = [ + p_const.MIN_VLAN_TAG - 1, + p_const.MAX_VLAN_TAG + 1] + for segmentation_id in segmentation_ids: 
+ segment[api.SEGMENTATION_ID] = segmentation_id + self.assertRaises(exc.InvalidInput, + self.driver.validate_provider_segment, + segment) def test_validate_provider_segment_with_invalid_input(self): segment = {api.NETWORK_TYPE: p_const.TYPE_VLAN, diff -Nru neutron-14.0.2/neutron/tests/unit/plugins/ml2/test_plugin.py neutron-14.0.3/neutron/tests/unit/plugins/ml2/test_plugin.py --- neutron-14.0.2/neutron/tests/unit/plugins/ml2/test_plugin.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/plugins/ml2/test_plugin.py 2019-10-22 19:46:04.000000000 +0000 @@ -1004,6 +1004,14 @@ plugin.update_port_status(ctx, short_id, 'UP') mock_gbl.assert_called_once_with(mock.ANY, port_id, mock.ANY) + def test_update_port_with_empty_data(self): + ctx = context.get_admin_context() + plugin = directory.get_plugin() + with self.port() as port: + port_id = port['port']['id'] + new_port = plugin.update_port(ctx, port_id, {"port": {}}) + self.assertEqual(port["port"], new_port) + def _add_fake_dhcp_agent(self): agent = mock.Mock() plugin = directory.get_plugin() @@ -1456,6 +1464,12 @@ fixed_ips['subnet_id']) self._test_list_resources('port', [], query_params=query_params) + query_params = """ +fixed_ips=ip_address_substr%%3D%s&fixed_ips=subnet_id%%3D%s&limit=1 +""".strip() % ('192.168.', + fixed_ips['subnet_id']) + self._test_list_resources('port', [], + query_params=query_params) def test_list_ports_filtered_by_fixed_ip_substring_dual_stack(self): with self.subnet() as subnet: @@ -1505,6 +1519,12 @@ self.assertEqual(set([port1['port']['id'], port2['port']['id']]), set([port['id'] for port in ports_data['ports']])) self.assertEqual(2, len(ports_data['ports'])) + query_params = "security_groups=%s&limit=1" % ( + port1['port']['security_groups'][0]) + ports_data = self._list('ports', query_params=query_params) + self.assertIn(ports_data['ports'][0]['id'], + [port1['port']['id'], port2['port']['id']]) + self.assertEqual(1, len(ports_data['ports'])) query_params = "security_groups=%s&id=%s" % ( port1['port']['security_groups'][0], port1['port']['id']) @@ -2146,6 +2166,66 @@ # Successful binding should only be attempted once. self.assertEqual(1, at_mock.call_count) + def test__bind_port_if_needed_concurrent_calls(self): + port_vif_type = portbindings.VIF_TYPE_UNBOUND + bound_vif_type = portbindings.VIF_TYPE_OVS + + plugin, port_context, bound_context = ( + self._create_port_and_bound_context(port_vif_type, + bound_vif_type)) + bound_context._binding_levels = [mock.Mock( + port_id="port_id", + level=0, + driver='fake_agent', + segment_id="11111111-2222-3333-4444-555555555555")] + + # let _commit_port_binding replace the PortContext with a new instance + # which does not have any binding levels set to simulate the concurrent + # port binding operations fail + with mock.patch( + 'neutron.plugins.ml2.plugin.Ml2Plugin._bind_port', + return_value=bound_context),\ + mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' + '_notify_port_updated') as npu_mock,\ + mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' + '_attempt_binding', + side_effect=plugin._attempt_binding) as ab_mock,\ + mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' 
+ '_commit_port_binding', return_value=( + mock.MagicMock(), True, True)) as cpb_mock: + ret_context = plugin._bind_port_if_needed(port_context, + allow_notify=True) + # _attempt_binding will return without doing anything during + # the second iteration since _should_bind_port returns False + self.assertEqual(2, ab_mock.call_count) + self.assertEqual(1, cpb_mock.call_count) + # _notify_port_updated will still be called though it does + # nothing due to the missing binding levels + npu_mock.assert_called_once_with(ret_context) + + def test__commit_port_binding_populating_with_binding_levels(self): + port_vif_type = portbindings.VIF_TYPE_OVS + bound_vif_type = portbindings.VIF_TYPE_OVS + + plugin, port_context, bound_context = ( + self._create_port_and_bound_context(port_vif_type, + bound_vif_type)) + db_portbinding = port_obj.PortBindingLevel( + self.context, + port_id=uuidutils.generate_uuid(), + level=0, + driver='fake_agent', + segment_id="11111111-2222-3333-4444-555555555555") + bound_context.network.current = {'id': 'net_id'} + + with mock.patch.object(ml2_db, 'get_binding_level_objs', + return_value=[db_portbinding]),\ + mock.patch.object(driver_context.PortContext, + '_push_binding_level') as pbl_mock: + plugin._commit_port_binding( + port_context, bound_context, True, False) + pbl_mock.assert_called_once_with(db_portbinding) + def test_port_binding_profile_not_changed(self): profile = {'e': 5} profile_arg = {portbindings.PROFILE: profile} diff -Nru neutron-14.0.2/neutron/tests/unit/plugins/ml2/test_rpc.py neutron-14.0.3/neutron/tests/unit/plugins/ml2/test_rpc.py --- neutron-14.0.2/neutron/tests/unit/plugins/ml2/test_rpc.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/plugins/ml2/test_rpc.py 2019-10-22 19:46:04.000000000 +0000 @@ -259,6 +259,16 @@ mock.ANY, 'fake_port_id', constants.PORT_STATUS_DOWN, 'fake_host') + def test_notify_l2pop_port_wiring_non_dvr_port(self): + port = {'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX} + l2pop_driver = ( + self.plugin.mechanism_manager.mech_drivers.get.return_value) + with mock.patch.object(ml2_db, 'get_port') as ml2_db_get_port: + ml2_db_get_port.return_value = port + self.callbacks.notify_l2pop_port_wiring( + 'port_id', mock.Mock(), 'DOWN', 'host', agent_restarted=None) + self.assertFalse(l2pop_driver.obj.agent_restarted.called) + def test_update_device_down_call_update_port_status_failed(self): self.plugin.update_port_status.side_effect = exc.StaleDataError self.assertEqual({'device': 'fake_device', 'exists': False}, diff -Nru neutron-14.0.2/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py neutron-14.0.3/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py --- neutron-14.0.2/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py 2019-10-22 19:46:04.000000000 +0000 @@ -14,6 +14,7 @@ # limitations under the License. 
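The filter-bindings test in the module below compares object lists whose database ordering is not guaranteed, so both sides are now sorted on network_id before comparison; in isolation the operator.attrgetter pattern it relies on works like this (toy Binding class, not the Neutron OVO):

    from operator import attrgetter

    class Binding(object):
        def __init__(self, network_id):
            self.network_id = network_id

    bindings = [Binding('net-c'), Binding('net-a'), Binding('net-b')]
    ordered = sorted(bindings, key=attrgetter('network_id'))
    assert [b.network_id for b in ordered] == ['net-a', 'net-b', 'net-c']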
import datetime +from operator import attrgetter import random import mock @@ -500,7 +501,7 @@ def test_filter_bindings(self): self.ctx = context.get_admin_context() dhcp_agt_ids = self._create_dhcp_agents() - network_ids = self._create_test_networks(num_net=4) + network_ids = sorted(self._create_test_networks(num_net=4)) ndab_obj1 = network_obj.NetworkDhcpAgentBinding(self.ctx, network_id=network_ids[0], dhcp_agent_id=dhcp_agt_ids[0]) ndab_obj1.create() @@ -513,8 +514,9 @@ ndab_obj4 = network_obj.NetworkDhcpAgentBinding(self.ctx, network_id=network_ids[3], dhcp_agent_id=dhcp_agt_ids[1]) ndab_obj4.create() - bindings_objs = network_obj.NetworkDhcpAgentBinding.get_objects( - self.ctx) + bindings_objs = sorted(network_obj.NetworkDhcpAgentBinding.get_objects( + self.ctx), key=attrgetter('network_id')) + with mock.patch.object(self, 'agent_starting_up', side_effect=[True, False]): res = [b for b in self._filter_bindings(None, bindings_objs)] diff -Nru neutron-14.0.2/neutron/tests/unit/services/qos/test_qos_plugin.py neutron-14.0.3/neutron/tests/unit/services/qos/test_qos_plugin.py --- neutron-14.0.2/neutron/tests/unit/services/qos/test_qos_plugin.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/services/qos/test_qos_plugin.py 2019-10-22 19:46:04.000000000 +0000 @@ -117,41 +117,34 @@ self.assertIsInstance(call_args[2], policy_object.QosPolicy) def _create_and_extend_port(self, bw_rules, physical_network='public', - has_qos_policy=True, network_qos=None): + has_qos_policy=True, has_net_qos_policy=False): network_id = uuidutils.generate_uuid() - if has_qos_policy or network_qos: - policy = self.policy - policy_id = self.policy.id - self.policy.rules = bw_rules - for rule in bw_rules: - rule.qos_policy_id = self.policy.id - else: - policy = None - policy_id = None - - port_res = { - "id": uuidutils.generate_uuid(), - "qos_policy_id": policy_id, - "network_id": network_id, - "binding:vnic_type": "normal", + self.port_data = { + 'port': {'id': uuidutils.generate_uuid(), + 'network_id': network_id} } - network_mock = mock.MagicMock(id=network_id, qos_policy_id=policy_id) + + if has_qos_policy: + self.port_data['port']['qos_policy_id'] = self.policy.id + self.policy.rules = bw_rules + elif has_net_qos_policy: + self.port_data['port']['qos_network_policy_id'] = self.policy.id + self.policy.rules = bw_rules + + self.port = ports_object.Port( + self.ctxt, **self.port_data['port']) + + port_res = {"binding:vnic_type": "normal"} segment_mock = mock.MagicMock(network_id=network_id, physical_network=physical_network) - with mock.patch( - 'neutron.objects.network.Network.get_object', - return_value=network_mock - ), mock.patch( - 'neutron.objects.network.NetworkSegment.get_objects', - return_value=[segment_mock] - ), mock.patch( - 'neutron.objects.qos.policy.QosPolicy.get_port_policy', - return_value=policy - ): + with mock.patch('neutron.objects.network.NetworkSegment.get_objects', + return_value=[segment_mock]), \ + mock.patch('neutron.objects.qos.policy.QosPolicy.get_object', + return_value=self.policy): return qos_plugin.QoSPlugin._extend_port_resource_request( - port_res, {}) + port_res, self.port) def test__extend_port_resource_request_min_bw_rule(self): self.min_rule.direction = lib_constants.EGRESS_DIRECTION @@ -212,7 +205,7 @@ self.min_rule.qos_policy_id = self.policy.id port = self._create_and_extend_port([self.min_rule], - network_qos=self.policy) + has_net_qos_policy=True) self.assertEqual( ['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'], 
port['resource_request']['required'] diff -Nru neutron-14.0.2/neutron/tests/unit/services/revisions/test_revision_plugin.py neutron-14.0.3/neutron/tests/unit/services/revisions/test_revision_plugin.py --- neutron-14.0.2/neutron/tests/unit/services/revisions/test_revision_plugin.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/services/revisions/test_revision_plugin.py 2019-10-22 19:46:04.000000000 +0000 @@ -161,7 +161,7 @@ expected_code=exc.HTTPPreconditionFailed.code) def test_port_ip_update_revises(self): - with self.port() as port: + with self.subnet() as subnet, self.port(subnet=subnet) as port: rev = port['port']['revision_number'] new = {'port': {'fixed_ips': port['port']['fixed_ips']}} # ensure adding an IP allocation updates the port diff -Nru neutron-14.0.2/neutron/tests/unit/services/trunk/rpc/test_server.py neutron-14.0.3/neutron/tests/unit/services/trunk/rpc/test_server.py --- neutron-14.0.2/neutron/tests/unit/services/trunk/rpc/test_server.py 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/services/trunk/rpc/test_server.py 2019-10-22 19:46:04.000000000 +0000 @@ -13,8 +13,10 @@ import mock from neutron_lib.api.definitions import portbindings +from neutron_lib.db import api as db_api from neutron_lib.plugins import directory from neutron_lib import rpc as n_rpc +from sqlalchemy.orm import exc from neutron.api.rpc.callbacks import events from neutron.api.rpc.callbacks import resources @@ -147,6 +149,84 @@ self.assertEqual(trunk.status, constants.ERROR_STATUS) self.assertEqual([], updated_subports[trunk.id]) + def test_udate_subport_bindings_staledataerror(self): + with self.port() as _parent_port: + parent_port = _parent_port + trunk = self._create_test_trunk(parent_port) + port_data = {portbindings.HOST_ID: 'trunk_host_id'} + self.core_plugin.update_port( + self.context, parent_port['port']['id'], {'port': port_data}) + subports = [] + for vid in range(0, 3): + with self.port() as new_port: + new_port[portbindings.HOST_ID] = 'trunk_host_id' + obj = trunk_obj.SubPort( + context=self.context, + trunk_id=trunk['id'], + port_id=new_port['port']['id'], + segmentation_type='vlan', + segmentation_id=vid) + subports.append(obj) + + test_obj = server.TrunkSkeleton() + test_obj._trunk_plugin = self.trunk_plugin + test_obj._core_plugin = self.core_plugin + self.mock_update_port.return_value = {portbindings.VIF_TYPE: + portbindings.VIF_TYPE_BINDING_FAILED} + mock_trunk_obj = mock.Mock(port_id=parent_port['port']['id']) + mock_trunk_obj.update.side_effect = exc.StaleDataError + + with mock.patch.object( + trunk_obj.Trunk, + 'get_object', + return_value=mock_trunk_obj): + self.assertRaises( + exc.StaleDataError, + test_obj.update_subport_bindings, + self.context, + subports=subports) + self.assertEqual( + db_api.MAX_RETRIES, + mock_trunk_obj.update.call_count) + + def test_udate_subport_bindings_noretryerror(self): + with self.port() as _parent_port: + parent_port = _parent_port + trunk = self._create_test_trunk(parent_port) + port_data = {portbindings.HOST_ID: 'trunk_host_id'} + self.core_plugin.update_port( + self.context, parent_port['port']['id'], {'port': port_data}) + subports = [] + for vid in range(0, 3): + with self.port() as new_port: + new_port[portbindings.HOST_ID] = 'trunk_host_id' + obj = trunk_obj.SubPort( + context=self.context, + trunk_id=trunk['id'], + port_id=new_port['port']['id'], + segmentation_type='vlan', + segmentation_id=vid) + subports.append(obj) + + test_obj = server.TrunkSkeleton() + test_obj._trunk_plugin = 
self.trunk_plugin + test_obj._core_plugin = self.core_plugin + self.mock_update_port.return_value = {portbindings.VIF_TYPE: + portbindings.VIF_TYPE_BINDING_FAILED} + mock_trunk_obj = mock.Mock(port_id=parent_port['port']['id']) + mock_trunk_obj.update.side_effect = KeyError + + with mock.patch.object( + trunk_obj.Trunk, + 'get_object', + return_value=mock_trunk_obj): + self.assertRaises( + KeyError, + test_obj.update_subport_bindings, + self.context, + subports=subports) + self.assertEqual(1, mock_trunk_obj.update.call_count) + def test_update_subport_bindings_exception(self): with self.port() as _parent_port: parent_port = _parent_port diff -Nru neutron-14.0.2/neutron/tests/unit/tests/functional/test_base.py neutron-14.0.3/neutron/tests/unit/tests/functional/test_base.py --- neutron-14.0.2/neutron/tests/unit/tests/functional/test_base.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/tests/functional/test_base.py 2019-10-22 19:46:04.000000000 +0000 @@ -0,0 +1,59 @@ +# Copyright 2019 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import cfg + +from neutron.tests import base +from neutron.tests.functional import base as functional_base + + +NEW_CONFIG_GROUP = cfg.OptGroup('testgroup', + title='Test wrapping cfg register') +SOME_OPTIONS = [ + cfg.StrOpt('str_opt', default='default_value'), + cfg.StrOpt('int_opt', default=1), + cfg.BoolOpt('bool_opt', default=True) +] + + +def register_some_options(cfg=cfg.CONF): + cfg.register_opts(SOME_OPTIONS, 'testgroup') + + +class ConfigDecoratorTestCase(base.BaseTestCase): + + def setUp(self): + super(ConfigDecoratorTestCase, self).setUp() + cfg.CONF.register_group(NEW_CONFIG_GROUP) + + def test_no_config_decorator(self): + register_some_options() + self.assertEqual('default_value', cfg.CONF.testgroup.str_opt) + self.assertEqual('1', cfg.CONF.testgroup.int_opt) + self.assertTrue(cfg.CONF.testgroup.bool_opt) + + def test_override_variables(self): + opts = [('str_opt', 'another_value', 'testgroup'), + ('int_opt', 123, 'testgroup'), + ('bool_opt', False, 'testgroup')] + cfg_decorator = functional_base.config_decorator(register_some_options, + opts) + mock.patch('neutron.tests.unit.tests.functional.test_base.' 
+ 'register_some_options', new=cfg_decorator).start() + register_some_options() + self.assertEqual('another_value', cfg.CONF.testgroup.str_opt) + self.assertEqual('123', cfg.CONF.testgroup.int_opt) + self.assertFalse(cfg.CONF.testgroup.bool_opt) diff -Nru neutron-14.0.2/neutron/tests/unit/test_service.py neutron-14.0.3/neutron/tests/unit/test_service.py --- neutron-14.0.2/neutron/tests/unit/test_service.py 2019-07-01 02:54:49.000000000 +0000 +++ neutron-14.0.3/neutron/tests/unit/test_service.py 2019-10-22 19:46:04.000000000 +0000 @@ -61,9 +61,13 @@ def test_rpc_workers_zero(self): self._test_rpc_workers(0, 1) - def test_rpc_workers_default(self): + def test_rpc_workers_default_api_workers_default(self): self._test_rpc_workers(None, int(self.worker_count / 2)) + def test_rpc_workers_default_api_workers_set(self): + cfg.CONF.set_override('api_workers', 18) + self._test_rpc_workers(None, 9) + def test_rpc_workers_defined(self): self._test_rpc_workers(42, 42) diff -Nru neutron-14.0.2/neutron.egg-info/entry_points.txt neutron-14.0.3/neutron.egg-info/entry_points.txt --- neutron-14.0.2/neutron.egg-info/entry_points.txt 2019-07-01 02:56:19.000000000 +0000 +++ neutron-14.0.3/neutron.egg-info/entry_points.txt 2019-10-22 19:48:02.000000000 +0000 @@ -120,7 +120,7 @@ NetworkPortSecurity = neutron.objects.network:NetworkPortSecurity NetworkRBAC = neutron.objects.network:NetworkRBAC NetworkSegment = neutron.objects.network:NetworkSegment -NetworkSegmentRange = neutron.objects.network:NetworkSegmentRange +NetworkSegmentRange = neutron.objects.network_segment_range:NetworkSegmentRange Port = neutron.objects.ports:Port PortBinding = neutron.objects.ports:PortBinding PortBindingLevel = neutron.objects.ports:PortBindingLevel diff -Nru neutron-14.0.2/neutron.egg-info/pbr.json neutron-14.0.3/neutron.egg-info/pbr.json --- neutron-14.0.2/neutron.egg-info/pbr.json 2019-07-01 02:56:19.000000000 +0000 +++ neutron-14.0.3/neutron.egg-info/pbr.json 2019-10-22 19:48:02.000000000 +0000 @@ -1 +1 @@ -{"git_version": "4748b97b70", "is_release": true} \ No newline at end of file +{"git_version": "89b265898f", "is_release": true} \ No newline at end of file diff -Nru neutron-14.0.2/neutron.egg-info/PKG-INFO neutron-14.0.3/neutron.egg-info/PKG-INFO --- neutron-14.0.2/neutron.egg-info/PKG-INFO 2019-07-01 02:56:19.000000000 +0000 +++ neutron-14.0.3/neutron.egg-info/PKG-INFO 2019-10-22 19:48:02.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: neutron -Version: 14.0.2 +Version: 14.0.3 Summary: OpenStack Networking Home-page: https://docs.openstack.org/neutron/latest/ Author: OpenStack diff -Nru neutron-14.0.2/neutron.egg-info/SOURCES.txt neutron-14.0.3/neutron.egg-info/SOURCES.txt --- neutron-14.0.2/neutron.egg-info/SOURCES.txt 2019-07-01 02:56:19.000000000 +0000 +++ neutron-14.0.3/neutron.egg-info/SOURCES.txt 2019-10-22 19:48:03.000000000 +0000 @@ -106,6 +106,7 @@ doc/source/admin/misc.rst doc/source/admin/neutron_linuxbridge.rst doc/source/admin/ops-ip-availability.rst +doc/source/admin/ops-quotas.rst doc/source/admin/ops-resource-purge.rst doc/source/admin/ops-resource-tags.rst doc/source/admin/ops.rst @@ -2083,6 +2084,8 @@ neutron/tests/unit/tests/example/dir/example_module.py neutron/tests/unit/tests/example/dir/subdir/__init__.py neutron/tests/unit/tests/example/dir/subdir/example_module.py +neutron/tests/unit/tests/functional/__init__.py +neutron/tests/unit/tests/functional/test_base.py neutron/tests/var/ca.crt neutron/tests/var/certandkey.pem neutron/tests/var/certificate.crt @@ -2178,6 +2181,7 @@ 
releasenotes/notes/config-file-generation-2eafc6602d57178e.yaml releasenotes/notes/config-wsgi-pool-size-a4c06753b79fee6d.yaml releasenotes/notes/correlate-address-scope-with-network-ea16e16b0154ac21.yaml +releasenotes/notes/custom_ethertypes-eae3fcab3293e3a1.yaml releasenotes/notes/default-local-dns-a1c3fa1451f228fa.yaml releasenotes/notes/default-subnetpool-semantics-1cdc5cdde2be88c2.yaml releasenotes/notes/deprecate-advertise-mtu-51e3f78475a14efc.yaml @@ -2233,8 +2237,10 @@ releasenotes/notes/fix-co-existence-bug-between-sg-logging-and-fwg-logging-ef16077880d76449.yaml releasenotes/notes/fix-deferred-alloction-when-new-mac-in-same-request-as-binding-data-2a01c1ed1a8eff66.yaml releasenotes/notes/fix-mtu-for-existing-networks-5a476cde9bc46a53.yaml +releasenotes/notes/fix-net-delete-race-f2fa5bac3ab35a5b.yaml releasenotes/notes/fix-ovsdb-ssl-connection-4058caf4fdcb33ab.yaml releasenotes/notes/fix-security-group-protocol-by-numbers-48afb97ede961716.yaml +releasenotes/notes/fix-update-port-fixed-ips-on-routed-provider-networks-c54a54844d9a3926.yaml releasenotes/notes/floatingips-port-forwarding-65efd8c17a16dffc.yaml releasenotes/notes/gateway-rate-limit-905bee1ed60c6b8e.yaml releasenotes/notes/get_standard_device_mappings_for_mechdriver-bc039d478ea0b162.yaml diff -Nru neutron-14.0.2/PKG-INFO neutron-14.0.3/PKG-INFO --- neutron-14.0.2/PKG-INFO 2019-07-01 02:56:19.000000000 +0000 +++ neutron-14.0.3/PKG-INFO 2019-10-22 19:48:03.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: neutron -Version: 14.0.2 +Version: 14.0.3 Summary: OpenStack Networking Home-page: https://docs.openstack.org/neutron/latest/ Author: OpenStack diff -Nru neutron-14.0.2/releasenotes/notes/custom_ethertypes-eae3fcab3293e3a1.yaml neutron-14.0.3/releasenotes/notes/custom_ethertypes-eae3fcab3293e3a1.yaml --- neutron-14.0.2/releasenotes/notes/custom_ethertypes-eae3fcab3293e3a1.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-14.0.3/releasenotes/notes/custom_ethertypes-eae3fcab3293e3a1.yaml 2019-10-22 19:46:04.000000000 +0000 @@ -0,0 +1,9 @@ +--- +security: + - | + The OVS Firewall currently blocks traffic that does not have either the + IPv4 or IPv6 ethertype. This is a behavior change compared to the + iptables_hybrid firewall, which only operates on IP packets and thus does + not address other ethertypes. There is now a configuration option in the + neutron openvswitch agent configuration file for permitted ethertypes; + the agent ensures on initialization that these ethertypes are permitted. diff -Nru neutron-14.0.2/releasenotes/notes/fix-net-delete-race-f2fa5bac3ab35a5b.yaml neutron-14.0.3/releasenotes/notes/fix-net-delete-race-f2fa5bac3ab35a5b.yaml --- neutron-14.0.2/releasenotes/notes/fix-net-delete-race-f2fa5bac3ab35a5b.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-14.0.3/releasenotes/notes/fix-net-delete-race-f2fa5bac3ab35a5b.yaml 2019-10-22 19:46:04.000000000 +0000 @@ -0,0 +1,9 @@ +--- +fixes: + - | + Fixes an issue where deletion of a provider network could result in ML2 + mechanism drivers not being passed information about the network's provider + fields. The consequences of this depend on the mechanism driver in use, but + could result in the event being ignored, leading to an incorrectly + configured network. See `bug 1841967 + <https://bugs.launchpad.net/neutron/+bug/1841967>`__ for details.
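The security note above mentions the new allow-list option without naming it. As a sketch of how such an agent option is registered and consumed with oslo.config (the option name permitted_ethertypes and the securitygroup group are assumptions based on later neutron releases, not text taken from this diff):

    from oslo_config import cfg

    # Hypothetical registration mirroring what the OVS agent would do.
    OPTS = [
        cfg.ListOpt('permitted_ethertypes',
                    default=[],
                    help='Hex ethertypes the OVS firewall always allows.'),
    ]
    conf = cfg.ConfigOpts()
    conf.register_opts(OPTS, 'securitygroup')
    conf(args=[])
    conf.set_override('permitted_ethertypes', ['0x4008', '0x8847'],
                      'securitygroup')
    assert conf.securitygroup.permitted_ethertypes == ['0x4008', '0x8847']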
diff -Nru neutron-14.0.2/releasenotes/notes/fix-update-port-fixed-ips-on-routed-provider-networks-c54a54844d9a3926.yaml neutron-14.0.3/releasenotes/notes/fix-update-port-fixed-ips-on-routed-provider-networks-c54a54844d9a3926.yaml --- neutron-14.0.2/releasenotes/notes/fix-update-port-fixed-ips-on-routed-provider-networks-c54a54844d9a3926.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-14.0.3/releasenotes/notes/fix-update-port-fixed-ips-on-routed-provider-networks-c54a54844d9a3926.yaml 2019-10-22 19:46:04.000000000 +0000 @@ -0,0 +1,7 @@ +--- +fixes: + - | + When updating the fixed-ips of a port residing on a routed provider + network, the port update would always fail if *host* was not set. + See bug: `1844124 <https://bugs.launchpad.net/neutron/+bug/1844124>`_. + diff -Nru neutron-14.0.2/setup.cfg neutron-14.0.3/setup.cfg --- neutron-14.0.2/setup.cfg 2019-07-01 02:56:19.000000000 +0000 +++ neutron-14.0.3/setup.cfg 2019-10-22 19:48:03.000000000 +0000 @@ -198,7 +198,7 @@ NetworkPortSecurity = neutron.objects.network:NetworkPortSecurity NetworkRBAC = neutron.objects.network:NetworkRBAC NetworkSegment = neutron.objects.network:NetworkSegment - NetworkSegmentRange = neutron.objects.network:NetworkSegmentRange + NetworkSegmentRange = neutron.objects.network_segment_range:NetworkSegmentRange Port = neutron.objects.ports:Port PortBinding = neutron.objects.ports:PortBinding PortBindingLevel = neutron.objects.ports:PortBindingLevel diff -Nru neutron-14.0.2/.zuul.yaml neutron-14.0.3/.zuul.yaml --- neutron-14.0.2/.zuul.yaml 2019-07-01 02:54:56.000000000 +0000 +++ neutron-14.0.3/.zuul.yaml 2019-10-22 19:46:04.000000000 +0000 @@ -97,7 +97,7 @@ irrelevant-files: *tempest-irrelevant-files - legacy-tempest-dsvm-neutron-dvr-multinode-full: irrelevant-files: *tempest-irrelevant-files - - tempest-full-py3-opensuse150: + - tempest-full-py3-opensuse15: irrelevant-files: *tempest-irrelevant-files - job: @@ -271,6 +271,7 @@ enable_distributed_routing: True l2_population: True tunnel_types: vxlan + arp_responder: True ovs: tunnel_bridge: br-tun bridge_mappings: public:br-ex @@ -301,6 +302,7 @@ enable_distributed_routing: True l2_population: True tunnel_types: vxlan + arp_responder: True ovs: tunnel_bridge: br-tun bridge_mappings: public:br-ex @@ -348,6 +350,7 @@ enable_distributed_routing: True l2_population: True tunnel_types: vxlan,gre + arp_responder: True securitygroup: firewall_driver: iptables_hybrid $NEUTRON_L3_CONF: