diff -Nru ryu-4.9/CONTRIBUTING.rst ryu-4.15/CONTRIBUTING.rst
--- ryu-4.9/CONTRIBUTING.rst 2016-12-05 07:37:18.000000000 +0000
+++ ryu-4.15/CONTRIBUTING.rst 2017-07-02 11:08:32.000000000 +0000
@@ -5,46 +5,66 @@
 Submitting a change
 ===================
 
-Send patches to ryu-devel@lists.sourceforge.net. Please don't use 'pull
-request' on github. We expect you to send a patch in Linux kernel
-development style. If you are not familiar with it, please read the
-following document:
+Send patches to ryu-devel@lists.sourceforge.net. Please don't use "Pull
+Request" on GitHub. We expect you to send patches in "git-format-patch"
+style.
 
-https://www.kernel.org/doc/Documentation/SubmittingPatches
+.. code-block:: bash
+
+  # "N" means the number of commits to be included
+  $ git format-patch -s HEAD~N
+
+  # To add cover (e.g., [PATCH 0/X]), specify "--cover-letter" option
+  $ git format-patch -s --cover-letter HEAD~N
+
+  # You can send patches by "git send-email" command
+  $ git send-email --to="ryu-devel@lists.sourceforge.net" *.patch
 
 Please check your changes with pep8 and run unittests to make sure
 that they don't break the existing features. The following command
-does both for you:
+does both for you.
+
+.. code-block:: bash
+
+  # Install dependencies of tests
+  $ pip install -r tools/test-requires
 
-fujita@rose:~/git/ryu$ ./run_tests.sh
+  # Execute unit tests and pep8
+  $ ./run_tests.sh
 
 Of course, you are encouraged to add unittests when you add new
 features (it's not a must though).
 
 Python version and libraries
 ============================
-* Python 2.6+
-  As RHEL 6 adopted python 2.6, features only for 2.7+ should be avoided.
+* Python 2.7, 3.4, 3.5:
 
-* standard library + widely used library
-  Basically widely used == OpenStack adopted
-  As usual there are exceptions. gevents. Or python binding library for other
+  Ryu supports multiple Python version. CI tests on Travis-CI is running
+  on these versions.
+
+* standard library + widely used library:
+
+  Basically widely used == OpenStack adopted.
+  As usual there are exceptions. Or python binding library for other
   component.
 
 Coding style guide
 ==================
-* pep8
+* pep8:
+
   As python is used, PEP8 is would be hopefully mandatory for
-  http://www.python.org/dev/peps/pep-0008/
+  https://www.python.org/dev/peps/pep-0008/
+
+* pylint:
 
-* pylint
   Although pylint is useful for finding bugs, but pylint score not very
   important for now because we're still at early development stage.
+  https://www.pylint.org/
 
-* Google python style guide is very helpful
-  http://google-styleguide.googlecode.com/svn/trunk/pyguide.html
+* Google python style guide is very helpful:
+  http://google.github.io/styleguide/pyguide.html
 
-  Guidelines derived from Guido's Recommendations
+* Guidelines derived from Guido's Recommendations:
   ============================= ================= ========
   Type                          Public            Internal
   ============================= ================= ========
@@ -62,10 +82,11 @@
   Local Variables               lower_with_under
   ============================= ================= ========
 
-* OpenStack Nova style guide
+* OpenStack Nova style guide:
   https://github.com/openstack/nova/blob/master/HACKING.rst
 
-* JSON files
+* JSON files:
+
   Ryu source tree has JSON files under ryu/tests/unit/ofproto/json.
   They are used by unit tests. To make patches easier to read,
   they are normalized using tools/normalize_json.py.
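As a purely illustrative sketch of the naming table above (the names below are invented for the example and do not come from the Ryu tree):

.. code-block:: python

  # sample_module.py -- modules and packages: lower_with_under
  DEFAULT_RETRY_COUNT = 3          # public constant: CAPS_WITH_UNDER
  _INTERNAL_BUFFER_SIZE = 128      # internal constant: _CAPS_WITH_UNDER


  class FlowTableMonitor(object):  # class name: CapWords
      def count_entries(self, flow_list):   # public method: lower_with_under()
          entry_total = 0                   # local variable: lower_with_under
          for _flow in flow_list:
              entry_total += 1
          return entry_total

      def _reset_counters(self):           # internal method: _lower_with_under()
          self._entry_total = 0            # internal attribute: _lower_with_under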
Please re-run diff -Nru ryu-4.9/debian/changelog ryu-4.15/debian/changelog --- ryu-4.9/debian/changelog 2017-07-28 23:06:47.000000000 +0000 +++ ryu-4.15/debian/changelog 2017-08-04 20:47:51.000000000 +0000 @@ -1,8 +1,9 @@ -ryu (4.9-0ubuntu3) artful; urgency=medium +ryu (4.15-0ubuntu1) artful; urgency=medium - * No-change rebuild against python3.6 + * New upstream release. + * d/p/no-pip.patch, d/control: Drop the use of pip by unit tests. - -- Jeremy Bicha Fri, 28 Jul 2017 19:06:47 -0400 + -- Corey Bryant Fri, 04 Aug 2017 16:47:51 -0400 ryu (4.9-0ubuntu2) zesty; urgency=medium diff -Nru ryu-4.9/debian/control ryu-4.15/debian/control --- ryu-4.9/debian/control 2017-02-01 15:24:32.000000000 +0000 +++ ryu-4.15/debian/control 2017-08-04 20:47:51.000000000 +0000 @@ -28,6 +28,7 @@ python-oslo.config, python-paramiko, python-routes, + python-testtools, python-tinyrpc, python-openvswitch, python-six, @@ -43,6 +44,7 @@ python3-paramiko, python3-routes, python3-six, + python3-testtools, python3-webob, Standards-Version: 3.9.8 Vcs-Browser: https://git.launchpad.net/~ubuntu-server-dev/ubuntu/+source/ryu diff -Nru ryu-4.9/debian/patches/no-pip.patch ryu-4.15/debian/patches/no-pip.patch --- ryu-4.9/debian/patches/no-pip.patch 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/debian/patches/no-pip.patch 2017-08-04 20:47:51.000000000 +0000 @@ -0,0 +1,62 @@ +Description: Drop use of pip in Ubuntu. +Author: Corey Bryant +Forwarded: no +Last-Update: 2017-08-03 + +--- a/ryu/tests/unit/test_requirements.py ++++ b/ryu/tests/unit/test_requirements.py +@@ -17,10 +17,14 @@ + import os + import sys + import unittest ++import testtools + + import pkg_resources +-from pip.req import parse_requirements +-from pip.download import PipSession ++try: ++ from pip.req import parse_requirements ++ from pip.download import PipSession ++except: ++ pass + from six.moves import urllib + + from nose.tools import ok_ +@@ -57,8 +61,11 @@ + + return requirements + +-OPENSTACK_REQUIREMENTS = _get_requirements(OPENSTACK_REQUIREMENTS_FILES) +-RYU_REQUIREMENTS = _get_requirements(RYU_REQUIREMENTS_FILES) ++try: ++ OPENSTACK_REQUIREMENTS = _get_requirements(OPENSTACK_REQUIREMENTS_FILES) ++ RYU_REQUIREMENTS = _get_requirements(RYU_REQUIREMENTS_FILES) ++except: ++ pass + + + class TestRequirements(unittest.TestCase): +@@ -73,6 +80,7 @@ + def tearDown(self): + pass + ++ @testtools.skip("Skipped by Ubuntu") + def test_with_openstack_requirements(self): + try: + for name, req in OPENSTACK_REQUIREMENTS.items(): +--- a/ryu/utils.py ++++ b/ryu/utils.py +@@ -20,8 +20,11 @@ + import sys + + import six +-from pip import req as pip_req +-from pip.download import PipSession ++try: ++ from pip import req as pip_req ++ from pip.download import PipSession ++except: ++ pass + + + LOG = logging.getLogger('ryu.utils') diff -Nru ryu-4.9/debian/patches/series ryu-4.15/debian/patches/series --- ryu-4.9/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/debian/patches/series 2017-08-04 20:47:51.000000000 +0000 @@ -0,0 +1 @@ +no-pip.patch diff -Nru ryu-4.9/doc/source/app/ofctl_rest.rst ryu-4.15/doc/source/app/ofctl_rest.rst --- ryu-4.9/doc/source/app/ofctl_rest.rst 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/doc/source/app/ofctl_rest.rst 2017-07-02 11:08:32.000000000 +0000 @@ -248,8 +248,17 @@ cookie Require matching entries to contain this cookie value (int) 1 0 cookie_mask Mask used to restrict the cookie bits that must match (int) 1 0 match Fields to match (dict) {"in_port": 1} {} #wildcarded + priority Priority of the entry (int) (See 
Note) 11111 #wildcarded ============ ================================================================== =============== =============== + .. NOTE:: + + OpenFlow Spec does not allow to filter flow entries by priority, + but when with a large amount of flow entries, filtering by priority + is convenient to get statistics efficiently. + So, this app provides priority field for filtering. + + Response message body: The same as :ref:`get-all-flows-stats` @@ -1785,6 +1794,72 @@ } +Get role +-------- + + Get the current role of the controller from the switch. + + Usage: + + ======= ========================= + Method GET + URI /stats/role/ + ======= ========================= + + Response message body(Openflow1.4 or earlier): + + ============= ============================= ========= + Attribute Description Example + ============= ============================= ========= + dpid Datapath ID 1 + role One of OFPCR_ROLE_* "EQUAL" + generation_id Master Election Generation Id 0 + ============= ============================= ========= + + Response message body(Openflow1.5 or later): + + ============= ============================= ========= + Attribute Description Example + ============= ============================= ========= + dpid Datapath ID 1 + role One of OFPCR_ROLE_* "EQUAL" + short_id ID number for the controller 0 + generation_id Master Election Generation Id 0 + ============= ============================= ========= + + Example of use:: + + $ curl -X GET http://localhost:8080/stats/role/1 + + Response (Openflow1.4 or earlier): + + .. code-block:: javascript + + { + "1": [ + { + "generation_id": 0, + "role": "EQUAL" + } + ] + } + + + Response (Openflow1.5 or later): + + .. code-block:: javascript + + { + "1": [ + { + "generation_id": 0, + "role": "EQUAL", + "short_id": 0 + } + ] + } + + Update the switch stats ======================= diff -Nru ryu-4.9/doc/source/index.rst ryu-4.15/doc/source/index.rst --- ryu-4.9/doc/source/index.rst 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/doc/source/index.rst 2017-07-02 11:08:32.000000000 +0000 @@ -16,7 +16,6 @@ developing.rst configuration.rst tests.rst - using_with_openstack.rst snort_integrate.rst app.rst diff -Nru ryu-4.9/doc/source/library_mrt.rst ryu-4.15/doc/source/library_mrt.rst --- ryu-4.9/doc/source/library_mrt.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_mrt.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,28 @@ +**************** +MRT file library +**************** + +Introduction +============ + +Ryu MRT file library helps you to read/write MRT +(Multi-Threaded Routing Toolkit) Routing Information Export Format +[`RFC6396`_]. + +.. _RFC6396: https://tools.ietf.org/html/rfc6396 + +Reading MRT file +================ + +For loading the routing information contained in MRT files, you can use +mrtlib.Reader. + +.. autoclass:: ryu.lib.mrtlib.Reader + +Writing MRT file +================ + +For dumping the routing information which your RyuApp generated, you can use +mrtlib.Writer. + +.. 
autoclass:: ryu.lib.mrtlib.Writer diff -Nru ryu-4.9/doc/source/library_ovsdb_manager.rst ryu-4.15/doc/source/library_ovsdb_manager.rst --- ryu-4.9/doc/source/library_ovsdb_manager.rst 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/doc/source/library_ovsdb_manager.rst 2017-07-02 11:08:32.000000000 +0000 @@ -20,6 +20,7 @@ import uuid from ryu.base import app_manager + from ryu.controller.handler import set_ev_cls from ryu.services.protocols.ovsdb import api as ovsdb from ryu.services.protocols.ovsdb import event as ovsdb_event @@ -28,15 +29,19 @@ @set_ev_cls(ovsdb_event.EventNewOVSDBConnection) def handle_new_ovsdb_connection(self, ev): system_id = ev.system_id + addr = ev.client.address self.logger.info('New OVSDB connection from system id %s', - systemd_id) + system_id) + self.logger.info('The connection address id %s', + addr) - def create_port(self, systemd_id, bridge_name, name): + def create_port(self, system_id, bridge_name, name): new_iface_uuid = uuid.uuid4() new_port_uuid = uuid.uuid4() + bridge = ovsdb.row_by_name(self, system_id, bridge_name) + def _create_port(tables, insert): - bridge = ovsdb.row_by_name(self, system_id, bridge_name) iface = insert(tables['Interface'], new_iface_uuid) iface.name = name @@ -46,7 +51,7 @@ port.name = name port.interfaces = [iface] - brdige.ports = bridfe.ports + [port] + bridge.ports = bridge.ports + [port] return (new_port_uuid, new_iface_uuid) @@ -58,4 +63,4 @@ name, bridge, rep.status) return None - return reply.insert_uuid[new_port_uuid] + return rep.insert_uuids[new_port_uuid] diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_arp.rst ryu-4.15/doc/source/library_packet_ref/packet_arp.rst --- ryu-4.9/doc/source/library_packet_ref/packet_arp.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_arp.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +*** +ARP +*** + +.. automodule:: ryu.lib.packet.arp + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_base.rst ryu-4.15/doc/source/library_packet_ref/packet_base.rst --- ryu-4.9/doc/source/library_packet_ref/packet_base.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_base.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +***************** +Packet Base Class +***************** + +.. automodule:: ryu.lib.packet.packet_base + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_bfd.rst ryu-4.15/doc/source/library_packet_ref/packet_bfd.rst --- ryu-4.9/doc/source/library_packet_ref/packet_bfd.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_bfd.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +*** +BFD +*** + +.. automodule:: ryu.lib.packet.bfd + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_bgp.rst ryu-4.15/doc/source/library_packet_ref/packet_bgp.rst --- ryu-4.9/doc/source/library_packet_ref/packet_bgp.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_bgp.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +*** +BGP +*** + +.. automodule:: ryu.lib.packet.bgp + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_bmp.rst ryu-4.15/doc/source/library_packet_ref/packet_bmp.rst --- ryu-4.9/doc/source/library_packet_ref/packet_bmp.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_bmp.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +*** +BMP +*** + +.. 
automodule:: ryu.lib.packet.bmp + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_bpdu.rst ryu-4.15/doc/source/library_packet_ref/packet_bpdu.rst --- ryu-4.9/doc/source/library_packet_ref/packet_bpdu.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_bpdu.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +BPDU +**** + +.. automodule:: ryu.lib.packet.bpdu + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_cfm.rst ryu-4.15/doc/source/library_packet_ref/packet_cfm.rst --- ryu-4.9/doc/source/library_packet_ref/packet_cfm.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_cfm.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +*** +CFM +*** + +.. automodule:: ryu.lib.packet.cfm + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_dhcp6.rst ryu-4.15/doc/source/library_packet_ref/packet_dhcp6.rst --- ryu-4.9/doc/source/library_packet_ref/packet_dhcp6.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_dhcp6.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +***** +DHCP6 +***** + +.. automodule:: ryu.lib.packet.dhcp6 + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_dhcp.rst ryu-4.15/doc/source/library_packet_ref/packet_dhcp.rst --- ryu-4.9/doc/source/library_packet_ref/packet_dhcp.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_dhcp.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +DHCP +**** + +.. automodule:: ryu.lib.packet.dhcp + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_ethernet.rst ryu-4.15/doc/source/library_packet_ref/packet_ethernet.rst --- ryu-4.9/doc/source/library_packet_ref/packet_ethernet.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_ethernet.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +******** +Ehternet +******** + +.. automodule:: ryu.lib.packet.ethernet + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_geneve.rst ryu-4.15/doc/source/library_packet_ref/packet_geneve.rst --- ryu-4.9/doc/source/library_packet_ref/packet_geneve.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_geneve.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +****** +Geneve +****** + +.. automodule:: ryu.lib.packet.geneve + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_gre.rst ryu-4.15/doc/source/library_packet_ref/packet_gre.rst --- ryu-4.9/doc/source/library_packet_ref/packet_gre.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_gre.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +*** +GRE +*** + +.. automodule:: ryu.lib.packet.gre + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_icmp.rst ryu-4.15/doc/source/library_packet_ref/packet_icmp.rst --- ryu-4.9/doc/source/library_packet_ref/packet_icmp.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_icmp.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +ICMP +**** + +.. 
automodule:: ryu.lib.packet.icmp + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_icmpv6.rst ryu-4.15/doc/source/library_packet_ref/packet_icmpv6.rst --- ryu-4.9/doc/source/library_packet_ref/packet_icmpv6.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_icmpv6.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +****** +ICMPv6 +****** + +.. automodule:: ryu.lib.packet.icmpv6 + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_igmp.rst ryu-4.15/doc/source/library_packet_ref/packet_igmp.rst --- ryu-4.9/doc/source/library_packet_ref/packet_igmp.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_igmp.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +IGMP +**** + +.. automodule:: ryu.lib.packet.igmp + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_ipv4.rst ryu-4.15/doc/source/library_packet_ref/packet_ipv4.rst --- ryu-4.9/doc/source/library_packet_ref/packet_ipv4.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_ipv4.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +IPv4 +**** + +.. automodule:: ryu.lib.packet.ipv4 + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_ipv6.rst ryu-4.15/doc/source/library_packet_ref/packet_ipv6.rst --- ryu-4.9/doc/source/library_packet_ref/packet_ipv6.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_ipv6.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +IPv6 +**** + +.. automodule:: ryu.lib.packet.ipv6 + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_llc.rst ryu-4.15/doc/source/library_packet_ref/packet_llc.rst --- ryu-4.9/doc/source/library_packet_ref/packet_llc.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_llc.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +*** +LLC +*** + +.. automodule:: ryu.lib.packet.llc + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_lldp.rst ryu-4.15/doc/source/library_packet_ref/packet_lldp.rst --- ryu-4.9/doc/source/library_packet_ref/packet_lldp.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_lldp.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +LLDP +**** + +.. automodule:: ryu.lib.packet.lldp + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_mpls.rst ryu-4.15/doc/source/library_packet_ref/packet_mpls.rst --- ryu-4.9/doc/source/library_packet_ref/packet_mpls.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_mpls.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +MPLS +**** + +.. automodule:: ryu.lib.packet.mpls + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_openflow.rst ryu-4.15/doc/source/library_packet_ref/packet_openflow.rst --- ryu-4.9/doc/source/library_packet_ref/packet_openflow.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_openflow.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +******** +OpenFlow +******** + +.. 
automodule:: ryu.lib.packet.openflow + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_ospf.rst ryu-4.15/doc/source/library_packet_ref/packet_ospf.rst --- ryu-4.9/doc/source/library_packet_ref/packet_ospf.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_ospf.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +OSPF +**** + +.. automodule:: ryu.lib.packet.ospf + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_pbb.rst ryu-4.15/doc/source/library_packet_ref/packet_pbb.rst --- ryu-4.9/doc/source/library_packet_ref/packet_pbb.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_pbb.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +*** +PBB +*** + +.. automodule:: ryu.lib.packet.pbb + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_sctp.rst ryu-4.15/doc/source/library_packet_ref/packet_sctp.rst --- ryu-4.9/doc/source/library_packet_ref/packet_sctp.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_sctp.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +SCTP +**** + +.. automodule:: ryu.lib.packet.sctp + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_slow.rst ryu-4.15/doc/source/library_packet_ref/packet_slow.rst --- ryu-4.9/doc/source/library_packet_ref/packet_slow.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_slow.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +Slow +**** + +.. automodule:: ryu.lib.packet.slow + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_tcp.rst ryu-4.15/doc/source/library_packet_ref/packet_tcp.rst --- ryu-4.9/doc/source/library_packet_ref/packet_tcp.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_tcp.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +*** +TCP +*** + +.. automodule:: ryu.lib.packet.tcp + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_udp.rst ryu-4.15/doc/source/library_packet_ref/packet_udp.rst --- ryu-4.9/doc/source/library_packet_ref/packet_udp.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_udp.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +*** +UDP +*** + +.. automodule:: ryu.lib.packet.udp + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_vlan.rst ryu-4.15/doc/source/library_packet_ref/packet_vlan.rst --- ryu-4.9/doc/source/library_packet_ref/packet_vlan.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_vlan.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +VLAN +**** + +.. automodule:: ryu.lib.packet.vlan + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_vrrp.rst ryu-4.15/doc/source/library_packet_ref/packet_vrrp.rst --- ryu-4.9/doc/source/library_packet_ref/packet_vrrp.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_vrrp.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +**** +VRRP +**** + +.. automodule:: ryu.lib.packet.vrrp + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_vxlan.rst ryu-4.15/doc/source/library_packet_ref/packet_vxlan.rst --- ryu-4.9/doc/source/library_packet_ref/packet_vxlan.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_vxlan.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +***** +VXLAN +***** + +.. 
automodule:: ryu.lib.packet.vxlan + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref/packet_zebra.rst ryu-4.15/doc/source/library_packet_ref/packet_zebra.rst --- ryu-4.9/doc/source/library_packet_ref/packet_zebra.rst 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref/packet_zebra.rst 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,6 @@ +***** +Zebra +***** + +.. automodule:: ryu.lib.packet.zebra + :members: diff -Nru ryu-4.9/doc/source/library_packet_ref.rst ryu-4.15/doc/source/library_packet_ref.rst --- ryu-4.9/doc/source/library_packet_ref.rst 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/doc/source/library_packet_ref.rst 2017-07-02 11:08:32.000000000 +0000 @@ -14,122 +14,15 @@ .. automodule:: ryu.lib.packet.stream_parser :members: -.. autoclass:: ryu.lib.packet.bgp.StreamParser - :members: +List of the sub-classes: + +- :py:mod:`ryu.lib.packet.bgp.StreamParser` Protocol Header classes ======================= -.. automodule:: ryu.lib.packet.packet_base - :members: - -.. automodule:: ryu.lib.packet.ethernet - :members: - -.. automodule:: ryu.lib.packet.vlan - :members: - -.. automodule:: ryu.lib.packet.pbb - :members: - -.. automodule:: ryu.lib.packet.mpls - :members: - -.. automodule:: ryu.lib.packet.arp - :members: - -.. automodule:: ryu.lib.packet.ipv4 - :members: - -.. automodule:: ryu.lib.packet.icmp - :members: - -.. automodule:: ryu.lib.packet.ipv6 - :members: - -.. automodule:: ryu.lib.packet.icmpv6 - :members: - -.. automodule:: ryu.lib.packet.gre - :members: - -.. automodule:: ryu.lib.packet.cfm - :members: - -.. automodule:: ryu.lib.packet.tcp - :members: - -.. automodule:: ryu.lib.packet.udp - :members: - -.. autoclass:: ryu.lib.packet.dhcp.dhcp - :members: -.. autoclass:: ryu.lib.packet.dhcp.options - :members: -.. autoclass:: ryu.lib.packet.dhcp.option - :members: - -.. autoclass:: ryu.lib.packet.vrrp.vrrp - :members: -.. autoclass:: ryu.lib.packet.vrrp.vrrpv2 - :members: -.. autoclass:: ryu.lib.packet.vrrp.vrrpv3 - :members: - -.. autoclass:: ryu.lib.packet.slow.slow - :members: -.. autoclass:: ryu.lib.packet.slow.lacp - :members: - -.. autoclass:: ryu.lib.packet.llc.llc - :members: -.. autoclass:: ryu.lib.packet.llc.ControlFormatI - :members: -.. autoclass:: ryu.lib.packet.llc.ControlFormatS - :members: -.. autoclass:: ryu.lib.packet.llc.ControlFormatU - :members: - -.. autoclass:: ryu.lib.packet.bpdu.bpdu - :members: -.. autoclass:: ryu.lib.packet.bpdu.ConfigurationBPDUs - :members: -.. autoclass:: ryu.lib.packet.bpdu.TopologyChangeNotificationBPDUs - :members: -.. autoclass:: ryu.lib.packet.bpdu.RstBPDUs - :members: - -.. autoclass:: ryu.lib.packet.igmp.igmp - :members: -.. autoclass:: ryu.lib.packet.igmp.igmpv3_query - :members: -.. autoclass:: ryu.lib.packet.igmp.igmpv3_report - :members: -.. autoclass:: ryu.lib.packet.igmp.igmpv3_report_group - :members: +.. toctree:: + :glob: -.. autoclass:: ryu.lib.packet.bgp.BGPMessage - :members: -.. autoclass:: ryu.lib.packet.bgp.BGPOpen - :members: -.. autoclass:: ryu.lib.packet.bgp.BGPUpdate - :members: -.. autoclass:: ryu.lib.packet.bgp.BGPKeepAlive - :members: -.. autoclass:: ryu.lib.packet.bgp.BGPNotification - :members: -.. automodule:: ryu.lib.packet.sctp - :members: - -.. autoclass:: ryu.lib.packet.bfd.bfd - :members: -.. autoclass:: ryu.lib.packet.bfd.SimplePassword - :members: -.. autoclass:: ryu.lib.packet.bfd.KeyedMD5 - :members: -.. autoclass:: ryu.lib.packet.bfd.MeticulousKeyedMD5 - :members: -.. autoclass:: ryu.lib.packet.bfd.KeyedSHA1 - :members: -.. 
autoclass:: ryu.lib.packet.bfd.MeticulousKeyedSHA1 - :members: + library_packet_ref/packet_base + library_packet_ref/* \ No newline at end of file diff -Nru ryu-4.9/doc/source/library.rst ryu-4.15/doc/source/library.rst --- ryu-4.9/doc/source/library.rst 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/doc/source/library.rst 2017-07-02 11:08:32.000000000 +0000 @@ -13,4 +13,5 @@ library_of_config.rst library_bgp_speaker.rst library_bgp_speaker_ref.rst + library_mrt.rst library_ovsdb_manager.rst diff -Nru ryu-4.9/doc/source/using_with_openstack.rst ryu-4.15/doc/source/using_with_openstack.rst --- ryu-4.9/doc/source/using_with_openstack.rst 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/doc/source/using_with_openstack.rst 2017-07-02 11:08:32.000000000 +0000 @@ -4,6 +4,13 @@ Using Ryu Network Operating System with OpenStack as OpenFlow controller ************************************************************************ +.. CAUTION:: + + The Ryu plugin and OFAgent described in the following is deprecated, + because Ryu is officially integrated into Open vSwitch agent with + "of_interface = native" mode. + + Ryu cooperates with OpenStack using Quantum Ryu plugin. The plugin is available in the official Quantum releases. diff -Nru ryu-4.9/README.rst ryu-4.15/README.rst --- ryu-4.9/README.rst 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/README.rst 2017-07-02 11:08:32.000000000 +0000 @@ -38,6 +38,7 @@ - OF-Config requires lxml and ncclient - NETCONF requires paramiko - BGP speaker (SSH console) requires paramiko +- Zebra protocol service (database) requires SQLAlchemy If you want to use the functionalities, please install requirements:: @@ -46,6 +47,16 @@ Please refer to tools/optional-requires for details. +Prerequisites +============= +If you got some error messages at installation step, please confirm +dependencies for building required Python packages. + +On Ubuntu(16.04 LTS or later):: + + % apt install gcc python-dev libffi-dev libssl-dev libxml2-dev libxslt1-dev zlib1g-dev + + Support ======= Ryu Official site is ``_. diff -Nru ryu-4.9/ryu/app/ofctl_rest.py ryu-4.15/ryu/app/ofctl_rest.py --- ryu-4.9/ryu/app/ofctl_rest.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/app/ofctl_rest.py 2017-07-02 11:08:32.000000000 +0000 @@ -14,10 +14,8 @@ # limitations under the License. 
import logging - import json import ast -from webob import Response from ryu.base import app_manager from ryu.controller import ofp_event @@ -35,8 +33,9 @@ from ryu.lib import ofctl_v1_3 from ryu.lib import ofctl_v1_4 from ryu.lib import ofctl_v1_5 -from ryu.app.wsgi import ControllerBase, WSGIApplication - +from ryu.app.wsgi import ControllerBase +from ryu.app.wsgi import Response +from ryu.app.wsgi import WSGIApplication LOG = logging.getLogger('ryu.app.ofctl_rest') @@ -417,6 +416,10 @@ else: return ofctl.get_port_desc(dp, self.waiters, port_no) + @stats_method + def get_role(self, req, dp, ofctl, **kwargs): + return ofctl.get_role(dp, self.waiters) + @command_method def mod_flow_entry(self, req, dp, ofctl, flow, cmd, **kwargs): cmd_convert = { @@ -675,6 +678,11 @@ controller=StatsController, action='get_port_desc', conditions=dict(method=['GET'])) + uri = path + '/role/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_role', + conditions=dict(method=['GET'])) + uri = path + '/flowentry/{cmd}' mapper.connect('stats', uri, controller=StatsController, action='mod_flow_entry', @@ -752,7 +760,9 @@ lock.set() @set_ev_cls([ofp_event.EventOFPSwitchFeatures, - ofp_event.EventOFPQueueGetConfigReply], MAIN_DISPATCHER) + ofp_event.EventOFPQueueGetConfigReply, + ofp_event.EventOFPRoleReply, + ], MAIN_DISPATCHER) def features_reply_handler(self, ev): msg = ev.msg dp = msg.datapath diff -Nru ryu-4.9/ryu/app/rest_conf_switch.py ryu-4.15/ryu/app/rest_conf_switch.py --- ryu-4.9/ryu/app/rest_conf_switch.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/app/rest_conf_switch.py 2017-07-02 11:08:32.000000000 +0000 @@ -21,12 +21,12 @@ Used by OpenStack Ryu agent. """ -from six.moves import http_client import json -import logging -from webob import Response + +from six.moves import http_client from ryu.app.wsgi import ControllerBase +from ryu.app.wsgi import Response from ryu.base import app_manager from ryu.controller import conf_switch from ryu.lib import dpid as dpid_lib diff -Nru ryu-4.9/ryu/app/rest_firewall.py ryu-4.15/ryu/app/rest_firewall.py --- ryu-4.9/ryu/app/rest_firewall.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/app/rest_firewall.py 2017-07-02 11:08:32.000000000 +0000 @@ -17,9 +17,8 @@ import logging import json -from webob import Response - from ryu.app.wsgi import ControllerBase +from ryu.app.wsgi import Response from ryu.app.wsgi import WSGIApplication from ryu.base import app_manager from ryu.controller import ofp_event diff -Nru ryu-4.9/ryu/app/rest_qos.py ryu-4.15/ryu/app/rest_qos.py --- ryu-4.9/ryu/app/rest_qos.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/app/rest_qos.py 2017-07-02 11:08:32.000000000 +0000 @@ -18,10 +18,11 @@ import json import re -from webob import Response - from ryu.app import conf_switch_key as cs_key -from ryu.app.wsgi import ControllerBase, WSGIApplication, route +from ryu.app.wsgi import ControllerBase +from ryu.app.wsgi import Response +from ryu.app.wsgi import route +from ryu.app.wsgi import WSGIApplication from ryu.base import app_manager from ryu.controller import conf_switch from ryu.controller import ofp_event diff -Nru ryu-4.9/ryu/app/rest_router.py ryu-4.15/ryu/app/rest_router.py --- ryu-4.9/ryu/app/rest_router.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/app/rest_router.py 2017-07-02 11:08:32.000000000 +0000 @@ -20,9 +20,9 @@ import struct import json -from webob import Response from ryu.app.wsgi import ControllerBase +from ryu.app.wsgi import Response from ryu.app.wsgi import 
WSGIApplication from ryu.base import app_manager from ryu.controller import dpset @@ -1010,14 +1010,14 @@ else: if header_list[ARP].opcode == arp.ARP_REQUEST: # ARP request to router port -> send ARP reply - src_mac = header_list[ARP].src_mac - dst_mac = self.port_data[in_port].mac + src_mac = self.port_data[in_port].mac + dst_mac = header_list[ARP].src_mac arp_target_mac = dst_mac output = in_port in_port = self.ofctl.dp.ofproto.OFPP_CONTROLLER self.ofctl.send_arp(arp.ARP_REPLY, self.vlan_id, - dst_mac, src_mac, dst_ip, src_ip, + src_mac, dst_mac, dst_ip, src_ip, arp_target_mac, in_port, output) log_msg = 'Receive ARP request from [%s] to router port [%s].' diff -Nru ryu-4.9/ryu/app/rest_topology.py ryu-4.15/ryu/app/rest_topology.py --- ryu-4.9/ryu/app/rest_topology.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/app/rest_topology.py 2017-07-02 11:08:32.000000000 +0000 @@ -14,9 +14,11 @@ # limitations under the License. import json -from webob import Response -from ryu.app.wsgi import ControllerBase, WSGIApplication, route +from ryu.app.wsgi import ControllerBase +from ryu.app.wsgi import Response +from ryu.app.wsgi import route +from ryu.app.wsgi import WSGIApplication from ryu.base import app_manager from ryu.lib import dpid as dpid_lib from ryu.topology.api import get_switch, get_link, get_host diff -Nru ryu-4.9/ryu/app/simple_switch_rest_13.py ryu-4.15/ryu/app/simple_switch_rest_13.py --- ryu-4.9/ryu/app/simple_switch_rest_13.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/app/simple_switch_rest_13.py 2017-07-02 11:08:32.000000000 +0000 @@ -16,11 +16,13 @@ import json from ryu.app import simple_switch_13 -from webob import Response from ryu.controller import ofp_event from ryu.controller.handler import CONFIG_DISPATCHER from ryu.controller.handler import set_ev_cls -from ryu.app.wsgi import ControllerBase, WSGIApplication, route +from ryu.app.wsgi import ControllerBase +from ryu.app.wsgi import Response +from ryu.app.wsgi import route +from ryu.app.wsgi import WSGIApplication from ryu.lib import dpid as dpid_lib simple_switch_instance_name = 'simple_switch_api_app' diff -Nru ryu-4.9/ryu/app/simple_switch_websocket_13.py ryu-4.15/ryu/app/simple_switch_websocket_13.py --- ryu-4.9/ryu/app/simple_switch_websocket_13.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/app/simple_switch_websocket_13.py 2017-07-02 11:08:32.000000000 +0000 @@ -42,15 +42,14 @@ 15:0c:de:49": 2}}} """ -import json - -from webob import Response from ryu.app import simple_switch_13 -from ryu.app.wsgi import route, websocket, ControllerBase, WSGIApplication -from ryu.app.wsgi import rpc_public, WebSocketRPCServer +from ryu.app.wsgi import ControllerBase +from ryu.app.wsgi import rpc_public +from ryu.app.wsgi import websocket +from ryu.app.wsgi import WebSocketRPCServer +from ryu.app.wsgi import WSGIApplication from ryu.controller import ofp_event from ryu.controller.handler import set_ev_cls -from ryu.lib import hub from ryu.lib.packet import packet diff -Nru ryu-4.9/ryu/app/wsgi.py ryu-4.15/ryu/app/wsgi.py --- ryu-4.9/ryu/app/wsgi.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/app/wsgi.py 2017-07-02 11:08:32.000000000 +0000 @@ -28,16 +28,23 @@ from tinyrpc.client import RPCClient import webob.dec import webob.exc -from webob.response import Response +from webob.request import Request as webob_Request +from webob.response import Response as webob_Response from ryu import cfg from ryu.lib import hub +DEFAULT_WSGI_HOST = '0.0.0.0' +DEFAULT_WSGI_PORT = 8080 CONF = cfg.CONF 
CONF.register_cli_opts([ - cfg.StrOpt('wsapi-host', default='', help='webapp listen host'), - cfg.IntOpt('wsapi-port', default=8080, help='webapp listen port') + cfg.StrOpt( + 'wsapi-host', default=DEFAULT_WSGI_HOST, + help='webapp listen host (default %s)' % DEFAULT_WSGI_HOST), + cfg.IntOpt( + 'wsapi-port', default=DEFAULT_WSGI_PORT, + help='webapp listen port (default %s)' % DEFAULT_WSGI_PORT), ]) HEX_PATTERN = r'0x[0-9a-z]+' @@ -56,6 +63,33 @@ return _route +class Request(webob_Request): + """ + Wrapper class for webob.request.Request. + + The behavior of this class is the same as webob.request.Request + except for setting "charset" to "UTF-8" automatically. + """ + DEFAULT_CHARSET = "UTF-8" + + def __init__(self, environ, charset=DEFAULT_CHARSET, *args, **kwargs): + super(Request, self).__init__( + environ, charset=charset, *args, **kwargs) + + +class Response(webob_Response): + """ + Wrapper class for webob.response.Response. + + The behavior of this class is the same as webob.response.Response + except for setting "charset" to "UTF-8" automatically. + """ + DEFAULT_CHARSET = "UTF-8" + + def __init__(self, charset=DEFAULT_CHARSET, *args, **kwargs): + super(Response, self).__init__(charset=charset, *args, **kwargs) + + class WebSocketRegistrationWrapper(object): def __init__(self, func, controller): diff -Nru ryu-4.9/ryu/cmd/manager.py ryu-4.15/ryu/cmd/manager.py --- ryu-4.9/ryu/cmd/manager.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/cmd/manager.py 2017-07-02 11:08:32.000000000 +0000 @@ -16,6 +16,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os +import sys + from ryu.lib import hub hub.patch(thread=False) @@ -43,10 +46,28 @@ cfg.BoolOpt('enable-debugger', default=False, help='don\'t overwrite Python standard threading library' '(use only for debugging)'), + cfg.StrOpt('user-flags', default=None, + help='Additional flags file for user applications'), ]) +def _parse_user_flags(): + """ + Parses user-flags file and loads it to register user defined options. + """ + try: + idx = list(sys.argv).index('--user-flags') + user_flags_file = sys.argv[idx + 1] + except (ValueError, IndexError): + user_flags_file = '' + + if user_flags_file and os.path.isfile(user_flags_file): + from ryu.utils import _import_module_file + _import_module_file(user_flags_file) + + def main(args=None, prog=None): + _parse_user_flags() try: CONF(args=args, prog=prog, project='ryu', version='ryu-manager %s' % version, @@ -65,7 +86,6 @@ hub.patch(thread=True) if CONF.pid_file: - import os with open(CONF.pid_file, 'w') as pid_file: pid_file.write(str(os.getpid())) diff -Nru ryu-4.9/ryu/cmd/rpc_cli.py ryu-4.15/ryu/cmd/rpc_cli.py --- ryu-4.9/ryu/cmd/rpc_cli.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/cmd/rpc_cli.py 2017-07-02 11:08:32.000000000 +0000 @@ -31,6 +31,7 @@ from __future__ import print_function +import ast import cmd import signal import socket @@ -43,8 +44,12 @@ CONF = cfg.CONF CONF.register_cli_opts([ - # eg. rpc-cli --peers=hoge=localhost:9998,fuga=localhost:9999 - cfg.ListOpt('peers', default=[], help='list of peers') + cfg.ListOpt('peers', default=[], + help='List of peers, separated by commas. ' + '(e.g., "hoge=localhost:9998,fuga=localhost:9999")'), + cfg.StrOpt('command', short='c', default=None, + help='Command to be executed as single command. 
' + 'The default is None and opens interactive console.'), ]) @@ -52,16 +57,18 @@ def __init__(self, name, addr): self._name = name self._addr = addr + self.socket = None self.client = None try: self.connect() - except: - pass + except ConnectionError as e: + print('Exception when connecting to peer "%s": %s' % (name, e)) + raise e def connect(self): - self.client = None - s = socket.create_connection(self._addr) - self.client = rpc.Client(s, notification_callback=self.notification) + self.socket = socket.create_connection(self._addr) + self.client = rpc.Client(self.socket, + notification_callback=self.notification) def try_to_connect(self, verbose=False): if self.client: @@ -100,12 +107,25 @@ print("connected. retrying the request...") return g() + def close(self): + self.socket.close() + peers = {} def add_peer(name, host, port): - peers[name] = Peer(name, (host, port)) + try: + peer = Peer(name, (host, port)) + except ConnectionError: + return + + peers[name] = peer + + +def close_peers(): + for peer in peers.values(): + peer.socket.close() class Cmd(cmd.Cmd): @@ -120,9 +140,9 @@ try: peer = args[0] method = args[1] - params = eval(args[2]) - except: - print("argument error") + params = ast.literal_eval(args[2]) + except (IndexError, ValueError) as e: + print("argument error: %s" % e) return try: p = peers[peer] @@ -170,7 +190,8 @@ def complete_notify(self, text, line, begidx, endidx): return self._complete_peer(text, line, begidx, endidx) - def do_EOF(self, _line): + def do_EOF(self, _line=None): + close_peers() sys.exit(0) def emptyline(self): @@ -201,6 +222,9 @@ signal.signal(signal.SIGALRM, self._timeout) signal.alarm(1) + def postloop(self): + close_peers() + def onecmd(self, string): self._in_onecmd = True try: @@ -229,6 +253,11 @@ host, port = addr.rsplit(':', 1) add_peer(name, host, port) + if CONF.command: + command = Cmd() + command.onecmd(CONF.command) + command.do_EOF() + Cmd().cmdloop() diff -Nru ryu-4.9/ryu/controller/controller.py ryu-4.15/ryu/controller/controller.py --- ryu-4.9/ryu/controller/controller.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/controller/controller.py 2017-07-02 11:08:32.000000000 +0000 @@ -48,9 +48,12 @@ LOG = logging.getLogger('ryu.controller.controller') +DEFAULT_OFP_HOST = '0.0.0.0' + CONF = cfg.CONF CONF.register_cli_opts([ - cfg.StrOpt('ofp-listen-host', default='', help='openflow listen host'), + cfg.StrOpt('ofp-listen-host', default=DEFAULT_OFP_HOST, + help='openflow listen host (default %s)' % DEFAULT_OFP_HOST), cfg.IntOpt('ofp-tcp-listen-port', default=None, help='openflow tcp listen port ' '(default: %d)' % ofproto_common.OFP_TCP_PORT), diff -Nru ryu-4.9/ryu/exception.py ryu-4.15/ryu/exception.py --- ryu-4.9/ryu/exception.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/exception.py 2017-07-02 11:08:32.000000000 +0000 @@ -52,6 +52,10 @@ super(OFPTruncatedMessage, self).__init__(msg, **kwargs) +class OFPInvalidActionString(RyuException): + message = 'unable to parse: %(action_str)s' + + class NetworkNotFound(RyuException): message = 'no such network id %(network_id)s' diff -Nru ryu-4.9/ryu/flags.py ryu-4.15/ryu/flags.py --- ryu-4.9/ryu/flags.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/flags.py 2017-07-02 11:08:32.000000000 +0000 @@ -62,7 +62,6 @@ DEFAULT_RPC_PORT = 50002 DEFAULT_RPC_HOST = '0.0.0.0' -CONF = cfg.CONF CONF.register_cli_opts([ cfg.IntOpt('rpc-port', default=DEFAULT_RPC_PORT, help='Port for RPC server (default: %s)' % DEFAULT_RPC_PORT), @@ -72,3 +71,44 @@ help='The config file formatted in Python 
source file. ' 'Please refer to "bgp_sample_conf.py" for details.') ], group='bgp-app') + + +DEFAULT_ZSERV_HOST = '/var/run/quagga/zserv.api' +DEFAULT_ZSERV_PORT = 2600 +DEFAULT_ZSERV_VERSION = 2 # Version of Ubuntu 16.04 LTS packaged Quagga +DEFAULT_ZSERV_CLIENT_ROUTE_TYPE = 'BGP' +DEFAULT_ZSERV_INTERVAL = 10 +DEFAULT_ZSERV_DATABASE = 'sqlite:///zebra.db' +DEFAULT_ZSERV_ROUTER_ID = '1.1.1.1' + +CONF.register_cli_opts([ + cfg.StrOpt( + 'server-host', default=DEFAULT_ZSERV_HOST, + help='Path to Unix Socket or IP address of Zebra server ' + '(default: %s)' % DEFAULT_ZSERV_HOST), + cfg.IntOpt( + 'server-port', default=DEFAULT_ZSERV_PORT, + help='Port number of Zebra server ' + '(default: %s)' + % DEFAULT_ZSERV_PORT), + cfg.IntOpt( + 'server-version', default=DEFAULT_ZSERV_VERSION, + help='Zebra protocol version of Zebra server ' + '(default: %s)' % DEFAULT_ZSERV_VERSION), + cfg.StrOpt( + 'client-route-type', default=DEFAULT_ZSERV_CLIENT_ROUTE_TYPE, + help='Zebra route type advertised by Zebra client service. ' + '(default: %s)' % DEFAULT_ZSERV_CLIENT_ROUTE_TYPE), + cfg.IntOpt( + 'retry-interval', default=DEFAULT_ZSERV_INTERVAL, + help='Retry interval connecting to Zebra server ' + '(default: %s)' % DEFAULT_ZSERV_INTERVAL), + cfg.StrOpt( + 'db-url', default=DEFAULT_ZSERV_DATABASE, + help='URL to database used by Zebra protocol service ' + '(default: %s)' % DEFAULT_ZSERV_DATABASE), + cfg.StrOpt( + 'router-id', default=DEFAULT_ZSERV_ROUTER_ID, + help='Initial Router ID used by Zebra protocol service ' + '(default: %s)' % DEFAULT_ZSERV_ROUTER_ID), +], group='zapi') diff -Nru ryu-4.9/ryu/__init__.py ryu-4.15/ryu/__init__.py --- ryu-4.9/ryu/__init__.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/__init__.py 2017-07-02 11:08:32.000000000 +0000 @@ -14,5 +14,5 @@ # limitations under the License. -version_info = (4, 9) +version_info = (4, 15) version = '.'.join(map(str, version_info)) diff -Nru ryu-4.9/ryu/lib/addrconv.py ryu-4.15/ryu/lib/addrconv.py --- ryu-4.9/ryu/lib/addrconv.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/addrconv.py 2017-07-02 11:08:32.000000000 +0000 @@ -18,20 +18,32 @@ class AddressConverter(object): - def __init__(self, addr, strat, **kwargs): + def __init__(self, addr, strat, fallback=None, **kwargs): self._addr = addr self._strat = strat + self._fallback = fallback self._addr_kwargs = kwargs def text_to_bin(self, text): - return self._addr(text, **self._addr_kwargs).packed + try: + return self._addr(text, **self._addr_kwargs).packed + except Exception as e: + if self._fallback is None: + raise e + + # text_to_bin is expected to return binary string under + # normal circumstances. See ofproto.oxx_fields._from_user. 
+ ip = self._fallback(text, **self._addr_kwargs) + return ip.ip.packed, ip.netmask.packed def bin_to_text(self, bin): return str(self._addr(self._strat.packed_to_int(bin), **self._addr_kwargs)) -ipv4 = AddressConverter(netaddr.IPAddress, netaddr.strategy.ipv4, version=4) -ipv6 = AddressConverter(netaddr.IPAddress, netaddr.strategy.ipv6, version=6) +ipv4 = AddressConverter(netaddr.IPAddress, netaddr.strategy.ipv4, + fallback=netaddr.IPNetwork, version=4) +ipv6 = AddressConverter(netaddr.IPAddress, netaddr.strategy.ipv6, + fallback=netaddr.IPNetwork, version=6) class mac_mydialect(netaddr.mac_unix): diff -Nru ryu-4.9/ryu/lib/hub.py ryu-4.15/ryu/lib/hub.py --- ryu-4.9/ryu/lib/hub.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/hub.py 2017-07-02 11:08:32.000000000 +0000 @@ -17,6 +17,8 @@ import logging import os +import netaddr + # We don't bother to use cfg.py because monkey patch needs to be # called very early. Instead, we use an environment variable to @@ -27,6 +29,10 @@ if HUB_TYPE == 'eventlet': import eventlet + # HACK: + # sleep() is the workaround for the following issue. + # https://github.com/eventlet/eventlet/issues/401 + eventlet.sleep() import eventlet.event import eventlet.queue import eventlet.semaphore @@ -110,11 +116,16 @@ assert backlog is None assert spawn == 'default' - if ':' in listen_info[0]: + if netaddr.valid_ipv6(listen_info[0]): self.server = eventlet.listen(listen_info, family=socket.AF_INET6) + elif os.path.isdir(os.path.dirname(listen_info[0])): + # Case for Unix domain socket + self.server = eventlet.listen(listen_info[0], + family=socket.AF_UNIX) else: self.server = eventlet.listen(listen_info) + if ssl_args: def wrap_and_handle(sock, addr): ssl_args.setdefault('server_side', True) diff -Nru ryu-4.9/ryu/lib/ip.py ryu-4.15/ryu/lib/ip.py --- ryu-4.9/ryu/lib/ip.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/ip.py 2017-07-02 11:08:32.000000000 +0000 @@ -16,10 +16,50 @@ import numbers import struct +import netaddr + from ryu.lib import addrconv from ryu.lib import type_desc +def _valid_ip(strategy, bits, addr, flags=0): + addr = addr.split('/') + if len(addr) == 1: + return strategy(addr[0], flags) + elif len(addr) == 2: + return strategy(addr[0], flags) and 0 <= int(addr[1]) <= bits + else: + return False + + +def valid_ipv4(addr, flags=0): + """ + Wrapper function of "netaddr.valid_ipv4()". + + The function extends "netaddr.valid_ipv4()" to enable to validate + IPv4 network address in "xxx.xxx.xxx.xxx/xx" format. + + :param addr: IP address to be validated. + :param flags: See the "netaddr.valid_ipv4()" docs for details. + :return: True is valid. False otherwise. + """ + return _valid_ip(netaddr.valid_ipv4, 32, addr, flags) + + +def valid_ipv6(addr, flags=0): + """ + Wrapper function of "netaddr.valid_ipv6()". + + The function extends "netaddr.valid_ipv6()" to enable to validate + IPv4 network address in "xxxx:xxxx:xxxx::/xx" format. + + :param addr: IP address to be validated. + :param flags: See the "netaddr.valid_ipv6()" docs for details. + :return: True is valid. False otherwise. + """ + return _valid_ip(netaddr.valid_ipv6, 128, addr, flags) + + def ipv4_to_bin(ip): """ Converts human readable IPv4 string to binary representation. diff -Nru ryu-4.9/ryu/lib/mrtlib.py ryu-4.15/ryu/lib/mrtlib.py --- ryu-4.9/ryu/lib/mrtlib.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/lib/mrtlib.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,1224 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Library for reading/writing MRT (Multi-Threaded Routing Toolkit) Routing +Information Export Format [RFC6396]. +""" + +import abc +import logging +import struct +import time + +import netaddr +import six + +from ryu.lib import addrconv +from ryu.lib import ip +from ryu.lib import stringify +from ryu.lib import type_desc +from ryu.lib.packet import bgp +from ryu.lib.packet import ospf + + +LOG = logging.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class MrtRecord(stringify.StringifyMixin, type_desc.TypeDisp): + """ + MRT record. + """ + _HEADER_FMT = '!IHHI' # the same as MRT Common Header + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + MESSAGE_CLS = None # parser class for message field + + # MRT Types + TYPE_OSPFv2 = 11 + TYPE_TABLE_DUMP = 12 + TYPE_TABLE_DUMP_V2 = 13 + TYPE_BGP4MP = 16 + TYPE_BGP4MP_ET = 17 + TYPE_ISIS = 32 + TYPE_ISIS_ET = 33 + TYPE_OSPFv3 = 48 + TYPE_OSPFv3_ET = 49 + + # List of MRT type using Extended Timestamp MRT Header + _EXT_TS_TYPES = [TYPE_BGP4MP_ET, TYPE_ISIS_ET, TYPE_OSPFv3_ET] + + def __init__(self, message, timestamp=None, type_=None, subtype=None, + length=None): + assert issubclass(message.__class__, MrtMessage) + self.message = message + self.timestamp = timestamp + if type_ is None: + type_ = self._rev_lookup_type(self.__class__) + self.type = type_ + if subtype is None: + subtype = self.MESSAGE_CLS._rev_lookup_type(message.__class__) + self.subtype = subtype + self.length = length + + @classmethod + def parse_common_header(cls, buf): + header_fields = struct.unpack_from( + cls._HEADER_FMT, buf) + + return list(header_fields), buf[cls.HEADER_SIZE:] + + @classmethod + def parse_extended_header(cls, buf): + # If extended header field exist, override this in subclass. 
+ return [], buf + + @classmethod + def parse_pre(cls, buf): + buf = six.binary_type(buf) # for convenience + + header_fields, _ = cls.parse_common_header(buf) + # timestamp = header_fields[0] + type_ = header_fields[1] + # subtype = header_fields[2] + length = header_fields[3] + if type_ in cls._EXT_TS_TYPES: + header_cls = ExtendedTimestampMrtRecord + else: + header_cls = MrtCommonRecord + + required_len = header_cls.HEADER_SIZE + length + + return required_len + + @classmethod + def parse(cls, buf): + buf = six.binary_type(buf) # for convenience + + header_fields, rest = cls.parse_common_header(buf) + # timestamp = header_fields[0] + type_ = header_fields[1] + subtype = header_fields[2] + length = header_fields[3] + + sub_cls = MrtRecord._lookup_type(type_) + extended_headers, rest = sub_cls.parse_extended_header(rest) + header_fields.extend(extended_headers) + + msg_cls = sub_cls.MESSAGE_CLS._lookup_type(subtype) + message_bin = rest[:length] + message = msg_cls.parse(message_bin) + + return sub_cls(message, *header_fields), rest[length:] + + @abc.abstractmethod + def serialize_header(self): + pass + + def serialize(self): + if self.timestamp is None: + self.timestamp = int(time.time()) + + buf = self.message.serialize() + + self.length = len(buf) # fixup + + return self.serialize_header() + buf + + +class MrtCommonRecord(MrtRecord): + """ + MRT record using MRT Common Header. + """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Timestamp | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Type | Subtype | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Length | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Message... (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!IHHI' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + def serialize_header(self): + return struct.pack(self._HEADER_FMT, + self.timestamp, + self.type, self.subtype, + self.length) + + +class ExtendedTimestampMrtRecord(MrtRecord): + """ + MRT record using Extended Timestamp MRT Header. + """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Timestamp | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Type | Subtype | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Length | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Microsecond Timestamp | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Message... 
(variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!IHHII' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + _EXT_HEADER_FMT = '!I' + EXT_HEADER_SIZE = struct.calcsize(_EXT_HEADER_FMT) + + def __init__(self, message, timestamp=None, type_=None, subtype=None, + ms_timestamp=None, length=None): + super(ExtendedTimestampMrtRecord, self).__init__( + message, timestamp, type_, subtype, length) + self.ms_timestamp = ms_timestamp + + @classmethod + def parse_extended_header(cls, buf): + (ms_timestamp,) = struct.unpack_from(cls._EXT_HEADER_FMT, buf) + + return [ms_timestamp], buf[cls.EXT_HEADER_SIZE:] + + def serialize_header(self): + return struct.pack(self._HEADER_FMT, + self.timestamp, + self.type, self.subtype, + self.length, + self.ms_timestamp) + + +@six.add_metaclass(abc.ABCMeta) +class MrtMessage(stringify.StringifyMixin, type_desc.TypeDisp): + """ + MRT Message in record. + """ + + @classmethod + @abc.abstractmethod + def parse(cls, buf): + pass + + @abc.abstractmethod + def serialize(self): + pass + + +class UnknownMrtMessage(MrtMessage): + """ + MRT Message for the UNKNOWN Type. + """ + + def __init__(self, buf): + self.buf = buf + + @classmethod + def parse(cls, buf): + return cls(buf) + + def serialize(self): + return self.buf + +# Registers self to unknown(default) type +UnknownMrtMessage._UNKNOWN_TYPE = UnknownMrtMessage + + +@MrtRecord.register_unknown_type() +class UnknownMrtRecord(MrtCommonRecord): + """ + MRT record for the UNKNOWN Type. + """ + MESSAGE_CLS = UnknownMrtMessage + + +class Ospf2MrtMessage(MrtMessage): + """ + MRT Message for the OSPFv2 Type. + """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Remote IP Address | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Local IP Address | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | OSPF Message Contents (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!4s4s' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + def __init__(self, remote_ip, local_ip, ospf_message): + self.remote_ip = remote_ip + self.local_ip = local_ip + assert isinstance(ospf_message, ospf.OSPFMessage) + self.ospf_message = ospf_message + + @classmethod + def parse(cls, buf): + (remote_ip, local_ip) = struct.unpack_from(cls._HEADER_FMT, buf) + remote_ip = addrconv.ipv4.bin_to_text(remote_ip) + local_ip = addrconv.ipv4.bin_to_text(local_ip) + ospf_message, _, _ = ospf.OSPFMessage.parser(buf[cls.HEADER_SIZE:]) + + return cls(remote_ip, local_ip, ospf_message) + + def serialize(self): + return (addrconv.ipv4.text_to_bin(self.remote_ip) + + addrconv.ipv4.text_to_bin(self.local_ip) + + self.ospf_message.serialize()) + + +@MrtRecord.register_type(MrtRecord.TYPE_OSPFv2) +class Ospf2MrtRecord(MrtCommonRecord): + """ + MRT Record for the OSPFv2 Type. + """ + MESSAGE_CLS = Ospf2MrtMessage + + def __init__(self, message, timestamp=None, type_=None, subtype=0, + length=None): + super(Ospf2MrtRecord, self).__init__( + message=message, timestamp=timestamp, type_=type_, + subtype=subtype, length=length) + +# Registers self to unknown(default) type +Ospf2MrtMessage._UNKNOWN_TYPE = Ospf2MrtMessage + + +@six.add_metaclass(abc.ABCMeta) +class TableDumpMrtMessage(MrtMessage): + """ + MRT Message for the TABLE_DUMP Type. 
+ """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | View Number | Sequence Number | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Prefix (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Prefix Length | Status | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Originated Time | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer IP Address (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer AS | Attribute Length | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | BGP Attribute... (variable) + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '' # should be defined in subclass + HEADER_SIZE = 0 + + def __init__(self, view_num, seq_num, prefix, prefix_len, status, + originated_time, peer_ip, peer_as, bgp_attributes, + attr_len=None): + self.view_num = view_num + self.seq_num = seq_num + self.prefix = prefix + self.prefix_len = prefix_len + # Status in the TABLE_DUMP Type SHOULD be set to 1 + assert status == 1 + self.status = status + self.originated_time = originated_time + self.peer_ip = peer_ip + self.peer_as = peer_as + self.attr_len = attr_len + assert isinstance(bgp_attributes, (list, tuple)) + for attr in bgp_attributes: + assert isinstance(attr, bgp._PathAttribute) + self.bgp_attributes = bgp_attributes + + @classmethod + def parse(cls, buf): + (view_num, seq_num, prefix, prefix_len, status, originated_time, + peer_ip, peer_as, attr_len) = struct.unpack_from(cls._HEADER_FMT, buf) + prefix = ip.bin_to_text(prefix) + peer_ip = ip.bin_to_text(peer_ip) + + bgp_attr_bin = buf[cls.HEADER_SIZE:cls.HEADER_SIZE + attr_len] + bgp_attributes = [] + while bgp_attr_bin: + attr, bgp_attr_bin = bgp._PathAttribute.parser(bgp_attr_bin) + bgp_attributes.append(attr) + + return cls(view_num, seq_num, prefix, prefix_len, status, + originated_time, peer_ip, peer_as, bgp_attributes, + attr_len) + + def serialize(self): + bgp_attrs_bin = bytearray() + for attr in self.bgp_attributes: + bgp_attrs_bin += attr.serialize() + self.attr_len = len(bgp_attrs_bin) # fixup + + prefix = ip.text_to_bin(self.prefix) + peer_ip = ip.text_to_bin(self.peer_ip) + + return struct.pack(self._HEADER_FMT, + self.view_num, self.seq_num, + prefix, + self.prefix_len, self.status, + self.originated_time, + peer_ip, + self.peer_as, self.attr_len) + bgp_attrs_bin + + +@MrtRecord.register_type(MrtRecord.TYPE_TABLE_DUMP) +class TableDumpMrtRecord(MrtCommonRecord): + """ + MRT Record for the TABLE_DUMP Type. + """ + MESSAGE_CLS = TableDumpMrtMessage + + # MRT Subtype + SUBTYPE_AFI_IPv4 = 1 + SUBTYPE_AFI_IPv6 = 2 + + +@TableDumpMrtMessage.register_type(TableDumpMrtRecord.SUBTYPE_AFI_IPv4) +class TableDumpAfiIPv4MrtMessage(TableDumpMrtMessage): + """ + MRT Message for the TABLE_DUMP Type and the AFI_IPv4 subtype. + """ + _HEADER_FMT = '!HH4sBBI4sHH' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + +@TableDumpMrtMessage.register_type(TableDumpMrtRecord.SUBTYPE_AFI_IPv6) +class TableDumpAfiIPv6MrtMessage(TableDumpMrtMessage): + """ + MRT Message for the TABLE_DUMP Type and the AFI_IPv6 subtype. + """ + _HEADER_FMT = '!HH16sBBI16sHH' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + +@six.add_metaclass(abc.ABCMeta) +class TableDump2MrtMessage(MrtMessage): + """ + MRT Message for the TABLE_DUMP_V2 Type. 
+ """ + + +@MrtRecord.register_type(MrtRecord.TYPE_TABLE_DUMP_V2) +class TableDump2MrtRecord(MrtCommonRecord): + MESSAGE_CLS = TableDump2MrtMessage + + # MRT Subtype + SUBTYPE_PEER_INDEX_TABLE = 1 + SUBTYPE_RIB_IPV4_UNICAST = 2 + SUBTYPE_RIB_IPV4_MULTICAST = 3 + SUBTYPE_RIB_IPV6_UNICAST = 4 + SUBTYPE_RIB_IPV6_MULTICAST = 5 + SUBTYPE_RIB_GENERIC = 6 + + +@TableDump2MrtMessage.register_type( + TableDump2MrtRecord.SUBTYPE_PEER_INDEX_TABLE) +class TableDump2PeerIndexTableMrtMessage(TableDump2MrtMessage): + """ + MRT Message for the TABLE_DUMP_V2 Type and the PEER_INDEX_TABLE subtype. + """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Collector BGP ID | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | View Name Length | View Name (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer Count | Peer Entries (variable) + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!4sH' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + _PEER_COUNT_FMT = '!H' + PEER_COUNT_SIZE = struct.calcsize(_PEER_COUNT_FMT) + + def __init__(self, bgp_id, peer_entries, + view_name='', view_name_len=None, peer_count=None): + self.bgp_id = bgp_id + assert isinstance(peer_entries, (list, tuple)) + for p in peer_entries: + assert isinstance(p, MrtPeer) + self.peer_entries = peer_entries + assert isinstance(view_name, str) + self.view_name = view_name + self.view_name_len = view_name_len + self.peer_count = peer_count + + @classmethod + def parse(cls, buf): + (bgp_id, view_name_len) = struct.unpack_from(cls._HEADER_FMT, buf) + bgp_id = addrconv.ipv4.bin_to_text(bgp_id) + offset = cls.HEADER_SIZE + + (view_name,) = struct.unpack_from('!%ds' % view_name_len, buf, offset) + view_name = str(view_name.decode('utf-8')) + offset += view_name_len + + (peer_count,) = struct.unpack_from(cls._PEER_COUNT_FMT, buf, offset) + offset += cls.PEER_COUNT_SIZE + + rest = buf[offset:] + peer_entries = [] + for i in range(peer_count): + p, rest = MrtPeer.parse(rest) + peer_entries.insert(i, p) + + return cls(bgp_id, peer_entries, view_name, view_name_len, peer_count) + + def serialize(self): + view_name = self.view_name.encode('utf-8') + self.view_name_len = len(view_name) # fixup + + self.peer_count = len(self.peer_entries) # fixup + + buf = struct.pack(self._HEADER_FMT, + addrconv.ipv4.text_to_bin(self.bgp_id), + self.view_name_len) + view_name + + buf += struct.pack(self._PEER_COUNT_FMT, + self.peer_count) + + for p in self.peer_entries: + buf += p.serialize() + + return buf + + +class MrtPeer(stringify.StringifyMixin): + """ + MRT Peer. 
+ """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer Type | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer BGP ID | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer IP Address (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer AS (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!B4s' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + # Peer Type field: + # + # 0 1 2 3 4 5 6 7 + # +-+-+-+-+-+-+-+-+ + # | | | | | | |A|I| + # +-+-+-+-+-+-+-+-+ + # + # Bit 6: Peer AS number size: 0 = 2 bytes, 1 = 4 bytes + # Bit 7: Peer IP Address family: 0 = IPv4(4 bytes), 1 = IPv6(16 bytes) + IP_ADDR_FAMILY_BIT = 1 << 0 + AS_NUMBER_SIZE_BIT = 1 << 1 + + def __init__(self, bgp_id, ip_addr, as_num, type_=0): + self.type = type_ + self.bgp_id = bgp_id + self.ip_addr = ip_addr + self.as_num = as_num + + @classmethod + def parse(cls, buf): + (type_, bgp_id) = struct.unpack_from(cls._HEADER_FMT, buf) + bgp_id = addrconv.ipv4.bin_to_text(bgp_id) + offset = cls.HEADER_SIZE + + if type_ & cls.IP_ADDR_FAMILY_BIT: + # IPv6 address family + ip_addr_len = 16 + else: + # IPv4 address family + ip_addr_len = 4 + ip_addr = ip.bin_to_text(buf[offset:offset + ip_addr_len]) + offset += ip_addr_len + + if type_ & cls.AS_NUMBER_SIZE_BIT: + # Four octet AS number + (as_num,) = struct.unpack_from('!I', buf, offset) + offset += 4 + else: + # Two octet AS number + (as_num,) = struct.unpack_from('!H', buf, offset) + offset += 2 + + return cls(bgp_id, ip_addr, as_num, type_), buf[offset:] + + def serialize(self): + if netaddr.valid_ipv6(self.ip_addr): + # Sets Peer IP Address family bit to IPv6 + self.type |= self.IP_ADDR_FAMILY_BIT + ip_addr = ip.text_to_bin(self.ip_addr) + + if self.type & self.AS_NUMBER_SIZE_BIT or self.as_num > 0xffff: + # Four octet AS number + self.type |= self.AS_NUMBER_SIZE_BIT + as_num = struct.pack('!I', self.as_num) + else: + # Two octet AS number + as_num = struct.pack('!H', self.as_num) + + buf = struct.pack(self._HEADER_FMT, + self.type, + addrconv.ipv4.text_to_bin(self.bgp_id)) + + return buf + ip_addr + as_num + + +@six.add_metaclass(abc.ABCMeta) +class TableDump2AfiSafiSpecificRibMrtMessage(TableDump2MrtMessage): + """ + MRT Message for the TABLE_DUMP_V2 Type and the AFI/SAFI-specific + RIB subtypes. + + The AFI/SAFI-specific RIB subtypes consist of the RIB_IPV4_UNICAST, + RIB_IPV4_MULTICAST, RIB_IPV6_UNICAST, and RIB_IPV6_MULTICAST subtypes. 
+ """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Sequence Number | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Prefix Length | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Prefix (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Entry Count | RIB Entries (variable) + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!I' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + # Parser class to parse the Prefix field + _PREFIX_CLS = None # should be defined in subclass + + def __init__(self, seq_num, prefix, rib_entries, entry_count=None): + self.seq_num = seq_num + assert isinstance(prefix, self._PREFIX_CLS) + self.prefix = prefix + self.entry_count = entry_count + assert isinstance(rib_entries, (list, tuple)) + for rib_entry in rib_entries: + assert isinstance(rib_entry, MrtRibEntry) + self.rib_entries = rib_entries + + @classmethod + def parse_rib_entries(cls, buf): + (entry_count,) = struct.unpack_from('!H', buf) + + rest = buf[2:] + rib_entries = [] + for i in range(entry_count): + r, rest = MrtRibEntry.parse(rest) + rib_entries.insert(i, r) + + return entry_count, rib_entries, rest + + @classmethod + def parse(cls, buf): + (seq_num,) = struct.unpack_from(cls._HEADER_FMT, buf) + rest = buf[cls.HEADER_SIZE:] + + prefix, rest = cls._PREFIX_CLS.parser(rest) + + entry_count, rib_entries, _ = cls.parse_rib_entries(rest) + + return cls(seq_num, prefix, rib_entries, entry_count) + + def serialize_rib_entries(self): + self.entry_count = len(self.rib_entries) # fixup + + rib_entries_bin = bytearray() + for r in self.rib_entries: + rib_entries_bin += r.serialize() + + return struct.pack('!H', self.entry_count) + rib_entries_bin + + def serialize(self): + prefix_bin = self.prefix.serialize() + + rib_bin = self.serialize_rib_entries() # entry_count + rib_entries + + return struct.pack(self._HEADER_FMT, + self.seq_num) + prefix_bin + rib_bin + + +@TableDump2MrtMessage.register_type( + TableDump2MrtRecord.SUBTYPE_RIB_IPV4_UNICAST) +@TableDump2MrtMessage.register_type( + TableDump2MrtRecord.SUBTYPE_RIB_IPV4_MULTICAST) +class TableDump2RibIPv4UnicastMrtMessage(TableDump2AfiSafiSpecificRibMrtMessage): + """ + MRT Message for the TABLE_DUMP_V2 Type and the + RIB_IPV4_UNICAST/SUBTYPE_RIB_IPV4_MULTICAST subtype. + """ + _PREFIX_CLS = bgp.IPAddrPrefix + + +@TableDump2MrtMessage.register_type( + TableDump2MrtRecord.SUBTYPE_RIB_IPV6_UNICAST) +class TableDump2RibIPv6UnicastMrtMessage(TableDump2AfiSafiSpecificRibMrtMessage): + """ + MRT Message for the TABLE_DUMP_V2 Type and the + RIB_IPV6_UNICAST/SUBTYPE_RIB_IPV6_MULTICAST subtype. + """ + _PREFIX_CLS = bgp.IP6AddrPrefix + + +@TableDump2MrtMessage.register_type( + TableDump2MrtRecord.SUBTYPE_RIB_GENERIC) +class TableDump2RibGenericMrtMessage(TableDump2MrtMessage): + """ + MRT Message for the TABLE_DUMP_V2 Type and the RIB_GENERIC subtype. 
+ """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Sequence Number | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Address Family Identifier |Subsequent AFI | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Network Layer Reachability Information (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Entry Count | RIB Entries (variable) + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!IHB' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + def __init__(self, seq_num, afi, safi, nlri, rib_entries, + entry_count=None): + self.seq_num = seq_num + self.afi = afi + self.safi = safi + assert isinstance(nlri, bgp._AddrPrefix) + self.nlri = nlri + self.entry_count = entry_count + assert isinstance(rib_entries, (list, tuple)) + for rib_entry in rib_entries: + assert isinstance(rib_entry, MrtRibEntry) + self.rib_entries = rib_entries + + @classmethod + def parse_rib_entries(cls, buf): + (entry_count,) = struct.unpack_from('!H', buf) + + rest = buf[2:] + rib_entries = [] + for i in range(entry_count): + r, rest = MrtRibEntry.parse(rest) + rib_entries.insert(i, r) + + return entry_count, rib_entries, rest + + @classmethod + def parse(cls, buf): + (seq_num, afi, safi) = struct.unpack_from(cls._HEADER_FMT, buf) + rest = buf[cls.HEADER_SIZE:] + + nlri, rest = bgp.BGPNLRI.parser(rest) + + entry_count, rib_entries, _ = cls.parse_rib_entries(rest) + + return cls(seq_num, afi, safi, nlri, rib_entries, entry_count) + + def serialize_rib_entries(self): + self.entry_count = len(self.rib_entries) # fixup + + rib_entries_bin = bytearray() + for r in self.rib_entries: + rib_entries_bin += r.serialize() + + return struct.pack('!H', self.entry_count) + rib_entries_bin + + def serialize(self): + nlri_bin = self.nlri.serialize() + + rib_bin = self.serialize_rib_entries() # entry_count + rib_entries + + return struct.pack(self._HEADER_FMT, + self.seq_num, + self.afi, self.safi) + nlri_bin + rib_bin + + +class MrtRibEntry(stringify.StringifyMixin): + """ + MRT RIB Entry. + """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer Index | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Originated Time | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Attribute Length | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | BGP Attributes... 
(variable) + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!HIH' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + def __init__(self, peer_index, originated_time, bgp_attributes, + attr_len=None): + self.peer_index = peer_index + self.originated_time = originated_time + assert isinstance(bgp_attributes, (list, tuple)) + for attr in bgp_attributes: + assert isinstance(attr, bgp._PathAttribute) + self.bgp_attributes = bgp_attributes + self.attr_len = attr_len + + @classmethod + def parse(cls, buf): + (peer_index, originated_time, attr_len) = struct.unpack_from( + cls._HEADER_FMT, buf) + + bgp_attr_bin = buf[cls.HEADER_SIZE:cls.HEADER_SIZE + attr_len] + bgp_attributes = [] + while bgp_attr_bin: + attr, bgp_attr_bin = bgp._PathAttribute.parser(bgp_attr_bin) + bgp_attributes.append(attr) + + return cls(peer_index, originated_time, bgp_attributes, + attr_len), buf[cls.HEADER_SIZE + attr_len:] + + def serialize(self): + bgp_attrs_bin = bytearray() + for attr in self.bgp_attributes: + bgp_attrs_bin += attr.serialize() + self.attr_len = len(bgp_attrs_bin) # fixup + + return struct.pack(self._HEADER_FMT, + self.peer_index, + self.originated_time, + self.attr_len) + bgp_attrs_bin + + +@six.add_metaclass(abc.ABCMeta) +class Bgp4MpMrtMessage(MrtMessage): + """ + MRT Message for the BGP4MP Type. + """ + + +@MrtRecord.register_type(MrtRecord.TYPE_BGP4MP) +class Bgp4MpMrtRecord(MrtCommonRecord): + MESSAGE_CLS = Bgp4MpMrtMessage + + # MRT Subtype + SUBTYPE_BGP4MP_STATE_CHANGE = 0 + SUBTYPE_BGP4MP_MESSAGE = 1 + SUBTYPE_BGP4MP_MESSAGE_AS4 = 4 + SUBTYPE_BGP4MP_STATE_CHANGE_AS4 = 5 + SUBTYPE_BGP4MP_MESSAGE_LOCAL = 6 + SUBTYPE_BGP4MP_MESSAGE_AS4_LOCAL = 7 + + +@MrtRecord.register_type(MrtRecord.TYPE_BGP4MP_ET) +class Bgp4MpEtMrtRecord(ExtendedTimestampMrtRecord): + MESSAGE_CLS = Bgp4MpMrtMessage + + # MRT Subtype + SUBTYPE_BGP4MP_STATE_CHANGE = 0 + SUBTYPE_BGP4MP_MESSAGE = 1 + SUBTYPE_BGP4MP_MESSAGE_AS4 = 4 + SUBTYPE_BGP4MP_STATE_CHANGE_AS4 = 5 + SUBTYPE_BGP4MP_MESSAGE_LOCAL = 6 + SUBTYPE_BGP4MP_MESSAGE_AS4_LOCAL = 7 + + +@Bgp4MpMrtMessage.register_type( + Bgp4MpMrtRecord.SUBTYPE_BGP4MP_STATE_CHANGE) +class Bgp4MpStateChangeMrtMessage(Bgp4MpMrtMessage): + """ + MRT Message for the BGP4MP Type and the BGP4MP_STATE_CHANGE subtype. 
+ """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer AS Number | Local AS Number | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Interface Index | Address Family | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer IP Address (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Local IP Address (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Old State | New State | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!HHHH' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + _ADDRS_FMT = '!%ds%ds' + _STATES_FMT = '!HH' + STATES_SIZE = struct.calcsize(_STATES_FMT) + + # FSM states + STATE_IDLE = 1 + STATE_CONNECT = 2 + STATE_ACTIVE = 3 + STATE_OPEN_SENT = 4 + STATE_OPEN_CONFIRM = 5 + STATE_ESTABLISHED = 6 + + # Address Family types + AFI_IPv4 = 1 + AFI_IPv6 = 2 + + def __init__(self, peer_as, local_as, if_index, + peer_ip, local_ip, old_state, new_state, afi=None): + self.peer_as = peer_as + self.local_as = local_as + self.if_index = if_index + self.afi = afi + self.peer_ip = peer_ip + self.local_ip = local_ip + self.old_state = old_state + self.new_state = new_state + + @classmethod + def parse(cls, buf): + (peer_as, local_as, if_index, afi) = struct.unpack_from( + cls._HEADER_FMT, buf) + offset = cls.HEADER_SIZE + + if afi == cls.AFI_IPv4: + # IPv4 Address + addrs_fmt = cls._ADDRS_FMT % (4, 4) + elif afi == cls.AFI_IPv6: + # IPv6 Address + addrs_fmt = cls._ADDRS_FMT % (16, 16) + else: + raise struct.error('Unsupported address family: %d' % afi) + + (peer_ip, local_ip) = struct.unpack_from(addrs_fmt, buf, offset) + peer_ip = ip.bin_to_text(peer_ip) + local_ip = ip.bin_to_text(local_ip) + offset += struct.calcsize(addrs_fmt) + + (old_state, new_state) = struct.unpack_from( + cls._STATES_FMT, buf, offset) + + return cls(peer_as, local_as, if_index, + peer_ip, local_ip, old_state, new_state, afi) + + def serialize(self): + # fixup + if (netaddr.valid_ipv4(self.peer_ip) + and netaddr.valid_ipv4(self.local_ip)): + self.afi = self.AFI_IPv4 + elif (netaddr.valid_ipv6(self.peer_ip) + and netaddr.valid_ipv6(self.local_ip)): + self.afi = self.AFI_IPv6 + else: + raise ValueError( + 'peer_ip and local_ip must be the same address family: ' + 'peer_ip=%s, local_ip=%s' % (self.peer_ip, self.local_ip)) + + buf = struct.pack(self._HEADER_FMT, + self.peer_as, self.local_as, + self.if_index, self.afi) + + buf += ip.text_to_bin(self.peer_ip) + buf += ip.text_to_bin(self.local_ip) + + buf += struct.pack(self._STATES_FMT, + self.old_state, self.new_state) + + return buf + + +@Bgp4MpMrtMessage.register_type( + Bgp4MpMrtRecord.SUBTYPE_BGP4MP_MESSAGE) +class Bgp4MpMessageMrtMessage(Bgp4MpMrtMessage): + """ + MRT Message for the BGP4MP Type and the BGP4MP_MESSAGE subtype. 
+ """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer AS Number | Local AS Number | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Interface Index | Address Family | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer IP Address (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Local IP Address (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | BGP Message... (variable) + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!HHHH' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + _ADDRS_FMT = '!%ds%ds' + + # Address Family types + AFI_IPv4 = 1 + AFI_IPv6 = 2 + + def __init__(self, peer_as, local_as, if_index, + peer_ip, local_ip, bgp_message, afi=None): + self.peer_as = peer_as + self.local_as = local_as + self.if_index = if_index + self.peer_ip = peer_ip + self.local_ip = local_ip + assert isinstance(bgp_message, bgp.BGPMessage) + self.bgp_message = bgp_message + self.afi = afi + + @classmethod + def parse(cls, buf): + (peer_as, local_as, if_index, afi) = struct.unpack_from( + cls._HEADER_FMT, buf) + offset = cls.HEADER_SIZE + + if afi == cls.AFI_IPv4: + # IPv4 Address + addrs_fmt = cls._ADDRS_FMT % (4, 4) + elif afi == cls.AFI_IPv6: + # IPv6 Address + addrs_fmt = cls._ADDRS_FMT % (16, 16) + else: + raise struct.error('Unsupported address family: %d' % afi) + + (peer_ip, local_ip) = struct.unpack_from(addrs_fmt, buf, offset) + peer_ip = ip.bin_to_text(peer_ip) + local_ip = ip.bin_to_text(local_ip) + offset += struct.calcsize(addrs_fmt) + + rest = buf[offset:] + bgp_message, _, _ = bgp.BGPMessage.parser(rest) + + return cls(peer_as, local_as, if_index, + peer_ip, local_ip, bgp_message, afi) + + def serialize(self): + # fixup + if (netaddr.valid_ipv4(self.peer_ip) + and netaddr.valid_ipv4(self.local_ip)): + self.afi = self.AFI_IPv4 + elif (netaddr.valid_ipv6(self.peer_ip) + and netaddr.valid_ipv6(self.local_ip)): + self.afi = self.AFI_IPv6 + else: + raise ValueError( + 'peer_ip and local_ip must be the same address family: ' + 'peer_ip=%s, local_ip=%s' % (self.peer_ip, self.local_ip)) + + buf = struct.pack(self._HEADER_FMT, + self.peer_as, self.local_as, + self.if_index, self.afi) + + buf += ip.text_to_bin(self.peer_ip) + buf += ip.text_to_bin(self.local_ip) + + buf += self.bgp_message.serialize() + + return buf + + +@Bgp4MpMrtMessage.register_type( + Bgp4MpMrtRecord.SUBTYPE_BGP4MP_MESSAGE_AS4) +class Bgp4MpMessageAs4MrtMessage(Bgp4MpMessageMrtMessage): + """ + MRT Message for the BGP4MP Type and the BGP4MP_MESSAGE_AS4 subtype. + """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer AS Number | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Local AS Number | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Interface Index | Address Family | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer IP Address (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Local IP Address (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | BGP Message... 
(variable) + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!IIHH' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + +@Bgp4MpMrtMessage.register_type( + Bgp4MpMrtRecord.SUBTYPE_BGP4MP_STATE_CHANGE_AS4) +class Bgp4MpStateChangeAs4MrtMessage(Bgp4MpStateChangeMrtMessage): + """ + MRT Message for the BGP4MP Type and the BGP4MP_STATE_CHANGE_AS4 subtype. + """ + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer AS Number | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Local AS Number | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Interface Index | Address Family | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Peer IP Address (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Local IP Address (variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Old State | New State | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!IIHH' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + +@Bgp4MpMrtMessage.register_type( + Bgp4MpMrtRecord.SUBTYPE_BGP4MP_MESSAGE_LOCAL) +class Bgp4MpMessageLocalMrtMessage(Bgp4MpMessageMrtMessage): + """ + MRT Message for the BGP4MP Type and the BGP4MP_MESSAGE_LOCAL subtype. + """ + + +@Bgp4MpMrtMessage.register_type( + Bgp4MpMrtRecord.SUBTYPE_BGP4MP_MESSAGE_AS4_LOCAL) +class Bgp4MpMessageAs4LocalMrtMessage(Bgp4MpMessageAs4MrtMessage): + """ + MRT Message for the BGP4MP Type and the BGP4MP_MESSAGE_AS4_LOCAL subtype. + """ + + +# TODO: +# Currently, Ryu does not provide the packet library for ISIS protocol. +# Implement parser for ISIS MRT message. +# class IsisMrtRecord(MrtCommonRecord): +# class IsisMrtMessage(MrtMessage): + + +# TODO: +# Currently, Ryu does not provide the packet library for OSPFv3 protocol. +# Implement the parser for OSPFv3 MRT message. +# class Ospf3MrtRecord(MrtCommonRecord): +# class Ospf3MrtMessage(MrtMessage): + + +class Reader(object): + """ + MRT format file reader. + + ========= ================================================ + Argument Description + ========= ================================================ + f File object which reading MRT format file + in binary mode. + ========= ================================================ + + Example of Usage:: + + import bz2 + from ryu.lib import mrtlib + + count = 0 + for record in mrtlib.Reader( + bz2.BZ2File('rib.YYYYMMDD.hhmm.bz2', 'rb')): + print("%d, %s" % (count, record)) + count += 1 + """ + + def __init__(self, f): + self._f = f + + def __iter__(self): + return self + + def next(self): + header_buf = self._f.read(MrtRecord.HEADER_SIZE) + if len(header_buf) < MrtRecord.HEADER_SIZE: + raise StopIteration() + + # Hack to avoid eating memory up + self._f.seek(-MrtRecord.HEADER_SIZE, 1) + required_len = MrtRecord.parse_pre(header_buf) + buf = self._f.read(required_len) + record, _ = MrtRecord.parse(buf) + + return record + + # for Python 3 compatible + __next__ = next + + def close(self): + self._f.close() + + def __del__(self): + self.close() + + +class Writer(object): + """ + MRT format file writer. + + ========= ================================================ + Argument Description + ========= ================================================ + f File object which writing MRT format file + in binary mode. 
+ ========= ================================================ + + Example of usage:: + + import bz2 + import time + from ryu.lib import mrtlib + from ryu.lib.packet import bgp + + mrt_writer = mrtlib.Writer( + bz2.BZ2File('rib.YYYYMMDD.hhmm.bz2', 'wb')) + + prefix = bgp.IPAddrPrefix(24, '10.0.0.0') + + rib_entry = mrtlib.MrtRibEntry( + peer_index=0, + originated_time=int(time.time()), + bgp_attributes=[bgp.BGPPathAttributeOrigin(0)]) + + message = mrtlib.TableDump2RibIPv4UnicastMrtMessage( + seq_num=0, + prefix=prefix, + rib_entries=[rib_entry]) + + record = mrtlib.TableDump2MrtRecord( + message=message) + + mrt_writer.write(record) + """ + + def __init__(self, f): + self._f = f + + def write(self, record): + if not isinstance(record, MrtRecord): + raise ValueError( + 'record should be an instance of MrtRecord subclass') + + self._f.write(record.serialize()) + + def close(self): + self._f.close() + + def __del__(self): + self.close() diff -Nru ryu-4.9/ryu/lib/netdevice.py ryu-4.15/ryu/lib/netdevice.py --- ryu-4.9/ryu/lib/netdevice.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/lib/netdevice.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,70 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Constants defined in netdevice(7) + +# Interface flags +# from net/if.h +IFF_UP = 1 << 0 # Interface is running. +IFF_BROADCAST = 1 << 1 # Valid broadcast address set. +IFF_DEBUG = 1 << 2 # Internal debugging flag. +IFF_LOOPBACK = 1 << 3 # Interface is a loopback interface. +IFF_POINTOPOINT = 1 << 4 # Interface is a point-to-point link. +IFF_NOTRAILERS = 1 << 5 # Avoid use of trailers. +IFF_RUNNING = 1 << 6 # Resources allocated. +IFF_NOARP = 1 << 7 # No arp protocol, L2 destination address not set. +IFF_PROMISC = 1 << 8 # Interface is in promiscuous mode. +IFF_ALLMULTI = 1 << 9 # Receive all multicast packets. +IFF_MASTER = 1 << 10 # Master of a load balancing bundle. +IFF_SLAVE = 1 << 11 # Slave of a load balancing bundle. +IFF_MULTICAST = 1 << 12 # Supports multicast. +IFF_PORTSEL = 1 << 13 # Is able to select media type via ifmap. +IFF_AUTOMEDIA = 1 << 14 # Auto media selection active. +IFF_DYNAMIC = 1 << 15 # The addresses are lost when the interface goes down. +# from linux/if.h +IFF_LOWER_UP = 1 << 16 # Driver signals L1 up. (since Linux 2.6.17) +IFF_DORMANT = 1 << 17 # Driver signals dormant. (since Linux 2.6.17) +IFF_ECHO = 1 << 18 # Echo sent packets. (since Linux 2.6.25) + +# Private interface flags +# from linux/netdevice.h +IFF_802_1Q_VLAN = 1 << 0 # 802.1Q VLAN device. +IFF_EBRIDGE = 1 << 1 # Ethernet bridging device. +IFF_BONDING = 1 << 2 # bonding master or slave. +IFF_ISATAP = 1 << 3 # ISATAP interface (RFC4214). +IFF_WAN_HDLC = 1 << 4 # WAN HDLC device. +IFF_XMIT_DST_RELEASE = 1 << 5 # dev_hard_start_xmit() is allowed to release skb->dst. +IFF_DONT_BRIDGE = 1 << 6 # disallow bridging this ether dev. +IFF_DISABLE_NETPOLL = 1 << 7 # disable netpoll at run-time. 
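
These IFF_* names are plain bit masks mirroring the kernel's net/if.h and linux/netdevice.h values, and the public and private flag sets are two independent bit spaces (both start at 1 << 0), so a flag from one set should not be tested against a word from the other. A minimal usage sketch, assuming a Ryu tree that includes this module and an interface flags word obtained elsewhere (the remaining private flags continue just below):

    from ryu.lib import netdevice

    # e.g. an interface reported as administratively up, running and
    # multicast-capable (illustrative value)
    flags = netdevice.IFF_UP | netdevice.IFF_RUNNING | netdevice.IFF_MULTICAST

    if flags & netdevice.IFF_UP and flags & netdevice.IFF_RUNNING:
        print('link is up')
    if not flags & netdevice.IFF_LOOPBACK:
        print('not a loopback interface')
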
+IFF_MACVLAN_PORT = 1 << 8 # device used as macvlan port. +IFF_BRIDGE_PORT = 1 << 9 # device used as bridge port. +IFF_OVS_DATAPATH = 1 << 10 # device used as Open vSwitch datapath port. +IFF_TX_SKB_SHARING = 1 << 11 # The interface supports sharing skbs on transmit. +IFF_UNICAST_FLT = 1 << 12 # Supports unicast filtering. +IFF_TEAM_PORT = 1 << 13 # device used as team port. +IFF_SUPP_NOFCS = 1 << 14 # device supports sending custom FCS. +IFF_LIVE_ADDR_CHANGE = 1 << 15 # device supports hardware address change when it's running. +IFF_MACVLAN = 1 << 16 # Macvlan device. +IFF_XMIT_DST_RELEASE_PERM = 1 << 17 # IFF_XMIT_DST_RELEASE not taking into account underlying stacked devices. +IFF_IPVLAN_MASTER = 1 << 18 # IPvlan master device. +IFF_IPVLAN_SLAVE = 1 << 19 # IPvlan slave device. +IFF_L3MDEV_MASTER = 1 << 20 # device is an L3 master device. +IFF_NO_QUEUE = 1 << 21 # device can run without qdisc attached. +IFF_OPENVSWITCH = 1 << 22 # device is a Open vSwitch master. +IFF_L3MDEV_SLAVE = 1 << 23 # device is enslaved to an L3 master device. +IFF_TEAM = 1 << 24 # device is a team device. +IFF_RXFH_CONFIGURED = 1 << 25 # device has had Rx Flow indirection table configured. +IFF_PHONY_HEADROOM = 1 << 26 # the headroom value is controlled by an external entity. (i.e. the master device for bridged veth) +IFF_MACSEC = 1 << 27 # device is a MACsec device. diff -Nru ryu-4.9/ryu/lib/ofctl_string.py ryu-4.15/ryu/lib/ofctl_string.py --- ryu-4.9/ryu/lib/ofctl_string.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/lib/ofctl_string.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,324 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re + +import ryu.exception +from ryu.lib.ofctl_utils import str_to_int +from ryu.ofproto import nicira_ext + + +def ofp_instruction_from_str(ofproto, action_str): + """ + Parse an ovs-ofctl style action string and return a list of + jsondict representations of OFPInstructionActions, which + can then be passed to ofproto_parser.ofp_instruction_from_jsondict. + + Please note that this is for making transition from ovs-ofctl + easier. Please consider using OFPAction constructors when writing + new codes. + + This function takes the following arguments. + + =========== ================================================= + Argument Description + =========== ================================================= + ofproto An ofproto module. + action_str An action string. + =========== ================================================= + """ + action_re = re.compile("([a-z_]+)(\([^)]*\)|[^a-z_,()][^,()]*)*") + result = [] + while len(action_str): + m = action_re.match(action_str) + if not m: + raise ryu.exception.OFPInvalidActionString(action_str=action_str) + action_name = m.group(1) + this_action = m.group(0) + paren_level = this_action.count('(') - this_action.count(')') + assert paren_level >= 0 + try: + # Parens can be nested. Look for as many ')'s as '('s. 
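    # For instance (an illustrative, hypothetical input), in
    #     "ct(commit,exec(set_field:0x1->ct_mark)),output:2"
    # the regexp above cannot cross the first ')', leaving one '(' unbalanced,
    # so the branch below re-scans the string with _tokenize_paren_block()
    # until the parentheses balance and only then splits off the trailing
    # ",output:2".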
+ if paren_level > 0: + this_action, rest = _tokenize_paren_block(action_str, m.end(0)) + else: + rest = action_str[m.end(0):] + if len(rest): + assert rest[0] == ',' + rest = rest[1:] + except Exception: + raise ryu.exception.OFPInvalidActionString(action_str=action_str) + if action_name == 'drop': + assert this_action == 'drop' + assert len(result) == 0 and rest == '' + return [] + converter = getattr(OfctlActionConverter, action_name, None) + if converter is None or not callable(converter): + raise ryu.exception.OFPInvalidActionString(action_str=action_name) + result.append(converter(ofproto, this_action)) + action_str = rest + + return result + + +def _tokenize_paren_block(string, pos): + paren_re = re.compile("[()]") + paren_level = string[:pos].count('(') - string[:pos].count(')') + while paren_level > 0: + m = paren_re.search(string[pos:]) + if m.group(0) == '(': + paren_level += 1 + else: + paren_level -= 1 + pos += m.end(0) + return string[:pos], string[pos:] + + +def tokenize_ofp_instruction_arg(arg): + """ + Tokenize an argument portion of ovs-ofctl style action string. + """ + arg_re = re.compile("[^,()]*") + try: + rest = arg + result = [] + while len(rest): + m = arg_re.match(rest) + if m.end(0) == len(rest): + result.append(rest) + return result + if rest[m.end(0)] == '(': + this_block, rest = _tokenize_paren_block( + rest, m.end(0) + 1) + result.append(this_block) + elif rest[m.end(0)] == ',': + result.append(m.group(0)) + rest = rest[m.end(0):] + else: # is ')' + raise Exception + if len(rest): + assert rest[0] == ',' + rest = rest[1:] + return result + except Exception: + raise ryu.exception.OFPInvalidActionString(action_str=arg) + + +_OXM_FIELD_OFCTL_ALIASES = { + 'tun_id': 'tunnel_id', + 'in_port': 'in_port_nxm', + 'in_port_oxm': 'in_port', + 'dl_src': 'eth_src', + 'dl_type': 'eth_type', + 'nw_src': 'ipv4_src', + 'ip_src': 'ipv4_src', + 'nw_proto': 'ip_proto', + 'nw_ecn': 'ip_ecn', + 'tp_src': 'tcp_src', + 'icmp_type': 'icmpv4_type', + 'icmp_code': 'icmpv4_code', + 'nd_target': 'ipv6_nd_target', + 'nd_sll': 'ipv6_nd_sll', + 'nd_tll': 'ipv6_nd_tll', + # Nicira extension + 'tun_src': 'tun_ipv4_src' +} + + +def ofp_ofctl_field_name_to_ryu(field): + """Convert an ovs-ofctl field name to ryu equivalent.""" + mapped = _OXM_FIELD_OFCTL_ALIASES.get(field) + if mapped: + return mapped + if field.endswith("_dst"): + mapped = _OXM_FIELD_OFCTL_ALIASES.get(field[:-3] + "src") + if mapped: + return mapped[:-3] + "dst" + return field + + +_NXM_FIELD_MAP = dict([(key, key + '_nxm') for key in [ + 'arp_sha', 'arp_tha', 'ipv6_src', 'ipv6_dst', + 'icmpv6_type', 'icmpv6_code', 'ip_ecn', 'tcp_flags']]) +_NXM_FIELD_MAP.update({ + 'tun_id': 'tunnel_id_nxm', 'ip_ttl': 'nw_ttl'}) + +_NXM_OF_FIELD_MAP = dict([(key, key + '_nxm') for key in [ + 'in_port', 'eth_dst', 'eth_src', 'eth_type', 'ip_proto', + 'tcp_src', 'tcp_dst', 'udp_src', 'udp_dst', + 'arp_op', 'arp_spa', 'arp_tpa']]) +_NXM_OF_FIELD_MAP.update({ + 'ip_src': 'ipv4_src_nxm', 'ip_dst': 'ipv4_dst_nxm', + 'icmp_type': 'icmpv4_type_nxm', 'icmp_code': 'icmpv4_code_nxm'}) + + +def nxm_field_name_to_ryu(field): + """ + Convert an ovs-ofctl style NXM_/OXM_ field name to + a ryu match field name. 
+ """ + if field.endswith("_W"): + field = field[:-2] + prefix = field[:7] + field = field[7:].lower() + mapped_result = None + + if prefix == 'NXM_NX_': + mapped_result = _NXM_FIELD_MAP.get(field) + elif prefix == "NXM_OF_": + mapped_result = _NXM_OF_FIELD_MAP.get(field) + elif prefix == "OXM_OF_": + # no mapping needed + pass + else: + raise ValueError + if mapped_result is not None: + return mapped_result + return field + + +class OfctlActionConverter(object): + + @classmethod + def goto_table(cls, ofproto, action_str): + assert action_str.startswith('goto_table:') + table_id = str_to_int(action_str[len('goto_table:'):]) + return dict(OFPInstructionGotoTable={'table_id': table_id}) + + @classmethod + def normal(cls, ofproto, action_str): + return cls.output(ofproto, action_str) + + @classmethod + def output(cls, ofproto, action_str): + if action_str == 'normal': + port = ofproto.OFPP_NORMAL + else: + assert action_str.startswith('output:') + port = str_to_int(action_str[len('output:'):]) + return dict(OFPActionOutput={'port': port}) + + @classmethod + def pop_vlan(cls, ofproto, action_str): + return dict(OFPActionPopVlan={}) + + @classmethod + def set_field(cls, ofproto, action_str): + try: + assert action_str.startswith("set_field:") + value, key = action_str[len("set_field:"):].split("->", 1) + fieldarg = dict(field=ofp_ofctl_field_name_to_ryu(key)) + m = value.find('/') + if m >= 0: + fieldarg['value'] = str_to_int(value[:m]) + fieldarg['mask'] = str_to_int(value[m + 1:]) + else: + fieldarg['value'] = str_to_int(value) + except Exception: + raise ryu.exception.OFPInvalidActionString(action_str=action_str) + return dict(OFPActionSetField={ + 'field': {'OXMTlv': fieldarg}}) + + # NX actions + @classmethod + def resubmit(cls, ofproto, action_str): + arg = action_str[len("resubmit"):] + kwargs = {} + try: + if arg[0] == ':': + kwargs['in_port'] = str_to_int(arg[1:]) + elif arg[0] == '(' and arg[-1] == ')': + in_port, table_id = arg[1:-1].split(',') + if in_port: + kwargs['in_port'] = str_to_int(in_port) + if table_id: + kwargs['table_id'] = str_to_int(table_id) + else: + raise Exception + return dict(NXActionResubmitTable=kwargs) + except Exception: + raise ryu.exception.OFPInvalidActionString( + action_str=action_str) + + @classmethod + def conjunction(cls, ofproto, action_str): + try: + assert action_str.startswith('conjunction(') + assert action_str[-1] == ')' + args = action_str[len('conjunction('):-1].split(',') + assert len(args) == 2 + id_ = str_to_int(args[0]) + clauses = list(map(str_to_int, args[1].split('/'))) + assert len(clauses) == 2 + return dict(NXActionConjunction={ + 'clause': clauses[0] - 1, + 'n_clauses': clauses[1], + 'id': id_}) + except Exception: + raise ryu.exception.OFPInvalidActionString( + action_str=action_str) + + @classmethod + def ct(cls, ofproto, action_str): + str_to_port = {'ftp': 21, 'tftp': 69} + flags = 0 + zone_src = "" + zone_ofs_nbits = 0 + recirc_table = nicira_ext.NX_CT_RECIRC_NONE + alg = 0 + ct_actions = [] + + if len(action_str) > 2: + if (not action_str.startswith('ct(') or + action_str[-1] != ')'): + raise ryu.exception.OFPInvalidActionString( + action_str=action_str) + rest = tokenize_ofp_instruction_arg(action_str[len('ct('):-1]) + else: + rest = [] + for arg in rest: + if arg == 'commit': + flags |= nicira_ext.NX_CT_F_COMMIT + rest = rest[len('commit'):] + elif arg == 'force': + flags |= nicira_ext.NX_CT_F_FORCE + elif arg.startswith('exec('): + ct_actions = ofp_instruction_from_str( + ofproto, arg[len('exec('):-1]) + else: + try: + k, v 
= arg.split('=', 1) + if k == 'table': + recirc_table = str_to_int(v) + elif k == 'zone': + m = re.search('\[(\d*)\.\.(\d*)\]', v) + if m: + zone_ofs_nbits = nicira_ext.ofs_nbits( + int(m.group(1)), int(m.group(2))) + zone_src = nxm_field_name_to_ryu( + v[:m.start(0)]) + else: + zone_ofs_nbits = str_to_int(v) + elif k == 'alg': + alg = str_to_port[arg[len('alg='):]] + except Exception: + raise ryu.exception.OFPInvalidActionString( + action_str=action_str) + return dict(NXActionCT={'flags': flags, + 'zone_src': zone_src, + 'zone_ofs_nbits': zone_ofs_nbits, + 'recirc_table': recirc_table, + 'alg': alg, + 'actions': ct_actions}) diff -Nru ryu-4.9/ryu/lib/ofctl_utils.py ryu-4.15/ryu/lib/ofctl_utils.py --- ryu-4.9/ryu/lib/ofctl_utils.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/ofctl_utils.py 2017-07-02 11:08:32.000000000 +0000 @@ -21,6 +21,7 @@ from ryu.lib import dpid from ryu.lib import hub +from ryu.ofproto import ofproto_v1_2 LOG = logging.getLogger(__name__) @@ -71,19 +72,21 @@ COPY_TTL_IN: parser.OFPActionCopyTtlIn, DEC_MPLS_TTL: parser.OFPActionDecMplsTtl, POP_VLAN: parser.OFPActionPopVlan, - DEC_NW_TTL: parser.OFPActionDecNwTtl, - POP_PBB: parser.OFPActionPopPbb} + DEC_NW_TTL: parser.OFPActionDecNwTtl} + if ofp.OFP_VERSION > ofproto_v1_2.OFP_VERSION: + actions[POP_PBB] = parser.OFPActionPopPbb need_ethertype = {PUSH_VLAN: parser.OFPActionPushVlan, PUSH_MPLS: parser.OFPActionPushMpls, - POP_MPLS: parser.OFPActionPopMpls, - PUSH_PBB: parser.OFPActionPushPbb} + POP_MPLS: parser.OFPActionPopMpls} + if ofp.OFP_VERSION > ofproto_v1_2.OFP_VERSION: + need_ethertype[PUSH_PBB] = parser.OFPActionPushPbb if action_type in actions: return actions[action_type]() elif action_type in need_ethertype: - ethertype = int(dic.get('ethertype')) + ethertype = str_to_int(dic.get('ethertype')) return need_ethertype[action_type](ethertype) elif action_type == OUTPUT: @@ -92,7 +95,7 @@ return parser.OFPActionOutput(out_port, max_len) elif action_type == SET_MPLS_TTL: - mpls_ttl = int(dic.get('mpls_ttl')) + mpls_ttl = str_to_int(dic.get('mpls_ttl')) return parser.OFPActionSetMplsTtl(mpls_ttl) elif action_type == SET_QUEUE: @@ -104,7 +107,7 @@ return parser.OFPActionGroup(group_id) elif action_type == SET_NW_TTL: - nw_ttl = int(dic.get('nw_ttl')) + nw_ttl = str_to_int(dic.get('nw_ttl')) return parser.OFPActionSetNwTtl(nw_ttl) elif action_type == SET_FIELD: @@ -113,9 +116,9 @@ return parser.OFPActionSetField(**{field: value}) elif action_type == 'COPY_FIELD': - n_bits = int(dic.get('n_bits')) - src_offset = int(dic.get('src_offset')) - dst_offset = int(dic.get('dst_offset')) + n_bits = str_to_int(dic.get('n_bits')) + src_offset = str_to_int(dic.get('src_offset')) + dst_offset = str_to_int(dic.get('dst_offset')) oxm_ids = [parser.OFPOxmId(str(dic.get('src_oxm_id'))), parser.OFPOxmId(str(dic.get('dst_oxm_id')))] return parser.OFPActionCopyField( @@ -124,14 +127,14 @@ elif action_type == 'METER': if hasattr(parser, 'OFPActionMeter'): # OpenFlow 1.5 or later - meter_id = int(dic.get('meter_id')) + meter_id = str_to_int(dic.get('meter_id')) return parser.OFPActionMeter(meter_id) else: # OpenFlow 1.4 or earlier return None elif action_type == EXPERIMENTER: - experimenter = int(dic.get('experimenter')) + experimenter = str_to_int(dic.get('experimenter')) data_type = dic.get('data_type', 'ascii') if data_type not in ('ascii', 'base64'): @@ -182,14 +185,14 @@ else: if '/' in value: val = value.split('/') - return int(val[0], 0), int(val[1], 0) + return str_to_int(val[0]), str_to_int(val[1]) else: if 
value.isdigit(): # described as decimal string value return int(value, 10) | ofpvid_present - return int(value, 0) + return str_to_int(value) def to_match_masked_int(value): @@ -263,6 +266,22 @@ return int(str(str_num), 0) +def get_role(dp, waiters, to_user): + stats = dp.ofproto_parser.OFPRoleRequest( + dp, dp.ofproto.OFPCR_ROLE_NOCHANGE, generation_id=0) + msgs = [] + send_stats_request(dp, stats, waiters, msgs, LOG) + descs = [] + + for msg in msgs: + d = msg.to_jsondict()[msg.__class__.__name__] + if to_user: + d['role'] = OFCtlUtil(dp.ofproto).ofp_role_to_user(d['role']) + descs.append(d) + + return {str(dp.id): descs} + + class OFCtlUtil(object): def __init__(self, ofproto): @@ -427,3 +446,6 @@ def ofp_role_from_user(self, role): return self._reserved_num_from_user(role, 'OFPCR_ROLE_') + + def ofp_role_to_user(self, role): + return self._reserved_num_to_user(role, 'OFPCR_ROLE_') diff -Nru ryu-4.9/ryu/lib/ofctl_v1_0.py ryu-4.15/ryu/lib/ofctl_v1_0.py --- ryu-4.9/ryu/lib/ofctl_v1_0.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/ofctl_v1_0.py 2017-07-02 11:08:32.000000000 +0000 @@ -27,6 +27,7 @@ DEFAULT_TIMEOUT = 1.0 # TODO:XXX UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_0) +str_to_int = ofctl_utils.str_to_int def to_actions(dp, acts): @@ -39,13 +40,13 @@ # NOTE: The reason of this magic number (0xffe5) # is because there is no good constant in of1.0. # The same value as OFPCML_MAX of of1.2 and of1.3 is used. - max_len = int(a.get('max_len', 0xffe5)) + max_len = str_to_int(a.get('max_len', 0xffe5)) actions.append(dp.ofproto_parser.OFPActionOutput(port, max_len)) elif action_type == 'SET_VLAN_VID': - vlan_vid = int(a.get('vlan_vid', 0xffff)) + vlan_vid = str_to_int(a.get('vlan_vid', 0xffff)) actions.append(dp.ofproto_parser.OFPActionVlanVid(vlan_vid)) elif action_type == 'SET_VLAN_PCP': - vlan_pcp = int(a.get('vlan_pcp', 0)) + vlan_pcp = str_to_int(a.get('vlan_pcp', 0)) actions.append(dp.ofproto_parser.OFPActionVlanPcp(vlan_pcp)) elif action_type == 'STRIP_VLAN': actions.append(dp.ofproto_parser.OFPActionStripVlan()) @@ -62,13 +63,13 @@ nw_dst = ipv4_to_int(a.get('nw_dst')) actions.append(dp.ofproto_parser.OFPActionSetNwDst(nw_dst)) elif action_type == 'SET_NW_TOS': - nw_tos = int(a.get('nw_tos', 0)) + nw_tos = str_to_int(a.get('nw_tos', 0)) actions.append(dp.ofproto_parser.OFPActionSetNwTos(nw_tos)) elif action_type == 'SET_TP_SRC': - tp_src = int(a.get('tp_src', 0)) + tp_src = str_to_int(a.get('tp_src', 0)) actions.append(dp.ofproto_parser.OFPActionSetTpSrc(tp_src)) elif action_type == 'SET_TP_DST': - tp_dst = int(a.get('tp_dst', 0)) + tp_dst = str_to_int(a.get('tp_dst', 0)) actions.append(dp.ofproto_parser.OFPActionSetTpDst(tp_dst)) elif action_type == 'ENQUEUE': port = UTIL.ofp_port_from_user( @@ -162,19 +163,19 @@ dl_dst = haddr_to_bin(value) wildcards &= ~ofp.OFPFW_DL_DST elif key == 'dl_vlan': - dl_vlan = int(value) + dl_vlan = str_to_int(value) wildcards &= ~ofp.OFPFW_DL_VLAN elif key == 'dl_vlan_pcp': - dl_vlan_pcp = int(value) + dl_vlan_pcp = str_to_int(value) wildcards &= ~ofp.OFPFW_DL_VLAN_PCP elif key == 'dl_type': - dl_type = int(value) + dl_type = str_to_int(value) wildcards &= ~ofp.OFPFW_DL_TYPE elif key == 'nw_tos': - nw_tos = int(value) + nw_tos = str_to_int(value) wildcards &= ~ofp.OFPFW_NW_TOS elif key == 'nw_proto': - nw_proto = int(value) + nw_proto = str_to_int(value) wildcards &= ~ofp.OFPFW_NW_PROTO elif key == 'nw_src': ip = value.split('/') @@ -197,10 +198,10 @@ ~ofp.OFPFW_NW_DST_MASK wildcards &= v elif key == 'tp_src': - tp_src = int(value) + tp_src = 
str_to_int(value) wildcards &= ~ofp.OFPFW_TP_SRC elif key == 'tp_dst': - tp_dst = int(value) + tp_dst = str_to_int(value) wildcards &= ~ofp.OFPFW_TP_DST else: LOG.error("unknown match name %s, %s, %d", key, value, len(key)) @@ -285,11 +286,7 @@ for msg in msgs: stats = msg.body - s = {'mfr_desc': stats.mfr_desc, - 'hw_desc': stats.hw_desc, - 'sw_desc': stats.sw_desc, - 'serial_num': stats.serial_num, - 'dp_desc': stats.dp_desc} + s = stats.to_jsondict()[stats.__class__.__name__] return {str(dp.id): s} @@ -298,12 +295,12 @@ if port is None: port = dp.ofproto.OFPP_ALL else: - port = int(str(port), 0) + port = str_to_int(port) if queue_id is None: queue_id = dp.ofproto.OFPQ_ALL else: - queue_id = int(str(queue_id), 0) + queue_id = str_to_int(queue_id) stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, port, queue_id) @@ -330,6 +327,9 @@ flow.get('table_id', 0xff)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_NONE)) + # Note: OpenFlow does not allow to filter flow entries by priority, + # but for efficiency, ofctl provides the way to do it. + priority = str_to_int(flow.get('priority', -1)) stats = dp.ofproto_parser.OFPFlowStatsRequest( dp, 0, match, table_id, out_port) @@ -340,6 +340,9 @@ flows = [] for msg in msgs: for stats in msg.body: + if 0 <= priority != stats.priority: + continue + actions = actions_to_str(stats.actions) match = match_to_str(stats.match) @@ -439,7 +442,7 @@ if port is None: port = dp.ofproto.OFPP_NONE else: - port = int(str(port), 0) + port = str_to_int(port) stats = dp.ofproto_parser.OFPPortStatsRequest( dp, 0, port) @@ -492,16 +495,16 @@ def mod_flow_entry(dp, flow, cmd): - cookie = int(flow.get('cookie', 0)) - priority = int(flow.get('priority', - dp.ofproto.OFP_DEFAULT_PRIORITY)) + cookie = str_to_int(flow.get('cookie', 0)) + priority = str_to_int( + flow.get('priority', dp.ofproto.OFP_DEFAULT_PRIORITY)) buffer_id = UTIL.ofp_buffer_from_user( flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_NONE)) - flags = int(flow.get('flags', 0)) - idle_timeout = int(flow.get('idle_timeout', 0)) - hard_timeout = int(flow.get('hard_timeout', 0)) + flags = str_to_int(flow.get('flags', 0)) + idle_timeout = str_to_int(flow.get('idle_timeout', 0)) + hard_timeout = str_to_int(flow.get('hard_timeout', 0)) actions = to_actions(dp, flow.get('actions', [])) match = to_match(dp, flow.get('match', {})) @@ -530,9 +533,9 @@ def mod_port_behavior(dp, port_config): port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0)) hw_addr = str(port_config.get('hw_addr')) - config = int(port_config.get('config', 0)) - mask = int(port_config.get('mask', 0)) - advertise = int(port_config.get('advertise')) + config = str_to_int(port_config.get('config', 0)) + mask = str_to_int(port_config.get('mask', 0)) + advertise = str_to_int(port_config.get('advertise')) port_mod = dp.ofproto_parser.OFPPortMod( dp, port_no, hw_addr, config, mask, advertise) diff -Nru ryu-4.9/ryu/lib/ofctl_v1_2.py ryu-4.15/ryu/lib/ofctl_v1_2.py --- ryu-4.9/ryu/lib/ofctl_v1_2.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/ofctl_v1_2.py 2017-07-02 11:08:32.000000000 +0000 @@ -28,56 +28,14 @@ DEFAULT_TIMEOUT = 1.0 UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_2) +str_to_int = ofctl_utils.str_to_int def to_action(dp, dic): ofp = dp.ofproto parser = dp.ofproto_parser - action_type = dic.get('type') - if action_type == 'OUTPUT': - out_port = UTIL.ofp_port_from_user(dic.get('port', ofp.OFPP_ANY)) - max_len = 
UTIL.ofp_cml_from_user(dic.get('max_len', ofp.OFPCML_MAX)) - result = parser.OFPActionOutput(out_port, max_len) - elif action_type == 'COPY_TTL_OUT': - result = parser.OFPActionCopyTtlOut() - elif action_type == 'COPY_TTL_IN': - result = parser.OFPActionCopyTtlIn() - elif action_type == 'SET_MPLS_TTL': - mpls_ttl = int(dic.get('mpls_ttl')) - result = parser.OFPActionSetMplsTtl(mpls_ttl) - elif action_type == 'DEC_MPLS_TTL': - result = parser.OFPActionDecMplsTtl() - elif action_type == 'PUSH_VLAN': - ethertype = int(dic.get('ethertype')) - result = parser.OFPActionPushVlan(ethertype) - elif action_type == 'POP_VLAN': - result = parser.OFPActionPopVlan() - elif action_type == 'PUSH_MPLS': - ethertype = int(dic.get('ethertype')) - result = parser.OFPActionPushMpls(ethertype) - elif action_type == 'POP_MPLS': - ethertype = int(dic.get('ethertype')) - result = parser.OFPActionPopMpls(ethertype) - elif action_type == 'SET_QUEUE': - queue_id = UTIL.ofp_queue_from_user(dic.get('queue_id')) - result = parser.OFPActionSetQueue(queue_id) - elif action_type == 'GROUP': - group_id = UTIL.ofp_group_from_user(dic.get('group_id')) - result = parser.OFPActionGroup(group_id) - elif action_type == 'SET_NW_TTL': - nw_ttl = int(dic.get('nw_ttl')) - result = parser.OFPActionSetNwTtl(nw_ttl) - elif action_type == 'DEC_NW_TTL': - result = parser.OFPActionDecNwTtl() - elif action_type == 'SET_FIELD': - field = dic.get('field') - value = dic.get('value') - result = parser.OFPActionSetField(**{field: value}) - else: - result = None - - return result + return ofctl_utils.to_action(dic, ofp, parser, action_type, UTIL) def to_actions(dp, acts): @@ -112,8 +70,8 @@ table_id = UTIL.ofp_table_from_user(a.get('table_id')) inst.append(parser.OFPInstructionGotoTable(table_id)) elif action_type == 'WRITE_METADATA': - metadata = ofctl_utils.str_to_int(a.get('metadata')) - metadata_mask = (ofctl_utils.str_to_int(a['metadata_mask']) + metadata = str_to_int(a.get('metadata')) + metadata_mask = (str_to_int(a['metadata_mask']) if 'metadata_mask' in a else parser.UINT64_MAX) inst.append( @@ -208,50 +166,50 @@ def to_match(dp, attrs): convert = {'in_port': UTIL.ofp_port_from_user, - 'in_phy_port': int, - 'metadata': to_match_masked_int, - 'dl_dst': to_match_eth, - 'dl_src': to_match_eth, - 'eth_dst': to_match_eth, - 'eth_src': to_match_eth, - 'dl_type': int, - 'eth_type': int, + 'in_phy_port': str_to_int, + 'metadata': ofctl_utils.to_match_masked_int, + 'dl_dst': ofctl_utils.to_match_eth, + 'dl_src': ofctl_utils.to_match_eth, + 'eth_dst': ofctl_utils.to_match_eth, + 'eth_src': ofctl_utils.to_match_eth, + 'dl_type': str_to_int, + 'eth_type': str_to_int, 'dl_vlan': to_match_vid, 'vlan_vid': to_match_vid, - 'vlan_pcp': int, - 'ip_dscp': int, - 'ip_ecn': int, - 'nw_proto': int, - 'ip_proto': int, - 'nw_src': to_match_ip, - 'nw_dst': to_match_ip, - 'ipv4_src': to_match_ip, - 'ipv4_dst': to_match_ip, - 'tp_src': int, - 'tp_dst': int, - 'tcp_src': int, - 'tcp_dst': int, - 'udp_src': int, - 'udp_dst': int, - 'sctp_src': int, - 'sctp_dst': int, - 'icmpv4_type': int, - 'icmpv4_code': int, - 'arp_op': int, - 'arp_spa': to_match_ip, - 'arp_tpa': to_match_ip, - 'arp_sha': to_match_eth, - 'arp_tha': to_match_eth, - 'ipv6_src': to_match_ip, - 'ipv6_dst': to_match_ip, - 'ipv6_flabel': int, - 'icmpv6_type': int, - 'icmpv6_code': int, - 'ipv6_nd_target': to_match_ip, - 'ipv6_nd_sll': to_match_eth, - 'ipv6_nd_tll': to_match_eth, - 'mpls_label': int, - 'mpls_tc': int} + 'vlan_pcp': str_to_int, + 'ip_dscp': str_to_int, + 'ip_ecn': str_to_int, + 
'nw_proto': str_to_int, + 'ip_proto': str_to_int, + 'nw_src': ofctl_utils.to_match_ip, + 'nw_dst': ofctl_utils.to_match_ip, + 'ipv4_src': ofctl_utils.to_match_ip, + 'ipv4_dst': ofctl_utils.to_match_ip, + 'tp_src': str_to_int, + 'tp_dst': str_to_int, + 'tcp_src': str_to_int, + 'tcp_dst': str_to_int, + 'udp_src': str_to_int, + 'udp_dst': str_to_int, + 'sctp_src': str_to_int, + 'sctp_dst': str_to_int, + 'icmpv4_type': str_to_int, + 'icmpv4_code': str_to_int, + 'arp_op': str_to_int, + 'arp_spa': ofctl_utils.to_match_ip, + 'arp_tpa': ofctl_utils.to_match_ip, + 'arp_sha': ofctl_utils.to_match_eth, + 'arp_tha': ofctl_utils.to_match_eth, + 'ipv6_src': ofctl_utils.to_match_ip, + 'ipv6_dst': ofctl_utils.to_match_ip, + 'ipv6_flabel': str_to_int, + 'icmpv6_type': str_to_int, + 'icmpv6_code': str_to_int, + 'ipv6_nd_target': ofctl_utils.to_match_ip, + 'ipv6_nd_sll': ofctl_utils.to_match_eth, + 'ipv6_nd_tll': ofctl_utils.to_match_eth, + 'mpls_label': str_to_int, + 'mpls_tc': str_to_int} keys = {'dl_dst': 'eth_dst', 'dl_src': 'eth_src', @@ -295,55 +253,8 @@ return dp.ofproto_parser.OFPMatch(**kwargs) -def to_match_eth(value): - if '/' in value: - value = value.split('/') - return value[0], value[1] - else: - return value - - -def to_match_ip(value): - if '/' in value: - (ip_addr, ip_mask) = value.split('/') - if ip_mask.isdigit(): - ip = netaddr.ip.IPNetwork(value) - ip_addr = str(ip.ip) - ip_mask = str(ip.netmask) - return ip_addr, ip_mask - else: - return value - - def to_match_vid(value): - # NOTE: If "vlan_id/dl_vlan" field is described as decimal int value - # (and decimal string value), it is treated as values of - # VLAN tag, and OFPVID_PRESENT(0x1000) bit is automatically - # applied. OTOH, If it is described as hexadecimal string, - # treated as values of oxm_value (including OFPVID_PRESENT - # bit), and OFPVID_PRESENT bit is NOT automatically applied. 
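
A short worked sketch of the rules in that note, using the ofctl_utils.to_match_vid() helper that the replacement below delegates to (values assume OFPVID_PRESENT == 0x1000, as defined by OpenFlow 1.2+):

    from ryu.lib import ofctl_utils
    from ryu.ofproto import ofproto_v1_2

    P = ofproto_v1_2.OFPVID_PRESENT               # 0x1000

    ofctl_utils.to_match_vid(100, P)              # -> 0x1064, present bit added
    ofctl_utils.to_match_vid('100', P)            # -> 0x1064, decimal string, same
    ofctl_utils.to_match_vid('0x1064', P)         # -> 0x1064, hex string taken as-is
    ofctl_utils.to_match_vid('0x1000/0x1000', P)  # -> (0x1000, 0x1000), value/mask form
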
- if isinstance(value, int): - # described as decimal int value - return value | ofproto_v1_2.OFPVID_PRESENT - else: - if '/' in value: - val = value.split('/') - return int(val[0], 0), int(val[1], 0) - else: - if value.isdigit(): - # described as decimal string value - return int(value, 10) | ofproto_v1_2.OFPVID_PRESENT - else: - return int(value, 0) - - -def to_match_masked_int(value): - if isinstance(value, str) and '/' in value: - value = value.split('/') - return (ofctl_utils.str_to_int(value[0]), - ofctl_utils.str_to_int(value[1])) - else: - return ofctl_utils.str_to_int(value) + return ofctl_utils.to_match_vid(value, ofproto_v1_2.OFPVID_PRESENT) def match_to_str(ofmatch): @@ -383,14 +294,8 @@ def match_vid_to_str(value, mask): - if mask is not None: - value = '0x%04x/0x%04x' % (value, mask) - else: - if value & ofproto_v1_2.OFPVID_PRESENT: - value = str(value & ~ofproto_v1_2.OFPVID_PRESENT) - else: - value = '0x%04x' % value - return value + return ofctl_utils.match_vid_to_str( + value, mask, ofproto_v1_2.OFPVID_PRESENT) def get_desc_stats(dp, waiters): @@ -401,11 +306,7 @@ s = {} for msg in msgs: stats = msg.body - s = {'mfr_desc': stats.mfr_desc, - 'hw_desc': stats.hw_desc, - 'sw_desc': stats.sw_desc, - 'serial_num': stats.serial_num, - 'dp_desc': stats.dp_desc} + s = stats.to_jsondict()[stats.__class__.__name__] return {str(dp.id): s} @@ -416,12 +317,12 @@ if port is None: port = ofp.OFPP_ANY else: - port = int(str(port), 0) + port = str_to_int(port) if queue_id is None: queue_id = ofp.OFPQ_ALL else: - queue_id = int(str(queue_id), 0) + queue_id = str_to_int(queue_id) stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, port, queue_id, 0) @@ -446,7 +347,7 @@ if port is None: port = ofp.OFPP_ANY else: - port = UTIL.ofp_port_from_user(int(str(port), 0)) + port = UTIL.ofp_port_from_user(str_to_int(port)) stats = dp.ofproto_parser.OFPQueueGetConfigRequest(dp, port) msgs = [] ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) @@ -490,9 +391,12 @@ flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) match = to_match(dp, flow.get('match', {})) + # Note: OpenFlow does not allow to filter flow entries by priority, + # but for efficiency, ofctl provides the way to do it. 
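    # For example (illustrative call), a caller such as the ofctl_rest app
    # can pass a flow dict like
    #     get_flow_stats(dp, waiters, {'table_id': 0, 'priority': 10})
    # and entries whose priority differs are dropped below, after the switch
    # replies; the priority never goes into the OFPFlowStatsRequest itself,
    # and the default of -1 leaves the filter disabled.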
+ priority = str_to_int(flow.get('priority', -1)) stats = dp.ofproto_parser.OFPFlowStatsRequest( dp, table_id, out_port, out_group, cookie, cookie_mask, match) @@ -503,6 +407,9 @@ flows = [] for msg in msgs: for stats in msg.body: + if 0 <= priority != stats.priority: + continue + actions = actions_to_str(stats.instructions) match = match_to_str(stats.match) s = {'priority': stats.priority, @@ -530,8 +437,8 @@ flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) match = to_match(dp, flow.get('match', {})) stats = dp.ofproto_parser.OFPAggregateStatsRequest( @@ -679,7 +586,7 @@ if port is None: port = dp.ofproto.OFPP_ANY else: - port = int(str(port), 0) + port = str_to_int(port) stats = dp.ofproto_parser.OFPPortStatsRequest( dp, port, 0) @@ -711,7 +618,7 @@ if group_id is None: group_id = dp.ofproto.OFPG_ALL else: - group_id = int(str(group_id), 0) + group_id = str_to_int(group_id) stats = dp.ofproto_parser.OFPGroupStatsRequest( dp, group_id, 0) @@ -856,20 +763,24 @@ return {str(dp.id): descs} +def get_role(dp, waiters, to_user=True): + return ofctl_utils.get_role(dp, waiters, to_user) + + def mod_flow_entry(dp, flow, cmd): - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0)) - idle_timeout = int(flow.get('idle_timeout', 0)) - hard_timeout = int(flow.get('hard_timeout', 0)) - priority = int(flow.get('priority', 0)) + idle_timeout = str_to_int(flow.get('idle_timeout', 0)) + hard_timeout = str_to_int(flow.get('hard_timeout', 0)) + priority = str_to_int(flow.get('priority', 0)) buffer_id = UTIL.ofp_buffer_from_user( flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - flags = int(flow.get('flags', 0)) + flags = str_to_int(flow.get('flags', 0)) match = to_match(dp, flow.get('match', {})) inst = to_actions(dp, flow.get('actions', [])) @@ -896,9 +807,11 @@ buckets = [] for bucket in group.get('buckets', []): - weight = int(bucket.get('weight', 0)) - watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY)) - watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY)) + weight = str_to_int(bucket.get('weight', 0)) + watch_port = str_to_int( + bucket.get('watch_port', dp.ofproto.OFPP_ANY)) + watch_group = str_to_int( + bucket.get('watch_group', dp.ofproto.OFPG_ANY)) actions = [] for dic in bucket.get('actions', []): action = to_action(dp, dic) @@ -916,9 +829,9 @@ def mod_port_behavior(dp, port_config): port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0)) hw_addr = str(port_config.get('hw_addr')) - config = int(port_config.get('config', 0)) - mask = int(port_config.get('mask', 0)) - advertise = int(port_config.get('advertise')) + config = str_to_int(port_config.get('config', 0)) + mask = str_to_int(port_config.get('mask', 0)) + advertise = str_to_int(port_config.get('advertise')) port_mod = dp.ofproto_parser.OFPPortMod( dp, port_no, hw_addr, config, mask, advertise) diff -Nru ryu-4.9/ryu/lib/ofctl_v1_3.py ryu-4.15/ryu/lib/ofctl_v1_3.py --- ryu-4.9/ryu/lib/ofctl_v1_3.py 
2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/ofctl_v1_3.py 2017-07-02 11:08:32.000000000 +0000 @@ -30,6 +30,7 @@ DEFAULT_TIMEOUT = 1.0 UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_3) +str_to_int = ofctl_utils.str_to_int def to_action(dp, dic): @@ -72,8 +73,8 @@ table_id = UTIL.ofp_table_from_user(a.get('table_id')) inst.append(parser.OFPInstructionGotoTable(table_id)) elif action_type == 'WRITE_METADATA': - metadata = ofctl_utils.str_to_int(a.get('metadata')) - metadata_mask = (ofctl_utils.str_to_int(a['metadata_mask']) + metadata = str_to_int(a.get('metadata')) + metadata_mask = (str_to_int(a['metadata_mask']) if 'metadata_mask' in a else parser.UINT64_MAX) inst.append( @@ -192,51 +193,51 @@ def to_match(dp, attrs): convert = {'in_port': UTIL.ofp_port_from_user, - 'in_phy_port': int, + 'in_phy_port': str_to_int, 'metadata': ofctl_utils.to_match_masked_int, 'dl_dst': ofctl_utils.to_match_eth, 'dl_src': ofctl_utils.to_match_eth, 'eth_dst': ofctl_utils.to_match_eth, 'eth_src': ofctl_utils.to_match_eth, - 'dl_type': int, - 'eth_type': int, + 'dl_type': str_to_int, + 'eth_type': str_to_int, 'dl_vlan': to_match_vid, 'vlan_vid': to_match_vid, - 'vlan_pcp': int, - 'ip_dscp': int, - 'ip_ecn': int, - 'nw_proto': int, - 'ip_proto': int, + 'vlan_pcp': str_to_int, + 'ip_dscp': str_to_int, + 'ip_ecn': str_to_int, + 'nw_proto': str_to_int, + 'ip_proto': str_to_int, 'nw_src': ofctl_utils.to_match_ip, 'nw_dst': ofctl_utils.to_match_ip, 'ipv4_src': ofctl_utils.to_match_ip, 'ipv4_dst': ofctl_utils.to_match_ip, - 'tp_src': int, - 'tp_dst': int, - 'tcp_src': int, - 'tcp_dst': int, - 'udp_src': int, - 'udp_dst': int, - 'sctp_src': int, - 'sctp_dst': int, - 'icmpv4_type': int, - 'icmpv4_code': int, - 'arp_op': int, + 'tp_src': str_to_int, + 'tp_dst': str_to_int, + 'tcp_src': str_to_int, + 'tcp_dst': str_to_int, + 'udp_src': str_to_int, + 'udp_dst': str_to_int, + 'sctp_src': str_to_int, + 'sctp_dst': str_to_int, + 'icmpv4_type': str_to_int, + 'icmpv4_code': str_to_int, + 'arp_op': str_to_int, 'arp_spa': ofctl_utils.to_match_ip, 'arp_tpa': ofctl_utils.to_match_ip, 'arp_sha': ofctl_utils.to_match_eth, 'arp_tha': ofctl_utils.to_match_eth, 'ipv6_src': ofctl_utils.to_match_ip, 'ipv6_dst': ofctl_utils.to_match_ip, - 'ipv6_flabel': int, - 'icmpv6_type': int, - 'icmpv6_code': int, + 'ipv6_flabel': str_to_int, + 'icmpv6_type': str_to_int, + 'icmpv6_code': str_to_int, 'ipv6_nd_target': ofctl_utils.to_match_ip, 'ipv6_nd_sll': ofctl_utils.to_match_eth, 'ipv6_nd_tll': ofctl_utils.to_match_eth, - 'mpls_label': int, - 'mpls_tc': int, - 'mpls_bos': int, + 'mpls_label': str_to_int, + 'mpls_tc': str_to_int, + 'mpls_bos': str_to_int, 'pbb_isid': ofctl_utils.to_match_masked_int, 'tunnel_id': ofctl_utils.to_match_masked_int, 'ipv6_exthdr': ofctl_utils.to_match_masked_int} @@ -313,8 +314,7 @@ mask = match_field['OXMTlv']['mask'] value = match_field['OXMTlv']['value'] if key == 'dl_vlan': - value = ofctl_utils.match_vid_to_str(value, mask, - ofproto_v1_3.OFPVID_PRESENT) + value = match_vid_to_str(value, mask) elif key == 'in_port': value = UTIL.ofp_port_to_user(value) else: @@ -325,6 +325,11 @@ return match +def match_vid_to_str(value, mask): + return ofctl_utils.match_vid_to_str( + value, mask, ofproto_v1_3.OFPVID_PRESENT) + + def wrap_dpid_dict(dp, value, to_user=True): if to_user: return {str(dp.id): value} @@ -340,11 +345,7 @@ for msg in msgs: stats = msg.body - s = {'mfr_desc': stats.mfr_desc, - 'hw_desc': stats.hw_desc, - 'sw_desc': stats.sw_desc, - 'serial_num': stats.serial_num, - 'dp_desc': stats.dp_desc} + s = 
stats.to_jsondict()[stats.__class__.__name__] return wrap_dpid_dict(dp, s, to_user) @@ -355,12 +356,12 @@ if port is None: port = ofp.OFPP_ANY else: - port = int(str(port), 0) + port = str_to_int(port) if queue_id is None: queue_id = ofp.OFPQ_ALL else: - queue_id = int(str(queue_id), 0) + queue_id = str_to_int(queue_id) stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, port, queue_id) @@ -387,7 +388,7 @@ if port is None: port = ofp.OFPP_ANY else: - port = UTIL.ofp_port_from_user(int(str(port), 0)) + port = UTIL.ofp_port_from_user(str_to_int(port)) stats = dp.ofproto_parser.OFPQueueGetConfigRequest(dp, port) msgs = [] ofctl_utils.send_stats_request(dp, stats, waiters, msgs, LOG) @@ -440,14 +441,17 @@ flow = flow if flow else {} table_id = UTIL.ofp_table_from_user( flow.get('table_id', dp.ofproto.OFPTT_ALL)) - flags = int(flow.get('flags', 0)) + flags = str_to_int(flow.get('flags', 0)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) match = to_match(dp, flow.get('match', {})) + # Note: OpenFlow does not allow to filter flow entries by priority, + # but for efficiency, ofctl provides the way to do it. + priority = str_to_int(flow.get('priority', -1)) stats = dp.ofproto_parser.OFPFlowStatsRequest( dp, flags, table_id, out_port, out_group, cookie, cookie_mask, @@ -459,6 +463,9 @@ flows = [] for msg in msgs: for stats in msg.body: + if 0 <= priority != stats.priority: + continue + s = {'priority': stats.priority, 'cookie': stats.cookie, 'idle_timeout': stats.idle_timeout, @@ -490,13 +497,13 @@ flow = flow if flow else {} table_id = UTIL.ofp_table_from_user( flow.get('table_id', dp.ofproto.OFPTT_ALL)) - flags = int(flow.get('flags', 0)) + flags = str_to_int(flow.get('flags', 0)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) match = to_match(dp, flow.get('match', {})) stats = dp.ofproto_parser.OFPAggregateStatsRequest( @@ -649,7 +656,7 @@ if port is None: port = dp.ofproto.OFPP_ANY else: - port = int(str(port), 0) + port = str_to_int(port) stats = dp.ofproto_parser.OFPPortStatsRequest( dp, 0, port) @@ -689,7 +696,7 @@ if meter_id is None: meter_id = dp.ofproto.OFPM_ALL else: - meter_id = int(str(meter_id), 0) + meter_id = str_to_int(meter_id) stats = dp.ofproto_parser.OFPMeterStatsRequest( dp, 0, meter_id) @@ -784,7 +791,7 @@ if meter_id is None: meter_id = dp.ofproto.OFPM_ALL else: - meter_id = int(str(meter_id), 0) + meter_id = str_to_int(meter_id) stats = dp.ofproto_parser.OFPMeterConfigStatsRequest( dp, 0, meter_id) @@ -837,7 +844,7 @@ if group_id is None: group_id = dp.ofproto.OFPG_ALL else: - group_id = int(str(group_id), 0) + group_id = str_to_int(group_id) stats = dp.ofproto_parser.OFPGroupStatsRequest( dp, 0, group_id) @@ -1035,20 +1042,24 @@ return wrap_dpid_dict(dp, descs, to_user) +def get_role(dp, waiters, to_user=True): + return ofctl_utils.get_role(dp, waiters, to_user) + + def mod_flow_entry(dp, flow, cmd): - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 
0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0)) - idle_timeout = int(flow.get('idle_timeout', 0)) - hard_timeout = int(flow.get('hard_timeout', 0)) - priority = int(flow.get('priority', 0)) + idle_timeout = str_to_int(flow.get('idle_timeout', 0)) + hard_timeout = str_to_int(flow.get('hard_timeout', 0)) + priority = str_to_int(flow.get('priority', 0)) buffer_id = UTIL.ofp_buffer_from_user( flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - flags = int(flow.get('flags', 0)) + flags = str_to_int(flow.get('flags', 0)) match = to_match(dp, flow.get('match', {})) inst = to_actions(dp, flow.get('actions', [])) @@ -1083,18 +1094,18 @@ bands = [] for band in meter.get('bands', []): band_type = band.get('type') - rate = int(band.get('rate', 0)) - burst_size = int(band.get('burst_size', 0)) + rate = str_to_int(band.get('rate', 0)) + burst_size = str_to_int(band.get('burst_size', 0)) if band_type == 'DROP': bands.append( dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size)) elif band_type == 'DSCP_REMARK': - prec_level = int(band.get('prec_level', 0)) + prec_level = str_to_int(band.get('prec_level', 0)) bands.append( dp.ofproto_parser.OFPMeterBandDscpRemark( rate, burst_size, prec_level)) elif band_type == 'EXPERIMENTER': - experimenter = int(band.get('experimenter', 0)) + experimenter = str_to_int(band.get('experimenter', 0)) bands.append( dp.ofproto_parser.OFPMeterBandExperimenter( rate, burst_size, experimenter)) @@ -1122,9 +1133,11 @@ buckets = [] for bucket in group.get('buckets', []): - weight = int(bucket.get('weight', 0)) - watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY)) - watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY)) + weight = str_to_int(bucket.get('weight', 0)) + watch_port = str_to_int( + bucket.get('watch_port', dp.ofproto.OFPP_ANY)) + watch_group = str_to_int( + bucket.get('watch_group', dp.ofproto.OFPG_ANY)) actions = [] for dic in bucket.get('actions', []): action = to_action(dp, dic) @@ -1142,9 +1155,9 @@ def mod_port_behavior(dp, port_config): port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0)) hw_addr = str(port_config.get('hw_addr')) - config = int(port_config.get('config', 0)) - mask = int(port_config.get('mask', 0)) - advertise = int(port_config.get('advertise')) + config = str_to_int(port_config.get('config', 0)) + mask = str_to_int(port_config.get('mask', 0)) + advertise = str_to_int(port_config.get('advertise')) port_mod = dp.ofproto_parser.OFPPortMod( dp, port_no, hw_addr, config, mask, advertise) diff -Nru ryu-4.9/ryu/lib/ofctl_v1_4.py ryu-4.15/ryu/lib/ofctl_v1_4.py --- ryu-4.9/ryu/lib/ofctl_v1_4.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/ofctl_v1_4.py 2017-07-02 11:08:32.000000000 +0000 @@ -25,6 +25,7 @@ DEFAULT_TIMEOUT = 1.0 UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_4) +str_to_int = ofctl_utils.str_to_int def to_action(dp, dic): @@ -68,18 +69,18 @@ instructions.append( parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, [])) elif inst_type == 'GOTO_TABLE': - table_id = int(i.get('table_id')) + table_id = str_to_int(i.get('table_id')) instructions.append(parser.OFPInstructionGotoTable(table_id)) elif inst_type == 'WRITE_METADATA': - metadata = ofctl_utils.str_to_int(i.get('metadata')) - metadata_mask = 
(ofctl_utils.str_to_int(i['metadata_mask']) + metadata = str_to_int(i.get('metadata')) + metadata_mask = (str_to_int(i['metadata_mask']) if 'metadata_mask' in i else parser.UINT64_MAX) instructions.append( parser.OFPInstructionWriteMetadata( metadata, metadata_mask)) elif inst_type == 'METER': - meter_id = int(i.get('meter_id')) + meter_id = str_to_int(i.get('meter_id')) instructions.append(parser.OFPInstructionMeter(meter_id)) else: LOG.error('Unknown instruction type: %s', inst_type) @@ -127,46 +128,46 @@ def to_match(dp, attrs): convert = {'in_port': UTIL.ofp_port_from_user, - 'in_phy_port': int, + 'in_phy_port': str_to_int, 'metadata': ofctl_utils.to_match_masked_int, 'eth_dst': ofctl_utils.to_match_eth, 'eth_src': ofctl_utils.to_match_eth, - 'eth_type': int, + 'eth_type': str_to_int, 'vlan_vid': to_match_vid, - 'vlan_pcp': int, - 'ip_dscp': int, - 'ip_ecn': int, - 'ip_proto': int, + 'vlan_pcp': str_to_int, + 'ip_dscp': str_to_int, + 'ip_ecn': str_to_int, + 'ip_proto': str_to_int, 'ipv4_src': ofctl_utils.to_match_ip, 'ipv4_dst': ofctl_utils.to_match_ip, - 'tcp_src': int, - 'tcp_dst': int, - 'udp_src': int, - 'udp_dst': int, - 'sctp_src': int, - 'sctp_dst': int, - 'icmpv4_type': int, - 'icmpv4_code': int, - 'arp_op': int, + 'tcp_src': str_to_int, + 'tcp_dst': str_to_int, + 'udp_src': str_to_int, + 'udp_dst': str_to_int, + 'sctp_src': str_to_int, + 'sctp_dst': str_to_int, + 'icmpv4_type': str_to_int, + 'icmpv4_code': str_to_int, + 'arp_op': str_to_int, 'arp_spa': ofctl_utils.to_match_ip, 'arp_tpa': ofctl_utils.to_match_ip, 'arp_sha': ofctl_utils.to_match_eth, 'arp_tha': ofctl_utils.to_match_eth, 'ipv6_src': ofctl_utils.to_match_ip, 'ipv6_dst': ofctl_utils.to_match_ip, - 'ipv6_flabel': int, - 'icmpv6_type': int, - 'icmpv6_code': int, + 'ipv6_flabel': str_to_int, + 'icmpv6_type': str_to_int, + 'icmpv6_code': str_to_int, 'ipv6_nd_target': ofctl_utils.to_match_ip, 'ipv6_nd_sll': ofctl_utils.to_match_eth, 'ipv6_nd_tll': ofctl_utils.to_match_eth, - 'mpls_label': int, - 'mpls_tc': int, - 'mpls_bos': int, + 'mpls_label': str_to_int, + 'mpls_tc': str_to_int, + 'mpls_bos': str_to_int, 'pbb_isid': ofctl_utils.to_match_masked_int, 'tunnel_id': ofctl_utils.to_match_masked_int, 'ipv6_exthdr': ofctl_utils.to_match_masked_int, - 'pbb_uca': int} + 'pbb_uca': str_to_int} keys = {'dl_dst': 'eth_dst', 'dl_src': 'eth_src', @@ -213,8 +214,7 @@ mask = match_field['OXMTlv']['mask'] value = match_field['OXMTlv']['value'] if key == 'vlan_vid': - value = ofctl_utils.match_vid_to_str(value, mask, - ofproto_v1_4.OFPVID_PRESENT) + value = match_vid_to_str(value, mask) elif key == 'in_port': value = UTIL.ofp_port_to_user(value) else: @@ -225,6 +225,11 @@ return match +def match_vid_to_str(value, mask): + return ofctl_utils.match_vid_to_str( + value, mask, ofproto_v1_4.OFPVID_PRESENT) + + def wrap_dpid_dict(dp, value, to_user=True): if to_user: return {str(dp.id): value} @@ -314,14 +319,17 @@ flow = flow if flow else {} table_id = UTIL.ofp_table_from_user( flow.get('table_id', dp.ofproto.OFPTT_ALL)) - flags = int(flow.get('flags', 0)) + flags = str_to_int(flow.get('flags', 0)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) match = to_match(dp, flow.get('match', {})) + # Note: OpenFlow does not allow to filter flow entries by 
priority, + # but for efficiency, ofctl provides the way to do it. + priority = str_to_int(flow.get('priority', -1)) stats = dp.ofproto_parser.OFPFlowStatsRequest( dp, flags, table_id, out_port, out_group, cookie, cookie_mask, @@ -333,6 +341,9 @@ flows = [] for msg in msgs: for stats in msg.body: + if 0 <= priority != stats.priority: + continue + s = stats.to_jsondict()[stats.__class__.__name__] s['instructions'] = instructions_to_str(stats.instructions) s['match'] = match_to_str(stats.match) @@ -345,13 +356,13 @@ flow = flow if flow else {} table_id = UTIL.ofp_table_from_user( flow.get('table_id', dp.ofproto.OFPTT_ALL)) - flags = int(flow.get('flags', 0)) + flags = str_to_int(flow.get('flags', 0)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) match = to_match(dp, flow.get('match', {})) stats = dp.ofproto_parser.OFPAggregateStatsRequest( @@ -806,21 +817,25 @@ return wrap_dpid_dict(dp, descs, to_user) +def get_role(dp, waiters, to_user=True): + return ofctl_utils.get_role(dp, waiters, to_user) + + def mod_flow_entry(dp, flow, cmd): - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0)) - idle_timeout = int(flow.get('idle_timeout', 0)) - hard_timeout = int(flow.get('hard_timeout', 0)) - priority = int(flow.get('priority', 0)) + idle_timeout = str_to_int(flow.get('idle_timeout', 0)) + hard_timeout = str_to_int(flow.get('hard_timeout', 0)) + priority = str_to_int(flow.get('priority', 0)) buffer_id = UTIL.ofp_buffer_from_user( flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - importance = int(flow.get('importance', 0)) - flags = int(flow.get('flags', 0)) + importance = str_to_int(flow.get('importance', 0)) + flags = str_to_int(flow.get('flags', 0)) match = to_match(dp, flow.get('match', {})) inst = to_instructions(dp, flow.get('instructions', [])) @@ -851,18 +866,18 @@ bands = [] for band in meter.get('bands', []): band_type = band.get('type') - rate = int(band.get('rate', 0)) - burst_size = int(band.get('burst_size', 0)) + rate = str_to_int(band.get('rate', 0)) + burst_size = str_to_int(band.get('burst_size', 0)) if band_type == 'DROP': bands.append( dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size)) elif band_type == 'DSCP_REMARK': - prec_level = int(band.get('prec_level', 0)) + prec_level = str_to_int(band.get('prec_level', 0)) bands.append( dp.ofproto_parser.OFPMeterBandDscpRemark( rate, burst_size, prec_level)) elif band_type == 'EXPERIMENTER': - experimenter = int(band.get('experimenter', 0)) + experimenter = str_to_int(band.get('experimenter', 0)) bands.append( dp.ofproto_parser.OFPMeterBandExperimenter( rate, burst_size, experimenter)) @@ -886,9 +901,11 @@ buckets = [] for bucket in group.get('buckets', []): - weight = int(bucket.get('weight', 0)) - watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY)) - watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY)) + weight = str_to_int(bucket.get('weight', 0)) + watch_port = 
str_to_int( + bucket.get('watch_port', dp.ofproto.OFPP_ANY)) + watch_group = str_to_int( + bucket.get('watch_group', dp.ofproto.OFPG_ANY)) actions = [] for dic in bucket.get('actions', []): action = to_action(dp, dic) @@ -908,8 +925,8 @@ parser = dp.ofproto_parser port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0)) hw_addr = str(port_config.get('hw_addr')) - config = int(port_config.get('config', 0)) - mask = int(port_config.get('mask', 0)) + config = str_to_int(port_config.get('config', 0)) + mask = str_to_int(port_config.get('mask', 0)) properties = port_config.get('properties') prop = [] diff -Nru ryu-4.9/ryu/lib/ofctl_v1_5.py ryu-4.15/ryu/lib/ofctl_v1_5.py --- ryu-4.9/ryu/lib/ofctl_v1_5.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/ofctl_v1_5.py 2017-07-02 11:08:32.000000000 +0000 @@ -26,6 +26,7 @@ DEFAULT_TIMEOUT = 1.0 UTIL = ofctl_utils.OFCtlUtil(ofproto_v1_5) +str_to_int = ofctl_utils.str_to_int def to_action(dp, dic): @@ -69,11 +70,11 @@ instructions.append( parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, [])) elif inst_type == 'GOTO_TABLE': - table_id = int(i.get('table_id')) + table_id = str_to_int(i.get('table_id')) instructions.append(parser.OFPInstructionGotoTable(table_id)) elif inst_type == 'WRITE_METADATA': - metadata = ofctl_utils.str_to_int(i.get('metadata')) - metadata_mask = (ofctl_utils.str_to_int(i['metadata_mask']) + metadata = str_to_int(i.get('metadata')) + metadata_mask = (str_to_int(i['metadata_mask']) if 'metadata_mask' in i else parser.UINT64_MAX) instructions.append( @@ -129,48 +130,48 @@ def to_match(dp, attrs): convert = {'in_port': UTIL.ofp_port_from_user, - 'in_phy_port': int, + 'in_phy_port': str_to_int, 'metadata': ofctl_utils.to_match_masked_int, 'eth_dst': ofctl_utils.to_match_eth, 'eth_src': ofctl_utils.to_match_eth, - 'eth_type': int, + 'eth_type': str_to_int, 'vlan_vid': to_match_vid, - 'vlan_pcp': int, - 'ip_dscp': int, - 'ip_ecn': int, - 'ip_proto': int, + 'vlan_pcp': str_to_int, + 'ip_dscp': str_to_int, + 'ip_ecn': str_to_int, + 'ip_proto': str_to_int, 'ipv4_src': ofctl_utils.to_match_ip, 'ipv4_dst': ofctl_utils.to_match_ip, - 'tcp_src': int, - 'tcp_dst': int, - 'udp_src': int, - 'udp_dst': int, - 'sctp_src': int, - 'sctp_dst': int, - 'icmpv4_type': int, - 'icmpv4_code': int, - 'arp_op': int, + 'tcp_src': str_to_int, + 'tcp_dst': str_to_int, + 'udp_src': str_to_int, + 'udp_dst': str_to_int, + 'sctp_src': str_to_int, + 'sctp_dst': str_to_int, + 'icmpv4_type': str_to_int, + 'icmpv4_code': str_to_int, + 'arp_op': str_to_int, 'arp_spa': ofctl_utils.to_match_ip, 'arp_tpa': ofctl_utils.to_match_ip, 'arp_sha': ofctl_utils.to_match_eth, 'arp_tha': ofctl_utils.to_match_eth, 'ipv6_src': ofctl_utils.to_match_ip, 'ipv6_dst': ofctl_utils.to_match_ip, - 'ipv6_flabel': int, - 'icmpv6_type': int, - 'icmpv6_code': int, + 'ipv6_flabel': str_to_int, + 'icmpv6_type': str_to_int, + 'icmpv6_code': str_to_int, 'ipv6_nd_target': ofctl_utils.to_match_ip, 'ipv6_nd_sll': ofctl_utils.to_match_eth, 'ipv6_nd_tll': ofctl_utils.to_match_eth, - 'mpls_label': int, - 'mpls_tc': int, - 'mpls_bos': int, + 'mpls_label': str_to_int, + 'mpls_tc': str_to_int, + 'mpls_bos': str_to_int, 'pbb_isid': ofctl_utils.to_match_masked_int, 'tunnel_id': ofctl_utils.to_match_masked_int, 'ipv6_exthdr': ofctl_utils.to_match_masked_int, - 'pbb_uca': int, - 'tcp_flags': int, - 'actset_output': int, + 'pbb_uca': str_to_int, + 'tcp_flags': str_to_int, + 'actset_output': str_to_int, 'packet_type': ofctl_utils.to_match_packet_type} keys = {'dl_dst': 'eth_dst', @@ -218,8 
+219,7 @@ mask = match_field['OXMTlv']['mask'] value = match_field['OXMTlv']['value'] if key == 'vlan_vid': - value = ofctl_utils.match_vid_to_str(value, mask, - ofproto_v1_5.OFPVID_PRESENT) + value = match_vid_to_str(value, mask) elif key == 'in_port': value = UTIL.ofp_port_to_user(value) elif key == 'packet_type': @@ -232,6 +232,11 @@ return match +def match_vid_to_str(value, mask): + return ofctl_utils.match_vid_to_str( + value, mask, ofproto_v1_5.OFPVID_PRESENT) + + def wrap_dpid_dict(dp, value, to_user=True): if to_user: return {str(dp.id): value} @@ -346,14 +351,17 @@ flow = flow if flow else {} table_id = UTIL.ofp_table_from_user( flow.get('table_id', dp.ofproto.OFPTT_ALL)) - flags = int(flow.get('flags', 0)) + flags = str_to_int(flow.get('flags', 0)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) match = to_match(dp, flow.get('match', {})) + # Note: OpenFlow does not allow to filter flow entries by priority, + # but for efficiency, ofctl provides the way to do it. + priority = str_to_int(flow.get('priority', -1)) stats = dp.ofproto_parser.OFPFlowDescStatsRequest( dp, flags, table_id, out_port, out_group, cookie, cookie_mask, @@ -365,6 +373,9 @@ flows = [] for msg in msgs: for stats in msg.body: + if 0 <= priority != stats.priority: + continue + s = stats.to_jsondict()[stats.__class__.__name__] s['instructions'] = instructions_to_str(stats.instructions) s['stats'] = stats_to_str(stats.stats) @@ -378,14 +389,17 @@ flow = flow if flow else {} table_id = UTIL.ofp_table_from_user( flow.get('table_id', dp.ofproto.OFPTT_ALL)) - flags = int(flow.get('flags', 0)) + flags = str_to_int(flow.get('flags', 0)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) match = to_match(dp, flow.get('match', {})) + # Note: OpenFlow does not allow to filter flow entries by priority, + # but for efficiency, ofctl provides the way to do it. 
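The OF1.5 hunks above repeat the same client-side priority note, and they also continue the one change that runs through every ofctl module in this diff: bare int() casts on user-supplied fields (cookie, flags, port, meter_id, bucket weight, ...) become ofctl_utils.str_to_int. The helper's body is not part of this diff; the sketch below is only an assumption based on the int(str(port), 0) calls it replaces, i.e. it is expected to accept ints as well as decimal or hexadecimal strings arriving from REST/JSON input.

.. code-block:: python

    # Hypothetical stand-in for ofctl_utils.str_to_int (not shown in
    # this diff); base 0 lets Python pick the base from the prefix.
    def str_to_int_sketch(value):
        return int(str(value), 0)

    assert str_to_int_sketch(5) == 5         # ints pass through
    assert str_to_int_sketch('5') == 5       # decimal strings
    assert str_to_int_sketch('0xff') == 255  # hex strings from JSON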
+ priority = str_to_int(flow.get('priority', -1)) stats = dp.ofproto_parser.OFPFlowStatsRequest( dp, flags, table_id, out_port, out_group, cookie, cookie_mask, @@ -397,6 +411,9 @@ flows = [] for msg in msgs: for stats in msg.body: + if 0 <= priority != stats.priority: + continue + s = stats.to_jsondict()[stats.__class__.__name__] s['stats'] = stats_to_str(stats.stats) s['match'] = match_to_str(stats.match) @@ -409,13 +426,13 @@ flow = flow if flow else {} table_id = UTIL.ofp_table_from_user( flow.get('table_id', dp.ofproto.OFPTT_ALL)) - flags = int(flow.get('flags', 0)) + flags = str_to_int(flow.get('flags', 0)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) match = to_match(dp, flow.get('match', {})) stats = dp.ofproto_parser.OFPAggregateStatsRequest( @@ -897,21 +914,25 @@ return wrap_dpid_dict(dp, descs, to_user) +def get_role(dp, waiters, to_user=True): + return ofctl_utils.get_role(dp, waiters, to_user) + + def mod_flow_entry(dp, flow, cmd): - cookie = int(flow.get('cookie', 0)) - cookie_mask = int(flow.get('cookie_mask', 0)) + cookie = str_to_int(flow.get('cookie', 0)) + cookie_mask = str_to_int(flow.get('cookie_mask', 0)) table_id = UTIL.ofp_table_from_user(flow.get('table_id', 0)) - idle_timeout = int(flow.get('idle_timeout', 0)) - hard_timeout = int(flow.get('hard_timeout', 0)) - priority = int(flow.get('priority', 0)) + idle_timeout = str_to_int(flow.get('idle_timeout', 0)) + hard_timeout = str_to_int(flow.get('hard_timeout', 0)) + priority = str_to_int(flow.get('priority', 0)) buffer_id = UTIL.ofp_buffer_from_user( flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER)) out_port = UTIL.ofp_port_from_user( flow.get('out_port', dp.ofproto.OFPP_ANY)) out_group = UTIL.ofp_group_from_user( flow.get('out_group', dp.ofproto.OFPG_ANY)) - importance = int(flow.get('importance', 0)) - flags = int(flow.get('flags', 0)) + importance = str_to_int(flow.get('importance', 0)) + flags = str_to_int(flow.get('flags', 0)) match = to_match(dp, flow.get('match', {})) inst = to_instructions(dp, flow.get('instructions', [])) @@ -942,18 +963,18 @@ bands = [] for band in meter.get('bands', []): band_type = band.get('type') - rate = int(band.get('rate', 0)) - burst_size = int(band.get('burst_size', 0)) + rate = str_to_int(band.get('rate', 0)) + burst_size = str_to_int(band.get('burst_size', 0)) if band_type == 'DROP': bands.append( dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size)) elif band_type == 'DSCP_REMARK': - prec_level = int(band.get('prec_level', 0)) + prec_level = str_to_int(band.get('prec_level', 0)) bands.append( dp.ofproto_parser.OFPMeterBandDscpRemark( rate, burst_size, prec_level)) elif band_type == 'EXPERIMENTER': - experimenter = int(band.get('experimenter', 0)) + experimenter = str_to_int(band.get('experimenter', 0)) bands.append( dp.ofproto_parser.OFPMeterBandExperimenter( rate, burst_size, experimenter)) @@ -977,7 +998,7 @@ LOG.error('Unknown group type: %s', group.get('type')) group_id = UTIL.ofp_group_from_user(group.get('group_id', 0)) - command_bucket_id = int(group.get('command_bucket_id', 0)) + command_bucket_id = str_to_int(group.get('command_bucket_id', 0)) # Note: # The list of group property types that are currently defined @@ -988,7 +1009,7 @@ for bucket in group.get('buckets', []): # 
get bucket_id in buckets - bucket_id = int(bucket.get('bucket_id', 0)) + bucket_id = str_to_int(bucket.get('bucket_id', 0)) # get actions in buckets bucket_actions = [] @@ -1005,17 +1026,17 @@ group_bp_type = t if t != group_bp_type else ofp.OFPGBPT_WEIGHT if group_bp_type == ofp.OFPGBPT_WEIGHT: - weight = int(p.get('weight', 0)) + weight = str_to_int(p.get('weight', 0)) bucket_properties.append( parser.OFPGroupBucketPropWeight( type_=group_bp_type, weight=weight)) elif group_bp_type == ofp.OFPGBPT_WATCH_PORT: - watch_port = int(p.get('watch', dp.ofproto.OFPP_ANY)) + watch_port = str_to_int(p.get('watch', dp.ofproto.OFPP_ANY)) bucket_properties.append( parser.OFPGroupBucketPropWatch( type_=group_bp_type, watch=watch_port)) elif group_bp_type == ofp.OFPGBPT_WATCH_GROUP: - watch_group = int(p.get('watch', dp.ofproto.OFPG_ANY)) + watch_group = str_to_int(p.get('watch', dp.ofproto.OFPG_ANY)) bucket_properties.append( parser.OFPGroupBucketPropWatch( type_=group_bp_type, watch=watch_group)) @@ -1053,8 +1074,8 @@ parser = dp.ofproto_parser port_no = UTIL.ofp_port_from_user(port_config.get('port_no', 0)) hw_addr = str(port_config.get('hw_addr')) - config = int(port_config.get('config', 0)) - mask = int(port_config.get('mask', 0)) + config = str_to_int(port_config.get('config', 0)) + mask = str_to_int(port_config.get('mask', 0)) properties = port_config.get('properties') prop = [] @@ -1086,8 +1107,9 @@ def set_role(dp, role): r = UTIL.ofp_role_from_user(role.get('role', dp.ofproto.OFPCR_ROLE_EQUAL)) - role_request = dp.ofproto_parser.OFPRoleRequest(dp, r, 0) + role_request = dp.ofproto_parser.OFPRoleRequest(dp, r, None, 0) ofctl_utils.send_msg(dp, role_request, LOG) + # NOTE(jkoelker) Alias common funcitons send_experimenter = ofctl_utils.send_experimenter diff -Nru ryu-4.9/ryu/lib/packet/arp.py ryu-4.15/ryu/lib/packet/arp.py --- ryu-4.9/ryu/lib/packet/arp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/arp.py 2017-07-02 11:08:32.000000000 +0000 @@ -37,19 +37,19 @@ MAC addresses are represented as a string like '08:60:6e:7f:74:e7'. __init__ takes the corresponding args in this order. - ============== ==================== ===================== - Attribute Description Example - ============== ==================== ===================== - hwtype ar$hrd - proto ar$pro - hlen ar$hln - plen ar$pln - opcode ar$op - src_mac ar$sha '08:60:6e:7f:74:e7' - src_ip ar$spa '192.0.2.1' - dst_mac ar$tha '00:00:00:00:00:00' - dst_ip ar$tpa '192.0.2.2' - ============== ==================== ===================== + ============== ===================================== ===================== + Attribute Description Example + ============== ===================================== ===================== + hwtype Hardware address. + proto Protocol address. + hlen byte length of each hardware address. + plen byte length of each protocol address. + opcode operation codes. + src_mac Hardware address of sender. '08:60:6e:7f:74:e7' + src_ip Protocol address of sender. '192.0.2.1' + dst_mac Hardware address of target. '00:00:00:00:00:00' + dst_ip Protocol address of target. 
'192.0.2.2' + ============== ===================================== ===================== """ _PACK_STR = '!HHBBH6s4s6s4s' diff -Nru ryu-4.9/ryu/lib/packet/bfd.py ryu-4.15/ryu/lib/packet/bfd.py --- ryu-4.9/ryu/lib/packet/bfd.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/bfd.py 2017-07-02 11:08:32.000000000 +0000 @@ -16,8 +16,7 @@ """ BFD Control packet parser/serializer -RFC 5880 -BFD Control packet format +[RFC 5880] BFD Control packet format:: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 @@ -35,47 +34,46 @@ | Required Min Echo RX Interval | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - An optional Authentication Section MAY be present in the following - format of types: +An optional Authentication Section MAY be present in the following +format of types: - 1. Format of Simple Password Authentication Section - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Auth Type | Auth Len | Auth Key ID | Password... | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | ... | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - 2. Format of Keyed MD5 and Meticulous Keyed MD5 Authentication Section - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Auth Type | Auth Len | Auth Key ID | Reserved | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Sequence Number | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Auth Key/Digest... | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | ... | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - - 3. Format of Keyed SHA1 and Meticulous Keyed SHA1 Authentication Section - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Auth Type | Auth Len | Auth Key ID | Reserved | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Sequence Number | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | Auth Key/Hash... | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | ... | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +1. Format of Simple Password Authentication Section:: + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Auth Type | Auth Len | Auth Key ID | Password... | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | ... | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +2. Format of Keyed MD5 and Meticulous Keyed MD5 Authentication Section:: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Auth Type | Auth Len | Auth Key ID | Reserved | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Sequence Number | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Auth Key/Digest... | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | ... | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +3. 
Format of Keyed SHA1 and Meticulous Keyed SHA1 Authentication Section:: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Auth Type | Auth Len | Auth Key ID | Reserved | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Sequence Number | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Auth Key/Hash... | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | ... | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ """ import binascii import hashlib diff -Nru ryu-4.9/ryu/lib/packet/bgp.py ryu-4.15/ryu/lib/packet/bgp.py --- ryu-4.9/ryu/lib/packet/bgp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/bgp.py 2017-07-02 11:08:32.000000000 +0000 @@ -23,11 +23,16 @@ # - RFC 4364 BGP/MPLS IP Virtual Private Networks (VPNs) import abc +import base64 +import collections import copy import functools +import io +import itertools +import math +import re import socket import struct -import base64 import netaddr import six @@ -41,6 +46,7 @@ from ryu.lib.packet import mpls from ryu.lib import addrconv from ryu.lib import type_desc +from ryu.lib.type_desc import TypeDisp from ryu.lib import ip from ryu.lib.pack_utils import msg_pack_into from ryu.utils import binary_str @@ -191,43 +197,6 @@ return struct.pack(self._VALUE_PACK_STR, *args) -class _TypeDisp(object): - _TYPES = {} - _REV_TYPES = None - _UNKNOWN_TYPE = None - - @classmethod - def register_unknown_type(cls): - def _register_type(subcls): - cls._UNKNOWN_TYPE = subcls - return subcls - return _register_type - - @classmethod - def register_type(cls, type_): - cls._TYPES = cls._TYPES.copy() - - def _register_type(subcls): - cls._TYPES[type_] = subcls - cls._REV_TYPES = None - return subcls - return _register_type - - @classmethod - def _lookup_type(cls, type_): - try: - return cls._TYPES[type_] - except KeyError: - return cls._UNKNOWN_TYPE - - @classmethod - def _rev_lookup_type(cls, targ_cls): - if cls._REV_TYPES is None: - rev = dict((v, k) for k, v in cls._TYPES.items()) - cls._REV_TYPES = rev - return cls._REV_TYPES[targ_cls] - - class BgpExc(Exception): """Base bgp exception.""" @@ -604,6 +573,12 @@ RF_IPv4_MPLS = RouteFamily(addr_family.IP, subaddr_family.MPLS_LABEL) RF_IPv6_MPLS = RouteFamily(addr_family.IP6, subaddr_family.MPLS_LABEL) RF_L2_EVPN = RouteFamily(addr_family.L2VPN, subaddr_family.EVPN) +RF_IPv4_FLOWSPEC = RouteFamily(addr_family.IP, subaddr_family.IP_FLOWSPEC) +RF_IPv6_FLOWSPEC = RouteFamily(addr_family.IP6, subaddr_family.IP_FLOWSPEC) +RF_VPNv4_FLOWSPEC = RouteFamily(addr_family.IP, subaddr_family.VPN_FLOWSPEC) +RF_VPNv6_FLOWSPEC = RouteFamily(addr_family.IP6, subaddr_family.VPN_FLOWSPEC) +RF_L2VPN_FLOWSPEC = RouteFamily( + addr_family.L2VPN, subaddr_family.VPN_FLOWSPEC) RF_RTC_UC = RouteFamily(addr_family.IP, subaddr_family.ROUTE_TARGET_CONSTRAINTS) @@ -615,6 +590,11 @@ (addr_family.IP, subaddr_family.MPLS_LABEL): RF_IPv4_MPLS, (addr_family.IP6, subaddr_family.MPLS_LABEL): RF_IPv6_MPLS, (addr_family.L2VPN, subaddr_family.EVPN): RF_L2_EVPN, + (addr_family.IP, subaddr_family.IP_FLOWSPEC): RF_IPv4_FLOWSPEC, + (addr_family.IP6, subaddr_family.IP_FLOWSPEC): RF_IPv6_FLOWSPEC, + (addr_family.IP, subaddr_family.VPN_FLOWSPEC): RF_VPNv4_FLOWSPEC, + (addr_family.IP6, subaddr_family.VPN_FLOWSPEC): RF_VPNv6_FLOWSPEC, + (addr_family.L2VPN, subaddr_family.VPN_FLOWSPEC): RF_L2VPN_FLOWSPEC, (addr_family.IP, subaddr_family.ROUTE_TARGET_CONSTRAINTS): 
RF_RTC_UC } @@ -628,7 +608,7 @@ return binary + b'\0' * (len_ - len(binary)) -class _RouteDistinguisher(StringifyMixin, _TypeDisp, _Value): +class _RouteDistinguisher(StringifyMixin, TypeDisp, _Value): _PACK_STR = '!H' TWO_OCTET_AS = 0 IPV4_ADDRESS = 1 @@ -1038,9 +1018,23 @@ return "%s:%s" % (self.route_dist, self.prefix) -class EvpnEsi(StringifyMixin, _TypeDisp, _Value): +class EvpnEsi(StringifyMixin, TypeDisp, _Value): """ Ethernet Segment Identifier + + The supported ESI Types: + + - ``EvpnEsi.ARBITRARY`` indicates EvpnArbitraryEsi. + + - ``EvpnEsi.LACP`` indicates EvpnLACPEsi. + + - ``EvpnEsi.L2_BRIDGE`` indicates EvpnL2BridgeEsi. + + - ``EvpnEsi.MAC_BASED`` indicates EvpnMacBasedEsi. + + - ``EvpnEsi.ROUTER_ID`` indicates EvpnRouterIDEsi. + + - ``EvpnEsi.AS_BASED`` indicates EvpnASBasedEsi. """ _PACK_STR = "!B" # ESI Type _ESI_LEN = 10 @@ -1285,7 +1279,7 @@ self.local_disc = local_disc -class EvpnNLRI(StringifyMixin, _TypeDisp): +class EvpnNLRI(StringifyMixin, TypeDisp): """ BGP Network Layer Reachability Information (NLRI) for EVPN """ @@ -1310,6 +1304,9 @@ ROUTE_TYPE_NAME = None # must be defined in subclass + # Reserved value for Ethernet Tag ID. + MAX_ET = 0xFFFFFFFF + # Dictionary of ROUTE_TYPE_NAME to subclass. # e.g.) # _NAMES = {'eth_ad': EvpnEthernetAutoDiscoveryNLRI, ...} @@ -1621,8 +1618,8 @@ ] } - def __init__(self, route_dist, esi, ethernet_tag_id, mac_addr, ip_addr, - mpls_labels=None, vni=None, labels=None, + def __init__(self, route_dist, ethernet_tag_id, mac_addr, ip_addr, + esi=None, mpls_labels=None, vni=None, labels=None, mac_addr_len=None, ip_addr_len=None, type_=None, length=None): super(EvpnMacIPAdvertisementNLRI, self).__init__(type_, length) @@ -1893,7 +1890,8 @@ _LABEL_LEN = 3 def __init__(self, route_dist, ethernet_tag_id, ip_prefix, - esi=0, gw_ip_addr=None, mpls_label=None, vni=None, label=None, + esi=None, gw_ip_addr=None, + mpls_label=None, vni=None, label=None, type_=None, length=None): super(EvpnIpPrefixNLRI, self).__init__(type_, length) self.route_dist = route_dist @@ -1996,240 +1994,1519 @@ return [self.mpls_label] -@functools.total_ordering -class RouteTargetMembershipNLRI(StringifyMixin): - """Route Target Membership NLRI. - - Route Target membership NLRI is advertised in BGP UPDATE messages using - the MP_REACH_NLRI and MP_UNREACH_NLRI attributes. 
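Two hunks above quietly change the EVPN NLRI constructors: EvpnMacIPAdvertisementNLRI moves esi from the second positional argument to an optional keyword (esi=None), and EvpnIpPrefixNLRI's default changes from esi=0 to esi=None. Callers that passed esi positionally need updating; passing the fields by keyword, as sketched below with purely illustrative values, stays valid on both sides of the change.

.. code-block:: python

    from ryu.lib.packet import bgp

    # Illustrative values only; esi may now be omitted entirely.
    nlri = bgp.EvpnMacIPAdvertisementNLRI(
        route_dist='65000:100',
        ethernet_tag_id=0,
        mac_addr='aa:bb:cc:dd:ee:ff',
        ip_addr='192.0.2.1',
        vni=100,
    )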
+class _FlowSpecNLRIBase(StringifyMixin, TypeDisp): + """ + Base class for Flow Specification NLRI """ - ROUTE_FAMILY = RF_RTC_UC - DEFAULT_AS = '0:0' - DEFAULT_RT = '0:0' - - def __init__(self, origin_as, route_target): - # If given is not default_as and default_rt - if not (origin_as is self.DEFAULT_AS and - route_target is self.DEFAULT_RT): - # We validate them - if (not self._is_valid_asn(origin_as) or - not self._is_valid_ext_comm_attr(route_target)): - raise ValueError('Invalid params.') - self.origin_as = origin_as - self.route_target = route_target - - def _is_valid_asn(self, asn): - """Returns True if the given AS number is Two or Four Octet.""" - if isinstance(asn, six.integer_types) and 0 <= asn <= 0xffffffff: - return True - else: - return False + # flow-spec NLRI: + # +-----------------------------------+ + # | length (0xnn or 0xfn nn) | + # +-----------------------------------+ + # | NLRI value (variable) | + # +-----------------------------------+ + ROUTE_FAMILY = None + _LENGTH_SHORT_FMT = '!B' + LENGTH_SHORT_SIZE = struct.calcsize(_LENGTH_SHORT_FMT) + _LENGTH_LONG_FMT = '!H' + LENGTH_LONG_SIZE = struct.calcsize(_LENGTH_LONG_FMT) + _LENGTH_THRESHOLD = 0xf000 + FLOWSPEC_FAMILY = '' - def _is_valid_ext_comm_attr(self, attr): - """Validates *attr* as string representation of RT or SOO. + def __init__(self, length=0, rules=None): + self.length = length + rules = rules or [] + for r in rules: + assert isinstance(r, _FlowSpecComponentBase) + self.rules = rules - Returns True if *attr* is as per our convention of RT or SOO, else - False. Our convention is to represent RT/SOO is a string with format: - *global_admin_part:local_admin_path* - """ - is_valid = True + @classmethod + def parser(cls, buf): + (length,) = struct.unpack_from( + cls._LENGTH_LONG_FMT, six.binary_type(buf)) - if not isinstance(attr, str): - is_valid = False + if length < cls._LENGTH_THRESHOLD: + length >>= 8 + offset = cls.LENGTH_SHORT_SIZE else: - first, second = attr.split(':') - try: - if '.' in first: - socket.inet_aton(first) - else: - int(first) - int(second) - except (ValueError, socket.error): - is_valid = False - - return is_valid + offset = cls.LENGTH_LONG_SIZE - @property - def formatted_nlri_str(self): - return "%s:%s" % (self.origin_as, self.route_target) + kwargs = {'length': length} + rest = buf[offset:offset + length] - def is_default_rtnlri(self): - if (self._origin_as is self.DEFAULT_AS and - self._route_target is self.DEFAULT_RT): - return True - return False + if cls.ROUTE_FAMILY.safi == subaddr_family.VPN_FLOWSPEC: + route_dist = _RouteDistinguisher.parser(rest[:8]) + kwargs['route_dist'] = route_dist.formatted_str + rest = rest[8:] - def __lt__(self, other): - return ((self.origin_as, self.route_target) < - (other.origin_as, other.route_target)) + rules = [] - def __eq__(self, other): - return ((self.origin_as, self.route_target) == - (other.origin_as, other.route_target)) + while rest: + subcls, rest = _FlowSpecComponentBase.parse_header( + rest, cls.ROUTE_FAMILY.afi) - def __hash__(self): - return hash((self.origin_as, self.route_target)) + while rest: + rule, rest = subcls.parse_body(rest) + rules.append(rule) - @classmethod - def parser(cls, buf): - idx = 0 + if (not isinstance(rule, _FlowSpecOperatorBase) or + rule.operator & rule.END_OF_LIST): + break - # Extract origin AS. - origin_as, = struct.unpack_from('!I', buf, idx) - idx += 4 + kwargs['rules'] = rules - # Extract route target. 
- route_target = _ExtendedCommunity(buf[idx:]) - return cls(origin_as, route_target) + return cls(**kwargs), rest def serialize(self): - rt_nlri = b'' - if not self.is_default_rtnlri(): - rt_nlri += struct.pack('!I', self.origin_as) - # Encode route target - rt_nlri += self.route_target.serialize() + rules_bin = b'' - # RT Nlri is 12 octets - return struct.pack('B', (8 * 12)) + rt_nlri + if self.ROUTE_FAMILY.safi == subaddr_family.VPN_FLOWSPEC: + route_dist = _RouteDistinguisher.from_str(self.route_dist) + rules_bin += route_dist.serialize() + self.rules.sort(key=lambda x: x.type) + for _, rules in itertools.groupby(self.rules, key=lambda x: x.type): + rules = list(rules) + rules_bin += rules[0].serialize_header() -def _addr_class_key(route_family): - return route_family.afi, route_family.safi + if isinstance(rules[-1], _FlowSpecOperatorBase): + rules[-1].operator |= rules[-1].END_OF_LIST + for r in rules: + rules_bin += r.serialize_body() -_ADDR_CLASSES = { - _addr_class_key(RF_IPv4_UC): IPAddrPrefix, - _addr_class_key(RF_IPv6_UC): IP6AddrPrefix, - _addr_class_key(RF_IPv4_MPLS): LabelledIPAddrPrefix, - _addr_class_key(RF_IPv6_MPLS): LabelledIP6AddrPrefix, - _addr_class_key(RF_IPv4_VPN): LabelledVPNIPAddrPrefix, - _addr_class_key(RF_IPv6_VPN): LabelledVPNIP6AddrPrefix, - _addr_class_key(RF_L2_EVPN): EvpnNLRI, - _addr_class_key(RF_RTC_UC): RouteTargetMembershipNLRI, -} + self.length = len(rules_bin) + if self.length < self._LENGTH_THRESHOLD: + buf = struct.pack(self._LENGTH_SHORT_FMT, self.length) + else: + buf = struct.pack(self._LENGTH_LONG_FMT, self.length) -def _get_addr_class(afi, safi): - try: - return _ADDR_CLASSES[(afi, safi)] - except KeyError: - return _BinAddrPrefix + return buf + rules_bin + @classmethod + def _from_user(cls, **kwargs): + rules = [] + for k, v in kwargs.items(): + subcls = _FlowSpecComponentBase.lookup_type_name( + k, cls.ROUTE_FAMILY.afi) + rule = subcls.from_str(str(v)) + rules.extend(rule) + rules.sort(key=lambda x: x.type) + return cls(rules=rules) -class _OptParam(StringifyMixin, _TypeDisp, _Value): - _PACK_STR = '!BB' # type, length + @property + def prefix(self): + def _format(i): + pairs = [] + i.rules.sort(key=lambda x: x.type) + previous_type = None + for r in i.rules: + if r.type == previous_type: + if r.to_str()[0] != '&': + pairs[-1] += '|' + pairs[-1] += r.to_str() + else: + pairs.append('%s:%s' % (r.COMPONENT_NAME, r.to_str())) + previous_type = r.type - def __init__(self, type_, value=None, length=None): - if type_ is None: - type_ = self._rev_lookup_type(self.__class__) - self.type = type_ - self.length = length - if value is not None: - self.value = value + return ','.join(pairs) - @classmethod - def parser(cls, buf): - (type_, length) = struct.unpack_from(cls._PACK_STR, - six.binary_type(buf)) - rest = buf[struct.calcsize(cls._PACK_STR):] - value = bytes(rest[:length]) - rest = rest[length:] - subcls = cls._lookup_type(type_) - caps = subcls.parse_value(value) - if not isinstance(caps, list): - caps = [subcls(type_=type_, length=length, **caps[0])] - return caps, rest + return '%s(%s)' % (self.FLOWSPEC_FAMILY, _format(self)) - def serialize(self): - # fixup - value = self.serialize_value() - self.length = len(value) + @property + def formatted_nlri_str(self): + return self.prefix - buf = bytearray() - msg_pack_into(self._PACK_STR, buf, 0, self.type, self.length) - return buf + value +class FlowSpecIPv4NLRI(_FlowSpecNLRIBase): + """ + Flow Specification NLRI class for IPv4 [RFC 5575] + """ + ROUTE_FAMILY = RF_IPv4_FLOWSPEC + FLOWSPEC_FAMILY = 
'ipv4fs' -@_OptParam.register_unknown_type() -class BGPOptParamUnknown(_OptParam): @classmethod - def parse_value(cls, buf): - return { - 'value': buf - }, cls + def from_user(cls, **kwargs): + """ + Utility method for creating a NLRI instance. - def serialize_value(self): - return self.value + This function returns a NLRI instance from human readable format value. + :param kwargs: The following arguments are available. -@_OptParam.register_type(BGP_OPT_CAPABILITY) -class _OptParamCapability(_OptParam, _TypeDisp): - _CAP_HDR_PACK_STR = '!BB' + =========== ============= ========= ============================== + Argument Value Operator Description + =========== ============= ========= ============================== + dst_prefix IPv4 Prefix Nothing Destination Prefix. + src_prefix IPv4 Prefix Nothing Source Prefix. + ip_proto Integer Numeric IP Protocol. + port Integer Numeric Port number. + dst_port Integer Numeric Destination port number. + src_port Integer Numeric Source port number. + icmp_type Integer Numeric ICMP type. + icmp_code Integer Numeric ICMP code. + tcp_flags Fixed string Bitmask TCP flags. + Supported values are + ``CWR``, ``ECN``, ``URGENT``, + ``ACK``, ``PUSH``, ``RST``, + ``SYN`` and ``FIN``. + packet_len Integer Numeric Packet length. + dscp Integer Numeric Differentiated Services + Code Point. + fragment Fixed string Bitmask Fragment. + Supported values are + ``DF`` (Don't fragment), + ``ISF`` (Is a fragment), + ``FF`` (First fragment) and + ``LF`` (Last fragment) + =========== ============= ========= ============================== + + Example:: + + >>> msg = bgp.FlowSpecIPv4NLRI.from_user( + ... dst_prefix='10.0.0.0/24', + ... src_prefix='20.0.0.1/24', + ... ip_proto=6, + ... port='80 | 8000', + ... dst_port='>9000 & <9050', + ... src_port='>=8500 & <=9000', + ... icmp_type=0, + ... icmp_code=6, + ... tcp_flags='SYN+ACK & !=URGENT', + ... packet_len=1000, + ... dscp='22 | 24', + ... fragment='LF | ==FF') + >>> + + You can specify conditions with the following keywords. + + The following keywords can be used when the operator type is Numeric. + + ========== ============================================================ + Keyword Description + ========== ============================================================ + < Less than comparison between data and value. + <= Less than or equal to comparison between data and value. + > Greater than comparison between data and value. + >= Greater than or equal to comparison between data and value. + == Equality between data and value. + This operator can be omitted. + ========== ============================================================ + + The following keywords can be used when the operator type is Bitmask. + + ========== ================================================ + Keyword Description + ========== ================================================ + != Not equal operation. + == Exact match operation if specified. + Otherwise partial match operation. + `+` Used for the summation of bitmask values. + (e.g., SYN+ACK) + ========== ================================================ + + You can combine the multiple conditions with the following operators. 
+ + ========== ======================================= + Keyword Description + ========== ======================================= + `|` Logical OR operation + & Logical AND operation + ========== ======================================= - def __init__(self, cap_code=None, cap_value=None, cap_length=None, - type_=None, length=None): - super(_OptParamCapability, self).__init__(type_=BGP_OPT_CAPABILITY, - length=length) - if cap_code is None: - cap_code = self._rev_lookup_type(self.__class__) - self.cap_code = cap_code - if cap_value is not None: - self.cap_value = cap_value - if cap_length is not None: - self.cap_length = cap_length + :return: A instance of FlowSpecVPNv4NLRI. + """ + return cls._from_user(**kwargs) - @classmethod - def parse_value(cls, buf): - caps = [] - while len(buf) > 0: - (code, length) = struct.unpack_from(cls._CAP_HDR_PACK_STR, - six.binary_type(buf)) - value = buf[struct.calcsize(cls._CAP_HDR_PACK_STR):] - buf = buf[length + 2:] - kwargs = { - 'cap_code': code, - 'cap_length': length, - } - subcls = cls._lookup_type(code) - kwargs.update(subcls.parse_cap_value(value)) - caps.append(subcls(type_=BGP_OPT_CAPABILITY, length=length + 2, - **kwargs)) - return caps - def serialize_value(self): - # fixup - cap_value = self.serialize_cap_value() - self.cap_length = len(cap_value) +class FlowSpecVPNv4NLRI(_FlowSpecNLRIBase): + """ + Flow Specification NLRI class for VPNv4 [RFC 5575] + """ - buf = bytearray() - msg_pack_into(self._CAP_HDR_PACK_STR, buf, 0, self.cap_code, - self.cap_length) - return buf + cap_value + # flow-spec NLRI: + # +-----------------------------------+ + # | length (0xnn or 0xfn nn) | + # +-----------------------------------+ + # | RD (8 octets) | + # +-----------------------------------+ + # | NLRI value (variable) | + # +-----------------------------------+ + ROUTE_FAMILY = RF_VPNv4_FLOWSPEC + FLOWSPEC_FAMILY = 'vpnv4fs' + def __init__(self, length=0, route_dist=None, rules=None): + super(FlowSpecVPNv4NLRI, self).__init__(length, rules) + assert route_dist is not None + self.route_dist = route_dist -class _OptParamEmptyCapability(_OptParamCapability): @classmethod - def parse_cap_value(cls, buf): - return {} + def _from_user(cls, route_dist, **kwargs): + rules = [] + for k, v in kwargs.items(): + subcls = _FlowSpecComponentBase.lookup_type_name( + k, cls.ROUTE_FAMILY.afi) + rule = subcls.from_str(str(v)) + rules.extend(rule) + rules.sort(key=lambda x: x.type) + return cls(route_dist=route_dist, rules=rules) - def serialize_cap_value(self): - return bytearray() + @classmethod + def from_user(cls, route_dist, **kwargs): + """ + Utility method for creating a NLRI instance. + This function returns a NLRI instance from human readable format value. -@_OptParamCapability.register_unknown_type() -class BGPOptParamCapabilityUnknown(_OptParamCapability): - @classmethod - def parse_cap_value(cls, buf): - return {'cap_value': buf} + :param route_dist: Route Distinguisher. + :param kwargs: See :py:mod:`ryu.lib.packet.bgp.FlowSpecIPv4NLRI` - def serialize_cap_value(self): - return self.cap_value + Example:: + >>> msg = bgp.FlowSpecIPv4NLRI.from_user( + ... route_dist='65000:1000', + ... dst_prefix='10.0.0.0/24', + ... src_prefix='20.0.0.1/24', + ... ip_proto=6, + ... port='80 | 8000', + ... dst_port='>9000 & <9050', + ... src_port='>=8500 & <=9000', + ... icmp_type=0, + ... icmp_code=6, + ... tcp_flags='SYN+ACK & !=URGENT', + ... packet_len=1000, + ... dscp='22 | 24', + ... 
fragment='LF | ==FF') + >>> + """ + return cls._from_user(route_dist, **kwargs) -@_OptParamCapability.register_type(BGP_CAP_ROUTE_REFRESH) -class BGPOptParamCapabilityRouteRefresh(_OptParamEmptyCapability): - pass + @property + def formatted_nlri_str(self): + return '%s:%s' % (self.route_dist, self.prefix) + + +class FlowSpecIPv6NLRI(_FlowSpecNLRIBase): + """ + Flow Specification NLRI class for IPv6 [RFC draft-ietf-idr-flow-spec-v6-08] + """ + ROUTE_FAMILY = RF_IPv6_FLOWSPEC + FLOWSPEC_FAMILY = 'ipv6fs' + + @classmethod + def from_user(cls, **kwargs): + """ + Utility method for creating a NLRI instance. + + This function returns a NLRI instance from human readable format value. + + :param kwargs: The following arguments are available. + + =========== ============= ========= ============================== + Argument Value Operator Description + =========== ============= ========= ============================== + dst_prefix IPv6 Prefix Nothing Destination Prefix. + src_prefix IPv6 Prefix Nothing Source Prefix. + next_header Integer Numeric Next Header. + port Integer Numeric Port number. + dst_port Integer Numeric Destination port number. + src_port Integer Numeric Source port number. + icmp_type Integer Numeric ICMP type. + icmp_code Integer Numeric ICMP code. + tcp_flags Fixed string Bitmask TCP flags. + Supported values are + ``CWR``, ``ECN``, ``URGENT``, + ``ACK``, ``PUSH``, ``RST``, + ``SYN`` and ``FIN``. + packet_len Integer Numeric Packet length. + dscp Integer Numeric Differentiated Services + Code Point. + fragment Fixed string Bitmask Fragment. + Supported values are + ``ISF`` (Is a fragment), + ``FF`` (First fragment) and + ``LF`` (Last fragment) + flow_label Intefer Numeric Flow Label. + =========== ============= ========= ============================== + + .. Note:: + + For ``dst_prefix`` and ``src_prefix``, you can give "offset" value + like this: ``2001::2/128/32``. At this case, ``offset`` is 32. + ``offset`` can be omitted, then ``offset`` is treated as 0. + """ + return cls._from_user(**kwargs) + + +class FlowSpecVPNv6NLRI(_FlowSpecNLRIBase): + """ + Flow Specification NLRI class for VPNv6 [draft-ietf-idr-flow-spec-v6-08] + """ + + # flow-spec NLRI: + # +-----------------------------------+ + # | length (0xnn or 0xfn nn) | + # +-----------------------------------+ + # | RD (8 octets) | + # +-----------------------------------+ + # | NLRI value (variable) | + # +-----------------------------------+ + ROUTE_FAMILY = RF_VPNv6_FLOWSPEC + FLOWSPEC_FAMILY = 'vpnv6fs' + + def __init__(self, length=0, route_dist=None, rules=None): + super(FlowSpecVPNv6NLRI, self).__init__(length, rules) + assert route_dist is not None + self.route_dist = route_dist + + @classmethod + def _from_user(cls, route_dist, **kwargs): + rules = [] + for k, v in kwargs.items(): + subcls = _FlowSpecComponentBase.lookup_type_name( + k, cls.ROUTE_FAMILY.afi) + rule = subcls.from_str(str(v)) + rules.extend(rule) + rules.sort(key=lambda x: x.type) + return cls(route_dist=route_dist, rules=rules) + + @classmethod + def from_user(cls, route_dist, **kwargs): + """ + Utility method for creating a NLRI instance. + + This function returns a NLRI instance from human readable format value. + + :param route_dist: Route Distinguisher. 
+ :param kwargs: See :py:mod:`ryu.lib.packet.bgp.FlowSpecIPv6NLRI` + """ + return cls._from_user(route_dist, **kwargs) + + @property + def formatted_nlri_str(self): + return '%s:%s' % (self.route_dist, self.prefix) + + +class FlowSpecL2VPNNLRI(_FlowSpecNLRIBase): + """ + Flow Specification NLRI class for L2VPN [draft-ietf-idr-flowspec-l2vpn-05] + """ + + # flow-spec NLRI: + # +-----------------------------------+ + # | length (0xnn or 0xfn nn) | + # +-----------------------------------+ + # | RD (8 octets) | + # +-----------------------------------+ + # | NLRI value (variable) | + # +-----------------------------------+ + ROUTE_FAMILY = RF_L2VPN_FLOWSPEC + FLOWSPEC_FAMILY = 'l2vpnfs' + + def __init__(self, length=0, route_dist=None, rules=None): + super(FlowSpecL2VPNNLRI, self).__init__(length, rules) + assert route_dist is not None + self.route_dist = route_dist + + @classmethod + def _from_user(cls, route_dist, **kwargs): + rules = [] + for k, v in kwargs.items(): + subcls = _FlowSpecComponentBase.lookup_type_name( + k, cls.ROUTE_FAMILY.afi) + rule = subcls.from_str(str(v)) + rules.extend(rule) + rules.sort(key=lambda x: x.type) + return cls(route_dist=route_dist, rules=rules) + + @classmethod + def from_user(cls, route_dist, **kwargs): + """ + Utility method for creating a L2VPN NLRI instance. + + This function returns a L2VPN NLRI instance + from human readable format value. + + :param kwargs: The following arguments are available. + + ============== ============= ========= ============================== + Argument Value Operator Description + ============== ============= ========= ============================== + ether_type Integer Numeric Ethernet Type. + src_mac Mac Address Nothing Source Mac address. + dst_mac Mac Address Nothing Destination Mac address. + llc_ssap Integer Numeric Source Service Access Point + in LLC. + llc_dsap Integer Numeric Destination Service Access + Point in LLC. + llc_control Integer Numeric Control field in LLC. + snap Integer Numeric Sub-Network Access Protocol + field. + vlan_id Integer Numeric VLAN ID. + vlan_cos Integer Numeric VLAN COS field. + inner_vlan_id Integer Numeric Inner VLAN ID. + inner_vlan_cos Integer Numeric Inner VLAN COS field. + ============== ============= ========= ============================== + """ + return cls._from_user(route_dist, **kwargs) + + @property + def formatted_nlri_str(self): + return '%s:%s' % (self.route_dist, self.prefix) + + +class _FlowSpecComponentBase(StringifyMixin, TypeDisp): + """ + Base class for Flow Specification NLRI component + """ + COMPONENT_NAME = None + + _BASE_STR = '!B' + _BASE_STR_SIZE = struct.calcsize(_BASE_STR) + + # Dictionary of COMPONENT_NAME to subclass. + # e.g.) 
+ # _NAMES = {'dst_prefix': FlowSpecDestPrefix, ...} + _NAMES = {} + + def __init__(self, type_=None): + if type_ is None: + type_, _ = self._rev_lookup_type(self.__class__) + self.type = type_ + + @classmethod + def register_type(cls, type_, afi): + cls._TYPES = cls._TYPES.copy() + cls._NAMES = cls._NAMES.copy() + + def _register_type(subcls): + cls._TYPES[(type_, afi)] = subcls + cls._NAMES[(subcls.COMPONENT_NAME, afi)] = subcls + cls._REV_TYPES = None + return subcls + + return _register_type + + @classmethod + def lookup_type_name(cls, type_name, afi): + return cls._NAMES[(type_name, afi)] + + @classmethod + def _lookup_type(cls, type_, afi): + try: + return cls._TYPES[(type_, afi)] + except KeyError: + return cls._UNKNOWN_TYPE + + @classmethod + def parse_header(cls, rest, afi): + (type_,) = struct.unpack_from( + cls._BASE_STR, six.binary_type(rest)) + rest = rest[cls._BASE_STR_SIZE:] + return cls._lookup_type(type_, afi), rest + + def serialize_header(self): + return struct.pack(self._BASE_STR, self.type) + + +class _FlowSpecIPv4Component(_FlowSpecComponentBase): + """ + Base class for Flow Specification for IPv4 NLRI component + """ + TYPE_DESTINATION_PREFIX = 0x01 + TYPE_SOURCE_PREFIX = 0x02 + TYPE_PROTOCOL = 0x03 + TYPE_PORT = 0x04 + TYPE_DESTINATION_PORT = 0x05 + TYPE_SOURCE_PORT = 0x06 + TYPE_ICMP = 0x07 + TYPE_ICMP_CODE = 0x08 + TYPE_TCP_FLAGS = 0x09 + TYPE_PACKET_LENGTH = 0x0a + TYPE_DIFFSERV_CODE_POINT = 0x0b + TYPE_FRAGMENT = 0x0c + + +class _FlowSpecIPv6Component(_FlowSpecComponentBase): + """ + Base class for Flow Specification for IPv6 NLRI component + """ + TYPE_DESTINATION_PREFIX = 0x01 + TYPE_SOURCE_PREFIX = 0x02 + TYPE_NEXT_HEADER = 0x03 + TYPE_PORT = 0x04 + TYPE_DESTINATION_PORT = 0x05 + TYPE_SOURCE_PORT = 0x06 + TYPE_ICMP = 0x07 + TYPE_ICMP_CODE = 0x08 + TYPE_TCP_FLAGS = 0x09 + TYPE_PACKET_LENGTH = 0x0a + TYPE_DIFFSERV_CODE_POINT = 0x0b + TYPE_FRAGMENT = 0x0c + TYPE_FLOW_LABEL = 0x0d + + +class _FlowSpecL2VPNComponent(_FlowSpecComponentBase): + """ + Base class for Flow Specification for L2VPN NLRI component + """ + TYPE_ETHER_TYPE = 0x0e + TYPE_SOURCE_MAC = 0x0f + TYPE_DESTINATION_MAC = 0x10 + TYPE_LLC_DSAP = 0x11 + TYPE_LLC_SSAP = 0x12 + TYPE_LLC_CONTROL = 0x13 + TYPE_SNAP = 0x14 + TYPE_VLAN_ID = 0x15 + TYPE_VLAN_COS = 0x16 + TYPE_INNER_VLAN_ID = 0x17 + TYPE_INNER_VLAN_COS = 0x18 + + +@_FlowSpecComponentBase.register_unknown_type() +class FlowSpecComponentUnknown(_FlowSpecComponentBase): + """ + Unknown component type for Flow Specification NLRI component + """ + + def __init__(self, buf, type_=None): + super(FlowSpecComponentUnknown, self).__init__(type_) + self.buf = buf + + @classmethod + def parse_body(cls, buf): + return cls(buf), None + + def serialize_body(self): + return self.buf + + +class _FlowSpecPrefixBase(_FlowSpecIPv4Component, IPAddrPrefix): + """ + Prefix base class for Flow Specification NLRI component + """ + + def __init__(self, length, addr, type_=None): + super(_FlowSpecPrefixBase, self).__init__(type_) + self.length = length + prefix = "%s/%s" % (addr, length) + self.addr = str(netaddr.ip.IPNetwork(prefix).network) + + @classmethod + def parse_body(cls, buf): + return cls.parser(buf) + + def serialize_body(self): + return self.serialize() + + @classmethod + def from_str(cls, value): + rule = [] + addr, length = value.split('/') + rule.append(cls(int(length), addr)) + return rule + + @property + def value(self): + return "%s/%s" % (self.addr, self.length) + + def to_str(self): + return self.value + + +class 
_FlowSpecIPv6PrefixBase(_FlowSpecIPv6Component, IP6AddrPrefix): + """ + Prefix base class for Flow Specification NLRI component + """ + _PACK_STR = '!BB' # length, offset + + def __init__(self, length, addr, offset=0, type_=None): + super(_FlowSpecIPv6PrefixBase, self).__init__(type_) + self.length = length + self.offset = offset + prefix = "%s/%s" % (addr, length) + self.addr = str(netaddr.ip.IPNetwork(prefix).network) + + @classmethod + def parser(cls, buf): + (length, offset) = struct.unpack_from( + cls._PACK_STR, six.binary_type(buf)) + rest = buf[struct.calcsize(cls._PACK_STR):] + byte_length = (length + 7) // 8 + addr = cls._from_bin(rest[:byte_length]) + rest = rest[byte_length:] + return cls(length=length, offset=offset, addr=addr), rest + + @classmethod + def parse_body(cls, buf): + return cls.parser(buf) + + def serialize(self): + byte_length = (self.length + 7) // 8 + bin_addr = self._to_bin(self.addr)[:byte_length] + buf = bytearray() + msg_pack_into(self._PACK_STR, buf, 0, self.length, self.offset) + return buf + bin_addr + + def serialize_body(self): + return self.serialize() + + @classmethod + def from_str(cls, value): + rule = [] + values = value.split('/') + if len(values) == 3: + rule.append(cls(int(values[1]), values[0], offset=int(values[2]))) + else: + rule.append(cls(int(values[1]), values[0])) + return rule + + @property + def value(self): + return "%s/%s/%s" % (self.addr, self.length, self.offset) + + def to_str(self): + return self.value + + +class _FlowSpecL2VPNPrefixBase(_FlowSpecL2VPNComponent): + """ + Prefix base class for Flow Specification NLRI component + """ + _PACK_STR = "!B6s" + + def __init__(self, length, addr, type_=None): + super(_FlowSpecL2VPNPrefixBase, self).__init__(type_) + self.length = length + self.addr = addr.lower() + + @classmethod + def parse_body(cls, buf): + (length, addr) = struct.unpack_from( + cls._PACK_STR, six.binary_type(buf)) + rest = buf[struct.calcsize(cls._PACK_STR):] + addr = addrconv.mac.bin_to_text(addr) + return cls(length=length, addr=addr), rest + + def serialize(self): + addr = addrconv.mac.text_to_bin(self.addr) + return struct.pack(self._PACK_STR, self.length, addr) + + def serialize_body(self): + return self.serialize() + + @classmethod + def from_str(cls, value): + return [cls(len(value.split(':')), value)] + + @property + def value(self): + return self.addr + + def to_str(self): + return self.value + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv4Component.TYPE_DESTINATION_PREFIX, addr_family.IP) +class FlowSpecDestPrefix(_FlowSpecPrefixBase): + """ + Destination Prefix for Flow Specification NLRI component + """ + COMPONENT_NAME = 'dst_prefix' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv4Component.TYPE_SOURCE_PREFIX, addr_family.IP) +class FlowSpecSrcPrefix(_FlowSpecPrefixBase): + """ + Source Prefix for Flow Specification NLRI component + """ + COMPONENT_NAME = 'src_prefix' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_DESTINATION_PREFIX, addr_family.IP6) +class FlowSpecIPv6DestPrefix(_FlowSpecIPv6PrefixBase): + """ + IPv6 destination Prefix for Flow Specification NLRI component + """ + COMPONENT_NAME = 'dst_prefix' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_SOURCE_PREFIX, addr_family.IP6) +class FlowSpecIPv6SrcPrefix(_FlowSpecIPv6PrefixBase): + """ + IPv6 source Prefix for Flow Specification NLRI component + """ + COMPONENT_NAME = 'src_prefix' + + +class _FlowSpecOperatorBase(_FlowSpecComponentBase): + """Operator base class for 
Flow Specification NLRI component + + ===================== =============================================== + Attribute Description + ===================== =============================================== + operator Match conditions. + value Value of component. + ===================== =============================================== + """ + _OPE_PACK_STR = '!B' + _OPE_PACK_STR_SIZE = struct.calcsize(_OPE_PACK_STR) + _VAL_PACK_STR = '!%ds' + + END_OF_LIST = 1 << 7 # END OF LIST bit + AND = 1 << 6 # AND bit + OR = 0 # OR + _LENGTH_BIT_MASK = 0x30 # The mask for length of the value + + _logical_conditions = { + "|": OR, + "&": AND, + } + _comparison_conditions = {} + + def __init__(self, operator, value, type_=None): + super(_FlowSpecOperatorBase, self).__init__(type_) + self.operator = operator + self.value = value + + @classmethod + def parse_body(cls, rest): + (operator,) = struct.unpack_from(cls._OPE_PACK_STR, + six.binary_type(rest)) + rest = rest[cls._OPE_PACK_STR_SIZE:] + length = 1 << ((operator & cls._LENGTH_BIT_MASK) >> 4) + value_type = type_desc.IntDescr(length) + value = value_type.to_user(rest) + rest = rest[length:] + + return cls(operator, value), rest + + def serialize_body(self): + byte_length = (self.value.bit_length() + 7) // 8 or 1 + length = int(math.ceil(math.log(byte_length, 2))) + self.operator |= length << 4 + buf = struct.pack(self._OPE_PACK_STR, self.operator) + value_type = type_desc.IntDescr(1 << length) + buf += struct.pack(self._VAL_PACK_STR % (1 << length), + value_type.from_user(self.value)) + + return buf + + @classmethod + def from_str(cls, val): + operator = 0 + rules = [] + + # e.g.) + # value = '80 | ==90|>=8000&<=9000 | <100 & >110' + # elements = ['80', '|', '==', '90', '|', '>=', '8000', '&', + # '<=', '9000', '|', '<', '100', '&', '>', '110'] + elements = [v.strip() for v in re.split( + r'([0-9]+)|([A-Z]+)|(\|&\+)|([!=<>]+)', val) if v and v.strip()] + + elms_iter = iter(elements) + + for elm in elms_iter: + if elm in cls._logical_conditions: + # ['&', '|'] + operator |= cls._logical_conditions[elm] + continue + elif elm in cls._comparison_conditions: + # ['=', '<', '>', '<=', '>=' ] or ['=', '!='] + operator |= cls._comparison_conditions[elm] + continue + elif elm == '+': + # If keyword "+" is used, add the value to the previous rule. + # e.g.) 
'SYN+ACK' or '!=SYN+ACK' + rules[-1].value |= cls._to_value(next(elms_iter)) + continue + + value = cls._to_value(elm) + + operator = cls.normalize_operator(operator) + + rules.append(cls(operator, value)) + operator = 0 + + return rules + + @classmethod + def _to_value(cls, value): + return value + + @classmethod + def normalize_operator(cls, operator): + return operator + + +class _FlowSpecNumeric(_FlowSpecOperatorBase): + """ + Numeric operator class for Flow Specification NLRI component + """ + # Numeric operator format + # 0 1 2 3 4 5 6 7 + # +---+---+---+---+---+---+---+---+ + # | e | a | len | 0 |lt |gt |eq | + # +---+---+---+---+---+---+---+---+ + + LT = 1 << 2 # Less than comparison bit + GT = 1 << 1 # Greater than comparison bit + EQ = 1 << 0 # Equality bit + + _comparison_conditions = { + '==': EQ, + '<': LT, + '>': GT, + '<=': LT | EQ, + '>=': GT | EQ + } + + @classmethod + def _to_value(cls, value): + try: + return int(str(value), 0) + except ValueError: + raise ValueError('Invalid params: %s="%s"' % ( + cls.COMPONENT_NAME, value)) + + def to_str(self): + string = "" + if self.operator & self.AND: + string += "&" + + operator = self.operator & (self.LT | self.GT | self.EQ) + for k, v in self._comparison_conditions.items(): + if operator == v: + string += k + + string += str(self.value) + + return string + + @classmethod + def normalize_operator(cls, operator): + if operator & (cls.LT | cls.GT | cls.EQ): + return operator + else: + return operator | cls.EQ + + +class _FlowSpecBitmask(_FlowSpecOperatorBase): + """ + Bitmask operator class for Flow Specification NLRI component + """ + # Bitmask operator format + # 0 1 2 3 4 5 6 7 + # +---+---+---+---+---+---+---+---+ + # | e | a | len | 0 | 0 |not| m | + # +---+---+---+---+---+---+---+---+ + + NOT = 1 << 1 # NOT bit + MATCH = 1 << 0 # MATCH bit + + _comparison_conditions = { + '!=': NOT, + '==': MATCH, + } + + _bitmask_flags = {} + + @classmethod + def _to_value(cls, value): + try: + return cls.__dict__[value] + except KeyError: + raise ValueError('Invalid params: %s="%s"' % ( + cls.COMPONENT_NAME, value)) + + def to_str(self): + string = "" + if self.operator & self.AND: + string += "&" + + operator = self.operator & (self.NOT | self.MATCH) + for k, v in self._comparison_conditions.items(): + if operator == v: + string += k + + plus = "" + for k, v in self._bitmask_flags.items(): + if self.value & k: + string += plus + v + plus = "+" + + return string + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv4Component.TYPE_PROTOCOL, addr_family.IP) +class FlowSpecIPProtocol(_FlowSpecNumeric): + """IP Protocol for Flow Specification NLRI component + + Set the IP protocol number at value. + """ + COMPONENT_NAME = 'ip_proto' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_NEXT_HEADER, addr_family.IP6) +class FlowSpecNextHeader(_FlowSpecNumeric): + """Next Header value in IPv6 packets + + Set the IP protocol number at value + """ + COMPONENT_NAME = 'next_header' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv4Component.TYPE_PORT, addr_family.IP) +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_PORT, addr_family.IP6) +class FlowSpecPort(_FlowSpecNumeric): + """Port number for Flow Specification NLRI component + + Set the source or destination TCP/UDP ports at value. 
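    A minimal usage sketch (an illustration added here, not taken from the
    upstream file; it assumes only the ``from_user`` interface and the
    Numeric operator keywords documented above, with arbitrary values)::

        >>> from ryu.lib.packet import bgp
        >>> nlri = bgp.FlowSpecIPv4NLRI.from_user(
        ...     dst_prefix='10.0.0.0/24',
        ...     port='>=80 & <=443')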
+ """ + COMPONENT_NAME = 'port' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv4Component.TYPE_DESTINATION_PORT, addr_family.IP) +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_DESTINATION_PORT, addr_family.IP6) +class FlowSpecDestPort(_FlowSpecNumeric): + """Destination port number for Flow Specification NLRI component + + Set the destination port of a TCP or UDP packet at value. + """ + COMPONENT_NAME = 'dst_port' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv4Component.TYPE_SOURCE_PORT, addr_family.IP) +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_SOURCE_PORT, addr_family.IP6) +class FlowSpecSrcPort(_FlowSpecNumeric): + """Source port number for Flow Specification NLRI component + + Set the source port of a TCP or UDP packet at value. + """ + COMPONENT_NAME = 'src_port' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv4Component.TYPE_ICMP, addr_family.IP) +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_ICMP, addr_family.IP6) +class FlowSpecIcmpType(_FlowSpecNumeric): + """ICMP type for Flow Specification NLRI component + + Set the type field of an ICMP packet at value. + """ + COMPONENT_NAME = 'icmp_type' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv4Component.TYPE_ICMP_CODE, addr_family.IP) +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_ICMP_CODE, addr_family.IP6) +class FlowSpecIcmpCode(_FlowSpecNumeric): + """ICMP code Flow Specification NLRI component + + Set the code field of an ICMP packet at value. + """ + COMPONENT_NAME = 'icmp_code' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv4Component.TYPE_TCP_FLAGS, addr_family.IP) +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_TCP_FLAGS, addr_family.IP6) +class FlowSpecTCPFlags(_FlowSpecBitmask): + """TCP flags for Flow Specification NLRI component + + Supported TCP flags are CWR, ECN, URGENT, ACK, PUSH, RST, SYN and FIN. + """ + COMPONENT_NAME = 'tcp_flags' + + # bitmask format + # 0 1 2 3 4 5 6 7 + # +----+----+----+----+----+----+----+----+ + # |CWR |ECN |URG |ACK |PSH |RST |SYN |FIN | + # +----+----+----+----+----+----+----+----+ + + CWR = 1 << 7 + ECN = 1 << 6 + URGENT = 1 << 5 + ACK = 1 << 4 + PUSH = 1 << 3 + RST = 1 << 2 + SYN = 1 << 1 + FIN = 1 << 0 + + _bitmask_flags = collections.OrderedDict() + _bitmask_flags[SYN] = 'SYN' + _bitmask_flags[ACK] = 'ACK' + _bitmask_flags[FIN] = 'FIN' + _bitmask_flags[RST] = 'RST' + _bitmask_flags[PUSH] = 'PUSH' + _bitmask_flags[URGENT] = 'URGENT' + _bitmask_flags[ECN] = 'ECN' + _bitmask_flags[CWR] = 'CWR' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv4Component.TYPE_PACKET_LENGTH, addr_family.IP) +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_PACKET_LENGTH, addr_family.IP6) +class FlowSpecPacketLen(_FlowSpecNumeric): + """Packet length for Flow Specification NLRI component + + Set the total IP packet length at value. + """ + COMPONENT_NAME = 'packet_len' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv4Component.TYPE_DIFFSERV_CODE_POINT, addr_family.IP) +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_DIFFSERV_CODE_POINT, addr_family.IP6) +class FlowSpecDSCP(_FlowSpecNumeric): + """Diffserv Code Point for Flow Specification NLRI component + + Set the 6-bit DSCP field at value. 
[RFC2474] + """ + COMPONENT_NAME = 'dscp' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv4Component.TYPE_FRAGMENT, addr_family.IP) +class FlowSpecFragment(_FlowSpecBitmask): + """Fragment for Flow Specification NLRI component + + Set the bitmask for operand format at value. + The following values are supported. + + ========== =============================================== + Attribute Description + ========== =============================================== + LF Last fragment + FF First fragment + ISF Is a fragment + DF Don't fragment + ========== =============================================== + """ + COMPONENT_NAME = 'fragment' + + # bitmask format + # 0 1 2 3 4 5 6 7 + # +---+---+---+---+---+---+---+---+ + # | Reserved |LF |FF |IsF|DF | + # +---+---+---+---+---+---+---+---+ + + LF = 1 << 3 + FF = 1 << 2 + ISF = 1 << 1 + DF = 1 << 0 + + _bitmask_flags = collections.OrderedDict() + _bitmask_flags[LF] = 'LF' + _bitmask_flags[FF] = 'FF' + _bitmask_flags[ISF] = 'ISF' + _bitmask_flags[DF] = 'DF' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_FRAGMENT, addr_family.IP6) +class FlowSpecIPv6Fragment(_FlowSpecBitmask): + """Fragment for Flow Specification for IPv6 NLRI component + + ========== =============================================== + Attribute Description + ========== =============================================== + LF Last fragment + FF First fragment + ISF Is a fragment + ========== =============================================== + """ + COMPONENT_NAME = 'fragment' + + # bitmask format + # 0 1 2 3 4 5 6 7 + # +---+---+---+---+---+---+---+---+ + # | Reserved |LF |FF |IsF| 0 | + # +---+---+---+---+---+---+---+---+ + + LF = 1 << 3 + FF = 1 << 2 + ISF = 1 << 1 + + _bitmask_flags = collections.OrderedDict() + _bitmask_flags[LF] = 'LF' + _bitmask_flags[FF] = 'FF' + _bitmask_flags[ISF] = 'ISF' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecL2VPNComponent.TYPE_ETHER_TYPE, addr_family.L2VPN) +class FlowSpecEtherType(_FlowSpecNumeric): + """Ethernet Type field in an Ethernet frame. + + Set the 2 byte value of an Ethernet Type field at value. + """ + COMPONENT_NAME = 'ether_type' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecL2VPNComponent.TYPE_SOURCE_MAC, addr_family.L2VPN) +class FlowSpecSourceMac(_FlowSpecL2VPNPrefixBase): + """Source Mac Address. + + Set the Mac Address at value. + """ + COMPONENT_NAME = 'src_mac' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecL2VPNComponent.TYPE_DESTINATION_MAC, addr_family.L2VPN) +class FlowSpecDestinationMac(_FlowSpecL2VPNPrefixBase): + """Destination Mac Address. + + Set the Mac Address at value. + """ + COMPONENT_NAME = 'dst_mac' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecL2VPNComponent.TYPE_LLC_DSAP, addr_family.L2VPN) +class FlowSpecLLCDSAP(_FlowSpecNumeric): + """Destination SAP field in LLC header in an Ethernet frame. + + Set the 2 byte value of an Destination SAP at value. + """ + COMPONENT_NAME = 'llc_dsap' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecL2VPNComponent.TYPE_LLC_SSAP, addr_family.L2VPN) +class FlowSpecLLCSSAP(_FlowSpecNumeric): + """Source SAP field in LLC header in an Ethernet frame. + + Set the 2 byte value of an Source SAP at value. + """ + COMPONENT_NAME = 'llc_ssap' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecL2VPNComponent.TYPE_LLC_CONTROL, addr_family.L2VPN) +class FlowSpecLLCControl(_FlowSpecNumeric): + """Control field in LLC header in an Ethernet frame. + + Set the Contorol field at value. 
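    For orientation, an illustrative sketch (not part of the upstream file)
    of how this and the other L2VPN components above are supplied through
    ``FlowSpecL2VPNNLRI.from_user``; the route distinguisher and values are
    arbitrary::

        >>> from ryu.lib.packet import bgp
        >>> nlri = bgp.FlowSpecL2VPNNLRI.from_user(
        ...     route_dist='65001:250',
        ...     ether_type=0x0800,
        ...     vlan_id='>4000',
        ...     llc_control=3)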
+ """ + COMPONENT_NAME = 'llc_control' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecL2VPNComponent.TYPE_SNAP, addr_family.L2VPN) +class FlowSpecSNAP(_FlowSpecNumeric): + """Sub-Network Access Protocol field in an Ethernet frame. + + Set the 5 byte SNAP field at value. + """ + COMPONENT_NAME = 'snap' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecL2VPNComponent.TYPE_VLAN_ID, addr_family.L2VPN) +class FlowSpecVLANID(_FlowSpecNumeric): + """VLAN ID. + + Set VLAN ID at value. + """ + COMPONENT_NAME = 'vlan_id' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecL2VPNComponent.TYPE_VLAN_COS, addr_family.L2VPN) +class FlowSpecVLANCoS(_FlowSpecNumeric): + """VLAN CoS Fields in an Ethernet frame. + + Set the 3 bit CoS field at value. + """ + COMPONENT_NAME = 'vlan_cos' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecL2VPNComponent.TYPE_INNER_VLAN_ID, addr_family.L2VPN) +class FlowSpecInnerVLANID(_FlowSpecNumeric): + """Inner VLAN ID. + + Set VLAN ID at value. + """ + COMPONENT_NAME = 'inner_vlan_id' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecL2VPNComponent.TYPE_INNER_VLAN_COS, addr_family.L2VPN) +class FlowSpecInnerVLANCoS(_FlowSpecNumeric): + """VLAN CoS Fields in an Inner Ethernet frame. + + Set the 3 bit CoS field at value.. + """ + COMPONENT_NAME = 'inner_vlan_cos' + + +@_FlowSpecComponentBase.register_type( + _FlowSpecIPv6Component.TYPE_FLOW_LABEL, addr_family.IP6) +class FlowSpecIPv6FlowLabel(_FlowSpecNumeric): + COMPONENT_NAME = 'flow_label' + + +@functools.total_ordering +class RouteTargetMembershipNLRI(StringifyMixin): + """Route Target Membership NLRI. + + Route Target membership NLRI is advertised in BGP UPDATE messages using + the MP_REACH_NLRI and MP_UNREACH_NLRI attributes. + """ + + ROUTE_FAMILY = RF_RTC_UC + DEFAULT_AS = '0:0' + DEFAULT_RT = '0:0' + + def __init__(self, origin_as, route_target): + # If given is not default_as and default_rt + if not (origin_as is self.DEFAULT_AS and + route_target is self.DEFAULT_RT): + # We validate them + if (not self._is_valid_asn(origin_as) or + not self._is_valid_ext_comm_attr(route_target)): + raise ValueError('Invalid params.') + self.origin_as = origin_as + self.route_target = route_target + + def _is_valid_asn(self, asn): + """Returns True if the given AS number is Two or Four Octet.""" + if isinstance(asn, six.integer_types) and 0 <= asn <= 0xffffffff: + return True + else: + return False + + def _is_valid_ext_comm_attr(self, attr): + """Validates *attr* as string representation of RT or SOO. + + Returns True if *attr* is as per our convention of RT or SOO, else + False. Our convention is to represent RT/SOO is a string with format: + *global_admin_part:local_admin_path* + """ + is_valid = True + + if not isinstance(attr, str): + is_valid = False + else: + first, second = attr.split(':') + try: + if '.' 
in first: + socket.inet_aton(first) + else: + int(first) + int(second) + except (ValueError, socket.error): + is_valid = False + + return is_valid + + @property + def formatted_nlri_str(self): + return "%s:%s" % (self.origin_as, self.route_target) + + def is_default_rtnlri(self): + if (self._origin_as is self.DEFAULT_AS and + self._route_target is self.DEFAULT_RT): + return True + return False + + def __lt__(self, other): + return ((self.origin_as, self.route_target) < + (other.origin_as, other.route_target)) + + def __eq__(self, other): + return ((self.origin_as, self.route_target) == + (other.origin_as, other.route_target)) + + def __hash__(self): + return hash((self.origin_as, self.route_target)) + + @classmethod + def parser(cls, buf): + idx = 0 + + # Extract origin AS. + origin_as, = struct.unpack_from('!I', buf, idx) + idx += 4 + + # Extract route target. + route_target = _ExtendedCommunity(buf[idx:]) + return cls(origin_as, route_target) + + def serialize(self): + rt_nlri = b'' + if not self.is_default_rtnlri(): + rt_nlri += struct.pack('!I', self.origin_as) + # Encode route target + rt_nlri += self.route_target.serialize() + + # RT Nlri is 12 octets + return struct.pack('B', (8 * 12)) + rt_nlri + + +def _addr_class_key(route_family): + return route_family.afi, route_family.safi + + +_ADDR_CLASSES = { + _addr_class_key(RF_IPv4_UC): IPAddrPrefix, + _addr_class_key(RF_IPv6_UC): IP6AddrPrefix, + _addr_class_key(RF_IPv4_MPLS): LabelledIPAddrPrefix, + _addr_class_key(RF_IPv6_MPLS): LabelledIP6AddrPrefix, + _addr_class_key(RF_IPv4_VPN): LabelledVPNIPAddrPrefix, + _addr_class_key(RF_IPv6_VPN): LabelledVPNIP6AddrPrefix, + _addr_class_key(RF_L2_EVPN): EvpnNLRI, + _addr_class_key(RF_IPv4_FLOWSPEC): FlowSpecIPv4NLRI, + _addr_class_key(RF_IPv6_FLOWSPEC): FlowSpecIPv6NLRI, + _addr_class_key(RF_VPNv4_FLOWSPEC): FlowSpecVPNv4NLRI, + _addr_class_key(RF_VPNv6_FLOWSPEC): FlowSpecVPNv6NLRI, + _addr_class_key(RF_L2VPN_FLOWSPEC): FlowSpecL2VPNNLRI, + _addr_class_key(RF_RTC_UC): RouteTargetMembershipNLRI, +} + + +def _get_addr_class(afi, safi): + try: + return _ADDR_CLASSES[(afi, safi)] + except KeyError: + return _BinAddrPrefix + + +class _OptParam(StringifyMixin, TypeDisp, _Value): + _PACK_STR = '!BB' # type, length + + def __init__(self, type_, value=None, length=None): + if type_ is None: + type_ = self._rev_lookup_type(self.__class__) + self.type = type_ + self.length = length + if value is not None: + self.value = value + + @classmethod + def parser(cls, buf): + (type_, length) = struct.unpack_from(cls._PACK_STR, + six.binary_type(buf)) + rest = buf[struct.calcsize(cls._PACK_STR):] + value = bytes(rest[:length]) + rest = rest[length:] + subcls = cls._lookup_type(type_) + caps = subcls.parse_value(value) + if not isinstance(caps, list): + caps = [subcls(type_=type_, length=length, **caps[0])] + return caps, rest + + def serialize(self): + # fixup + value = self.serialize_value() + self.length = len(value) + + buf = bytearray() + msg_pack_into(self._PACK_STR, buf, 0, self.type, self.length) + return buf + value + + +@_OptParam.register_unknown_type() +class BGPOptParamUnknown(_OptParam): + @classmethod + def parse_value(cls, buf): + return { + 'value': buf + }, cls + + def serialize_value(self): + return self.value + + +@_OptParam.register_type(BGP_OPT_CAPABILITY) +class _OptParamCapability(_OptParam, TypeDisp): + _CAP_HDR_PACK_STR = '!BB' + + def __init__(self, cap_code=None, cap_value=None, cap_length=None, + type_=None, length=None): + super(_OptParamCapability, 
self).__init__(type_=BGP_OPT_CAPABILITY, + length=length) + if cap_code is None: + cap_code = self._rev_lookup_type(self.__class__) + self.cap_code = cap_code + if cap_value is not None: + self.cap_value = cap_value + if cap_length is not None: + self.cap_length = cap_length + + @classmethod + def parse_value(cls, buf): + caps = [] + while len(buf) > 0: + (code, length) = struct.unpack_from(cls._CAP_HDR_PACK_STR, + six.binary_type(buf)) + value = buf[struct.calcsize(cls._CAP_HDR_PACK_STR):] + buf = buf[length + 2:] + kwargs = { + 'cap_code': code, + 'cap_length': length, + } + subcls = cls._lookup_type(code) + kwargs.update(subcls.parse_cap_value(value)) + caps.append(subcls(type_=BGP_OPT_CAPABILITY, length=length + 2, + **kwargs)) + return caps + + def serialize_value(self): + # fixup + cap_value = self.serialize_cap_value() + self.cap_length = len(cap_value) + + buf = bytearray() + msg_pack_into(self._CAP_HDR_PACK_STR, buf, 0, self.cap_code, + self.cap_length) + return buf + cap_value + + +class _OptParamEmptyCapability(_OptParamCapability): + @classmethod + def parse_cap_value(cls, buf): + return {} + + def serialize_cap_value(self): + return bytearray() + + +@_OptParamCapability.register_unknown_type() +class BGPOptParamCapabilityUnknown(_OptParamCapability): + @classmethod + def parse_cap_value(cls, buf): + return {'cap_value': buf} + + def serialize_cap_value(self): + return self.cap_value + + +@_OptParamCapability.register_type(BGP_CAP_ROUTE_REFRESH) +class BGPOptParamCapabilityRouteRefresh(_OptParamEmptyCapability): + pass @_OptParamCapability.register_type(BGP_CAP_ROUTE_REFRESH_CISCO) @@ -2333,7 +3610,7 @@ pass -class _PathAttribute(StringifyMixin, _TypeDisp, _Value): +class _PathAttribute(StringifyMixin, TypeDisp, _Value): _PACK_STR = '!BB' # flags, type _PACK_STR_LEN = '!B' # length _PACK_STR_EXT_LEN = '!H' # length w/ BGP_ATTR_FLAG_EXTENDED_LENGTH @@ -2443,13 +3720,12 @@ return count - def has_local_as(self, local_as): + def has_local_as(self, local_as, max_count=0): """Check if *local_as* is already present on path list.""" + _count = 0 for as_path_seg in self.value: - for as_num in as_path_seg: - if as_num == local_as: - return True - return False + _count += list(as_path_seg).count(local_as) + return _count > max_count def has_matching_leftmost(self, remote_as): """Check if leftmost AS matches *remote_as*.""" @@ -2750,7 +4026,7 @@ _VALUE_PACK_STR = '!4s' _ATTR_FLAGS = BGP_ATTR_FLAG_OPTIONAL _TYPE = { - 'ascii': [ + 'asciilist': [ 'value' ] } @@ -2841,6 +4117,7 @@ # 01 03 Route Origin Community (IPv4 address specific) # 02 03 Route Origin Community (four-octet AS specific, RFC 5668) # 06 sub-type Ethernet VPN Extended Community (RFC 7432) +# 80 sub-type Flow Specification Extended Community (RFC 5575) @_PathAttribute.register_type(BGP_ATTR_TYPE_EXTENDED_COMMUNITIES) class BGPPathAttributeExtendedCommunities(_PathAttribute): @@ -2893,7 +4170,7 @@ return self._community_list(3) -class _ExtendedCommunity(StringifyMixin, _TypeDisp, _Value): +class _ExtendedCommunity(StringifyMixin, TypeDisp, _Value): _PACK_STR = '!B7s' # type high (+ type low), value _PACK_STR_SIZE = struct.calcsize(_PACK_STR) _SUBTYPE_PACK_STR = '!B' # subtype @@ -2914,6 +4191,20 @@ EVPN_MAC_MOBILITY = (EVPN, SUBTYPE_EVPN_MAC_MOBILITY) EVPN_ESI_LABEL = (EVPN, SUBTYPE_EVPN_ESI_LABEL) EVPN_ES_IMPORT_RT = (EVPN, SUBTYPE_EVPN_ES_IMPORT_RT) + FLOWSPEC = 0x80 + FLOWSPEC_L2VPN = 0x08 + SUBTYPE_FLOWSPEC_TRAFFIC_RATE = 0x06 + SUBTYPE_FLOWSPEC_TRAFFIC_ACTION = 0x07 + SUBTYPE_FLOWSPEC_REDIRECT = 0x08 + 
SUBTYPE_FLOWSPEC_TRAFFIC_REMARKING = 0x09 + SUBTYPE_FLOWSPEC_VLAN_ACTION = 0x0a + SUBTYPE_FLOWSPEC_TPID_ACTION = 0x0b + FLOWSPEC_TRAFFIC_RATE = (FLOWSPEC, SUBTYPE_FLOWSPEC_TRAFFIC_RATE) + FLOWSPEC_TRAFFIC_ACTION = (FLOWSPEC, SUBTYPE_FLOWSPEC_TRAFFIC_ACTION) + FLOWSPEC_REDIRECT = (FLOWSPEC, SUBTYPE_FLOWSPEC_REDIRECT) + FLOWSPEC_TRAFFIC_REMARKING = (FLOWSPEC, SUBTYPE_FLOWSPEC_TRAFFIC_REMARKING) + FLOWSPEC_VLAN_ACTION = (FLOWSPEC_L2VPN, SUBTYPE_FLOWSPEC_VLAN_ACTION) + FLOWSPEC_TPID_ACTION = (FLOWSPEC_L2VPN, SUBTYPE_FLOWSPEC_TPID_ACTION) def __init__(self, type_=None): if type_ is None: @@ -3073,25 +4364,69 @@ # | Reserved=0 | ESI Label | # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ _VALUE_PACK_STR = '!BB2x3s' - _VALUE_FIELDS = ['subtype', 'flags', 'esi_label'] + _VALUE_FIELDS = ['subtype', 'flags'] - def __init__(self, **kwargs): + # Classification for Flags. + SINGLE_ACTIVE_BIT = 1 << 0 + + def __init__(self, label=None, mpls_label=None, vni=None, **kwargs): super(BGPEvpnEsiLabelExtendedCommunity, self).__init__() self.do_init(BGPEvpnEsiLabelExtendedCommunity, self, kwargs) + if label: + # If binary type label field value is specified, stores it + # and decodes as MPLS label and VNI. + self._label = label + self._mpls_label, _ = mpls.label_from_bin(label) + self._vni = vxlan.vni_from_bin(label) + else: + # If either MPLS label or VNI is specified, stores it + # and encodes into binary type label field value. + self._label = self._serialize_label(mpls_label, vni) + self._mpls_label = mpls_label + self._vni = vni + + def _serialize_label(self, mpls_label, vni): + if mpls_label: + return mpls.label_to_bin(mpls_label, is_bos=True) + elif vni: + return vxlan.vni_to_bin(vni) + else: + return b'\x00' * 3 + @classmethod def parse_value(cls, buf): (subtype, flags, - esi_label) = struct.unpack_from(cls._VALUE_PACK_STR, buf) + label) = struct.unpack_from(cls._VALUE_PACK_STR, buf) return { 'subtype': subtype, 'flags': flags, - 'esi_label': type_desc.Int3.to_user(esi_label), + 'label': label, } def serialize_value(self): return struct.pack(self._VALUE_PACK_STR, self.subtype, self.flags, - type_desc.Int3.from_user(self.esi_label)) + self._label) + + @property + def mpls_label(self): + return self._mpls_label + + @mpls_label.setter + def mpls_label(self, mpls_label): + self._label = mpls.label_to_bin(mpls_label, is_bos=True) + self._mpls_label = mpls_label + self._vni = None # disables VNI + + @property + def vni(self): + return self._vni + + @vni.setter + def vni(self, vni): + self._label = vxlan.vni_to_bin(vni) + self._mpls_label = None # disables ESI label + self._vni = vni @_ExtendedCommunity.register_type(_ExtendedCommunity.EVPN_ES_IMPORT_RT) @@ -3130,6 +4465,277 @@ addrconv.mac.text_to_bin(self.es_import)) +@_ExtendedCommunity.register_type(_ExtendedCommunity.FLOWSPEC_TRAFFIC_RATE) +class BGPFlowSpecTrafficRateCommunity(_ExtendedCommunity): + """ + Flow Specification Traffic Filtering Actions for Traffic Rate. + + ========================== =============================================== + Attribute Description + ========================== =============================================== + as_number Autonomous System number. + rate_info rate information. 
+ ========================== =============================================== + """ + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Type=0x80 | Sub-Type=0x06 | AS number | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Rate information | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _VALUE_PACK_STR = '!BHf' + _VALUE_FIELDS = ['subtype', 'as_number', 'rate_info'] + ACTION_NAME = 'traffic_rate' + + def __init__(self, **kwargs): + super(BGPFlowSpecTrafficRateCommunity, self).__init__() + kwargs['subtype'] = self.SUBTYPE_FLOWSPEC_TRAFFIC_RATE + self.do_init(BGPFlowSpecTrafficRateCommunity, self, kwargs) + + @classmethod + def parse_value(cls, buf): + (subtype, as_number, + rate_info) = struct.unpack_from(cls._VALUE_PACK_STR, buf) + return { + 'subtype': subtype, + 'as_number': as_number, + 'rate_info': rate_info, + } + + def serialize_value(self): + return struct.pack(self._VALUE_PACK_STR, self.subtype, + self.as_number, self.rate_info) + + +@_ExtendedCommunity.register_type(_ExtendedCommunity.FLOWSPEC_TRAFFIC_ACTION) +class BGPFlowSpecTrafficActionCommunity(_ExtendedCommunity): + """ + Flow Specification Traffic Filtering Actions for Traffic Action. + + ========================== =============================================== + Attribute Description + ========================== =============================================== + action Apply action. + The supported action are + ``SAMPLE`` and ``TERMINAL``. + ========================== =============================================== + """ + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Type=0x80 | Sub-Type=0x07 | Traffic-action | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Traffic-action Cont'd | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + # Traffic-action format + # 40 41 42 43 44 45 46 47 + # +---+---+---+---+---+---+---+---+ + # | reserved | S | T | + # +---+---+---+---+---+---+---+---+ + + _VALUE_PACK_STR = '!B5xB' + _VALUE_FIELDS = ['subtype', 'action'] + ACTION_NAME = 'traffic_action' + SAMPLE = 1 << 1 + TERMINAL = 1 << 0 + + def __init__(self, **kwargs): + super(BGPFlowSpecTrafficActionCommunity, self).__init__() + kwargs['subtype'] = self.SUBTYPE_FLOWSPEC_TRAFFIC_ACTION + self.do_init(BGPFlowSpecTrafficActionCommunity, self, kwargs) + + +@_ExtendedCommunity.register_type(_ExtendedCommunity.FLOWSPEC_REDIRECT) +class BGPFlowSpecRedirectCommunity(BGPTwoOctetAsSpecificExtendedCommunity): + """ + Flow Specification Traffic Filtering Actions for Redirect. + + ========================== =============================================== + Attribute Description + ========================== =============================================== + as_number Autonomous System number. + local_administrator Local Administrator. 
+ ========================== =============================================== + """ + ACTION_NAME = 'redirect' + + def __init__(self, **kwargs): + super(BGPTwoOctetAsSpecificExtendedCommunity, self).__init__() + kwargs['subtype'] = self.SUBTYPE_FLOWSPEC_REDIRECT + self.do_init(BGPTwoOctetAsSpecificExtendedCommunity, self, kwargs) + + +@_ExtendedCommunity.register_type( + _ExtendedCommunity.FLOWSPEC_TRAFFIC_REMARKING) +class BGPFlowSpecTrafficMarkingCommunity(_ExtendedCommunity): + """ + Flow Specification Traffic Filtering Actions for Traffic Marking. + + ========================== =============================================== + Attribute Description + ========================== =============================================== + dscp Differentiated Services Code Point. + ========================== =============================================== + """ + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Type=0x80 | Sub-Type=0x09 | Reserved=0 | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Reserved=0 | Dscp | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _VALUE_PACK_STR = '!B5xB' + _VALUE_FIELDS = ['subtype', 'dscp'] + ACTION_NAME = 'traffic_marking' + + def __init__(self, **kwargs): + super(BGPFlowSpecTrafficMarkingCommunity, self).__init__() + kwargs['subtype'] = self.SUBTYPE_FLOWSPEC_TRAFFIC_REMARKING + self.do_init(BGPFlowSpecTrafficMarkingCommunity, self, kwargs) + + @classmethod + def parse_value(cls, buf): + (subtype, dscp) = struct.unpack_from(cls._VALUE_PACK_STR, buf) + return { + 'subtype': subtype, + 'dscp': dscp, + } + + def serialize_value(self): + return struct.pack(self._VALUE_PACK_STR, self.subtype, self.dscp) + + +# TODO +# Implement "Redirect-IPv6" [draft-ietf-idr-flow-spec-v6-08] + + +@_ExtendedCommunity.register_type( + _ExtendedCommunity.FLOWSPEC_VLAN_ACTION) +class BGPFlowSpecVlanActionCommunity(_ExtendedCommunity): + """ + Flow Specification Vlan Actions. + ========= =============================================== + Attribute Description + ========= =============================================== + actions_1 Bit representation of actions. + Supported actions are + ``POP``, ``PUSH``, ``SWAP``, ``REWRITE_INNER``, ``REWRITE_OUTER``. + actions_2 Same as ``actions_1``. + vlan_1 VLAN ID used by ``actions_1``. + cos_1 Class of Service used by ``actions_1``. + vlan_2 VLAN ID used by ``actions_2``. + cos_2 Class of Service used by ``actions_2``. 
+ ========= =============================================== + """ + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Type=0x08 | Sub-Type=0x0a |PO1|PU1|SW1|RT1|RO1|...|PO2|...| + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | VLAN ID1 | COS1 |0| VLAN ID2 | COS2 |0| + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _VALUE_PACK_STR = '!BBBHH' + _VALUE_FIELDS = [ + 'subtype', + 'actions_1', + 'actions_2', + 'vlan_1', + 'vlan_2', + 'cos_1', + 'cos_2'] + ACTION_NAME = 'vlan_action' + _COS_MASK = 0x07 + + POP = 1 << 7 + PUSH = 1 << 6 + SWAP = 1 << 5 + REWRITE_INNER = 1 << 4 + REWRITE_OUTER = 1 << 3 + + def __init__(self, **kwargs): + super(BGPFlowSpecVlanActionCommunity, self).__init__() + kwargs['subtype'] = self.SUBTYPE_FLOWSPEC_VLAN_ACTION + self.do_init(BGPFlowSpecVlanActionCommunity, self, kwargs) + + @classmethod + def parse_value(cls, buf): + (subtype, actions_1, actions_2, + vlan_cos_1, vlan_cos_2) = struct.unpack_from(cls._VALUE_PACK_STR, buf) + + return { + 'subtype': subtype, + 'actions_1': actions_1, + 'vlan_1': int(vlan_cos_1 >> 4), + 'cos_1': int((vlan_cos_1 >> 1) & cls._COS_MASK), + 'actions_2': actions_2, + 'vlan_2': int(vlan_cos_2 >> 4), + 'cos_2': int((vlan_cos_2 >> 1) & cls._COS_MASK) + } + + def serialize_value(self): + return struct.pack( + self._VALUE_PACK_STR, + self.subtype, + self.actions_1, + self.actions_2, + (self.vlan_1 << 4) + (self.cos_1 << 1), + (self.vlan_2 << 4) + (self.cos_2 << 1), + ) + + +@_ExtendedCommunity.register_type( + _ExtendedCommunity.FLOWSPEC_TPID_ACTION) +class BGPFlowSpecTPIDActionCommunity(_ExtendedCommunity): + """ + Flow Specification TPID Actions. + ========= ========================================================= + Attribute Description + ========= ========================================================= + actions Bit representation of actions. + Supported actions are + ``TI(inner TPID action)`` and ``TO(outer TPID action)``. + tpid_1 TPID used by ``TI``. + tpid_2 TPID used by ``TO``. 
+ ========= ========================================================= + """ + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Type=0x08 | Sub-Type=0x0b |TI|TO| Reserved=0 | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | TPID1 | TPID2 | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _VALUE_PACK_STR = '!BHHH' + _VALUE_FIELDS = ['subtype', 'actions', 'tpid_1', 'tpid_2'] + ACTION_NAME = 'tpid_action' + + TI = 1 << 15 + TO = 1 << 14 + + def __init__(self, **kwargs): + super(BGPFlowSpecTPIDActionCommunity, self).__init__() + kwargs['subtype'] = self.SUBTYPE_FLOWSPEC_TPID_ACTION + self.do_init(BGPFlowSpecTPIDActionCommunity, self, kwargs) + + @classmethod + def parse_value(cls, buf): + (subtype, actions, tpid_1, tpid_2) = struct.unpack_from( + cls._VALUE_PACK_STR, buf) + + return { + 'subtype': subtype, + 'actions': actions, + 'tpid_1': tpid_1, + 'tpid_2': tpid_2, + } + + def serialize_value(self): + return struct.pack( + self._VALUE_PACK_STR, + self.subtype, + self.actions, + self.tpid_1, + self.tpid_2, + ) + + @_ExtendedCommunity.register_unknown_type() class BGPUnknownExtendedCommunity(_ExtendedCommunity): _VALUE_PACK_STR = '!7s' # opaque value @@ -3147,6 +4753,7 @@ _RESERVED_LENGTH = 1 _ATTR_FLAGS = BGP_ATTR_FLAG_OPTIONAL _class_suffixes = ['AddrPrefix'] + _opt_attributes = ['next_hop'] _TYPE = { 'ascii': [ 'next_hop' @@ -3159,10 +4766,18 @@ flags=flags, type_=type_, length=length) self.afi = afi self.safi = safi - if (not netaddr.valid_ipv4(next_hop) - and not netaddr.valid_ipv6(next_hop)): - raise ValueError('Invalid address for next_hop: %s' % next_hop) - self.next_hop = next_hop + if not isinstance(next_hop, (list, tuple)): + next_hop = [next_hop] + for n in next_hop: + if not netaddr.valid_ipv4(n) and not netaddr.valid_ipv6(n): + raise ValueError('Invalid address for next_hop: %s' % n) + # Note: For the backward compatibility, stores the first next_hop + # address and all next_hop addresses separately. 
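        # Illustrative comment (not from the upstream diff): given
        # next_hop=['10.0.0.1', '10.0.0.9'], the attribute exposes
        # self.next_hop == '10.0.0.1' (the first entry, kept for backward
        # compatibility) while self.next_hop_list holds the full list.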
+ if next_hop: + self._next_hop = next_hop[0] + else: + self._next_hop = None + self._next_hop_list = next_hop self.nlri = nlri addr_cls = _get_addr_class(afi, safi) for i in nlri: @@ -3170,6 +4785,25 @@ raise ValueError('Invalid NRLI class for afi=%d and safi=%d' % (self.afi, self.safi)) + @staticmethod + def split_bin_with_len(buf, unit_len): + f = io.BytesIO(buf) + return [f.read(unit_len) for _ in range(0, len(buf), unit_len)] + + @classmethod + def parse_next_hop_ipv4(cls, buf, unit_len): + next_hop = [] + for next_hop_bin in cls.split_bin_with_len(buf, unit_len): + next_hop.append(addrconv.ipv4.bin_to_text(next_hop_bin[-4:])) + return next_hop + + @classmethod + def parse_next_hop_ipv6(cls, buf, unit_len): + next_hop = [] + for next_hop_bin in cls.split_bin_with_len(buf, unit_len): + next_hop.append(addrconv.ipv6.bin_to_text(next_hop_bin[-16:])) + return next_hop + @classmethod def parse_value(cls, buf): (afi, safi, next_hop_len,) = struct.unpack_from( @@ -3189,16 +4823,22 @@ nlri.append(n) rf = RouteFamily(afi, safi) - if rf == RF_IPv6_VPN: - next_hop = addrconv.ipv6.bin_to_text(next_hop_bin[cls._RD_LENGTH:]) - next_hop_len -= cls._RD_LENGTH - elif rf == RF_IPv4_VPN: - next_hop = addrconv.ipv4.bin_to_text(next_hop_bin[cls._RD_LENGTH:]) - next_hop_len -= cls._RD_LENGTH - elif afi == addr_family.IP or (rf == RF_L2_EVPN and next_hop_len == 4): - next_hop = addrconv.ipv4.bin_to_text(next_hop_bin) - elif afi == addr_family.IP6 or (rf == RF_L2_EVPN and next_hop_len > 4): - next_hop = addrconv.ipv6.bin_to_text(next_hop_bin) + if rf == RF_IPv4_VPN: + next_hop = cls.parse_next_hop_ipv4(next_hop_bin, + cls._RD_LENGTH + 4) + next_hop_len -= cls._RD_LENGTH * len(next_hop) + elif rf == RF_IPv6_VPN: + next_hop = cls.parse_next_hop_ipv6(next_hop_bin, + cls._RD_LENGTH + 16) + next_hop_len -= cls._RD_LENGTH * len(next_hop) + elif (afi == addr_family.IP + or (rf == RF_L2_EVPN and next_hop_len < 16)): + next_hop = cls.parse_next_hop_ipv4(next_hop_bin, 4) + elif (afi == addr_family.IP6 + or (rf == RF_L2_EVPN and next_hop_len >= 16)): + next_hop = cls.parse_next_hop_ipv6(next_hop_bin, 16) + elif rf == RF_L2VPN_FLOWSPEC: + next_hop = [] else: raise ValueError('Invalid address family: afi=%d, safi=%d' % (afi, safi)) @@ -3210,13 +4850,21 @@ 'nlri': nlri, } + def serialize_next_hop(self): + buf = bytearray() + for next_hop in self.next_hop_list: + if self.afi == addr_family.IP6: + next_hop = str(netaddr.IPAddress(next_hop).ipv6()) + next_hop_bin = ip.text_to_bin(next_hop) + if RouteFamily(self.afi, self.safi) in (RF_IPv4_VPN, RF_IPv6_VPN): + # Empty label stack(RD=0:0) + IP address + next_hop_bin = b'\x00' * self._RD_LENGTH + next_hop_bin + buf += next_hop_bin + + return buf + def serialize_value(self): - if self.afi == addr_family.IP6: - self.next_hop = str(netaddr.IPAddress(self.next_hop).ipv6()) - next_hop_bin = ip.text_to_bin(self.next_hop) - if RouteFamily(self.afi, self.safi) in (RF_IPv4_VPN, RF_IPv6_VPN): - # Empty label stack(RD=0:0) + IP address - next_hop_bin = b'\x00' * self._RD_LENGTH + next_hop_bin + next_hop_bin = self.serialize_next_hop() # fixup next_hop_len = len(next_hop_bin) @@ -3235,6 +4883,31 @@ return buf @property + def next_hop(self): + return self._next_hop + + @next_hop.setter + def next_hop(self, addr): + if not netaddr.valid_ipv4(addr) and not netaddr.valid_ipv6(addr): + raise ValueError('Invalid address for next_hop: %s' % addr) + self._next_hop = addr + self.next_hop_list[0] = addr + + @property + def next_hop_list(self): + return self._next_hop_list + + @next_hop_list.setter + 
def next_hop_list(self, addr_list): + if not isinstance(addr_list, (list, tuple)): + addr_list = [addr_list] + for addr in addr_list: + if not netaddr.valid_ipv4(addr) and not netaddr.valid_ipv6(addr): + raise ValueError('Invalid address for next_hop: %s' % addr) + self._next_hop = addr_list[0] + self._next_hop_list = addr_list + + @property def route_family(self): return _rf_map[(self.afi, self.safi)] @@ -3351,7 +5024,7 @@ # If binary type label field value is specified, stores it # and decodes as MPLS label and VNI. self._label = label - self._mpls_label = mpls.label_from_bin(label) + self._mpls_label, _ = mpls.label_from_bin(label) self._vni = vxlan.vni_from_bin(label) else: # If either MPLS label or VNI is specified, stores it @@ -3438,7 +5111,7 @@ return ins -class _PmsiTunnelId(StringifyMixin, _TypeDisp): +class _PmsiTunnelId(StringifyMixin, TypeDisp): @classmethod def parse(cls, tunnel_type, buf): @@ -3490,9 +5163,9 @@ @classmethod def parser(cls, buf): - (tunnel_endpoint_ip, - ) = struct.unpack_from(cls._VALUE_PACK_STR % len(buf), - six.binary_type(buf)) + (tunnel_endpoint_ip, ) = struct.unpack_from( + cls._VALUE_PACK_STR % len(buf), + six.binary_type(buf)) return cls(tunnel_endpoint_ip=ip.bin_to_text(tunnel_endpoint_ip)) def serialize(self): @@ -3505,7 +5178,7 @@ pass -class BGPMessage(packet_base.PacketBase, _TypeDisp): +class BGPMessage(packet_base.PacketBase, TypeDisp): """Base class for BGP-4 messages. An instance has the following attributes at least. @@ -3553,7 +5226,7 @@ subcls = cls._lookup_type(type_) kwargs = subcls.parser(binmsg) return subcls(marker=marker, len_=len_, type_=type_, - **kwargs), None, rest + **kwargs), cls, rest def serialize(self, payload=None, prev=None): # fixup @@ -3585,18 +5258,20 @@ ========================== =============================================== marker Marker field. Ignored when encoding. len Length field. Ignored when encoding. - type Type field. The default is BGP_MSG_OPEN. - version Version field. The default is 4. - my_as My Autonomous System field. 2 octet unsigned - integer. - hold_time Hold Time field. 2 octet unsigned integer. - The default is 0. - bgp_identifier BGP Identifier field. An IPv4 address. + type Type field. + version Version field. + my_as My Autonomous System field. + 2 octet unsigned integer. + hold_time Hold Time field. + 2 octet unsigned integer. + bgp_identifier BGP Identifier field. + An IPv4 address. For example, '192.0.2.1' opt_param_len Optional Parameters Length field. Ignored when encoding. - opt_param Optional Parameters field. A list of - BGPOptParam instances. The default is []. + opt_param Optional Parameters field. + A list of BGPOptParam instances. + The default is []. ========================== =============================================== """ @@ -3689,16 +5364,16 @@ ========================== =============================================== marker Marker field. Ignored when encoding. len Length field. Ignored when encoding. - type Type field. The default is BGP_MSG_UPDATE. + type Type field. withdrawn_routes_len Withdrawn Routes Length field. Ignored when encoding. - withdrawn_routes Withdrawn Routes field. A list of - BGPWithdrawnRoute instances. + withdrawn_routes Withdrawn Routes field. + A list of BGPWithdrawnRoute instances. The default is []. total_path_attribute_len Total Path Attribute Length field. Ignored when encoding. - path_attributes Path Attributes field. A list of - BGPPathAttribute instances. + path_attributes Path Attributes field. + A list of BGPPathAttribute instances. 
The default is []. nlri Network Layer Reachability Information field. A list of BGPNLRI instances. @@ -3808,7 +5483,7 @@ ========================== =============================================== marker Marker field. Ignored when encoding. len Length field. Ignored when encoding. - type Type field. The default is BGP_MSG_KEEPALIVE. + type Type field. ========================== =============================================== """ @@ -3840,11 +5515,10 @@ ========================== =============================================== marker Marker field. Ignored when encoding. len Length field. Ignored when encoding. - type Type field. The default is - BGP_MSG_NOTIFICATION. + type Type field. error_code Error code field. error_subcode Error subcode field. - data Data field. The default is ''. + data Data field. ========================== =============================================== """ @@ -3933,8 +5607,7 @@ ========================== =============================================== marker Marker field. Ignored when encoding. len Length field. Ignored when encoding. - type Type field. The default is - BGP_MSG_ROUTE_REFRESH. + type Type field. afi Address Family Identifier safi Subsequent Address Family Identifier ========================== =============================================== diff -Nru ryu-4.9/ryu/lib/packet/bmp.py ryu-4.15/ryu/lib/packet/bmp.py --- ryu-4.9/ryu/lib/packet/bmp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/bmp.py 2017-07-02 11:08:32.000000000 +0000 @@ -17,13 +17,15 @@ BGP Monitoring Protocol draft-ietf-grow-bmp-07 """ -import six import struct +import six + +from ryu.lib import addrconv from ryu.lib.packet import packet_base from ryu.lib.packet import stream_parser from ryu.lib.packet.bgp import BGPMessage -from ryu.lib import addrconv +from ryu.lib.type_desc import TypeDisp VERSION = 3 @@ -66,44 +68,7 @@ BMP_PEER_DOWN_REASON_REMOTE_NO_NOTIFICATION = 4 -class _TypeDisp(object): - _TYPES = {} - _REV_TYPES = None - _UNKNOWN_TYPE = None - - @classmethod - def register_unknown_type(cls): - def _register_type(subcls): - cls._UNKNOWN_TYPE = subcls - return subcls - return _register_type - - @classmethod - def register_type(cls, type_): - cls._TYPES = cls._TYPES.copy() - - def _register_type(subcls): - cls._TYPES[type_] = subcls - cls._REV_TYPES = None - return subcls - return _register_type - - @classmethod - def _lookup_type(cls, type_): - try: - return cls._TYPES[type_] - except KeyError: - return cls._UNKNOWN_TYPE - - @classmethod - def _rev_lookup_type(cls, targ_cls): - if cls._REV_TYPES is None: - rev = dict((v, k) for k, v in cls._TYPES.items()) - cls._REV_TYPES = rev - return cls._REV_TYPES[targ_cls] - - -class BMPMessage(packet_base.PacketBase, _TypeDisp): +class BMPMessage(packet_base.PacketBase, TypeDisp): """Base class for BGP Monitoring Protocol messages. An instance has the following attributes at least. diff -Nru ryu-4.9/ryu/lib/packet/dhcp6.py ryu-4.15/ryu/lib/packet/dhcp6.py --- ryu-4.9/ryu/lib/packet/dhcp6.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/dhcp6.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,288 @@ +# Copyright (C) 2016 Bouygues Telecom. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +DHCPv6 packet parser/serializer + +[RFC 3315] DHCPv6 packet format: + +The following diagram illustrates the format of DHCP messages sent +between clients and servers:: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | msg_type | transaction_id | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + . options . + . (variable) . + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +There are two relay agent messages, which share the following format:: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | msg_type | hop_count | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | + | | + | link_address | + | | + | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-| + | | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | + | | + | peer_address | + | | + | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-| + | | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | + . . + . options (variable number and length) .... . + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +""" +import random +import struct + +from . import packet_base +from ryu.lib import addrconv +from ryu.lib import stringify + +# DHCPv6 message types +DHCPV6_SOLICIT = 1 +DHCPV6_ADVERTISE = 2 +DHCPV6_REQUEST = 3 +DHCPV6_CONFIRM = 4 +DHCPV6_RENEW = 5 +DHCPV6_REBIND = 6 +DHCPV6_REPLY = 7 +DHCPV6_RELEASE = 8 +DHCPV6_DECLINE = 9 +DHCPV6_RECONFIGURE = 10 +DHCPV6_INFORMATION_REQUEST = 11 +DHCPV6_RELAY_FORW = 12 +DHCPV6_RELAY_REPL = 13 + +# DHCPv6 option-codes +DHCPV6_OPTION_CLIENTID = 1 +DHCPV6_OPTION_SERVERID = 2 +DHCPV6_OPTION_IA_NA = 3 +DHCPV6_OPTION_IA_TA = 4 +DHCPV6_OPTION_IAADDR = 5 +DHCPV6_OPTION_ORO = 6 +DHCPV6_OPTION_PREFERENCE = 7 +DHCPV6_OPTION_ELAPSED_TIME = 8 +DHCPV6_OPTION_RELAY_MSG = 9 +DHCPV6_OPTION_AUTH = 11 +DHCPV6_OPTION_UNICAST = 12 +DHCPV6_OPTION_STATUS_CODE = 13 +DHCPV6_OPTION_RAPID_COMMIT = 14 +DHCPV6_OPTION_USER_CLASS = 15 +DHCPV6_OPTION_VENDOR_CLASS = 16 +DHCPV6_OPTION_VENDOR_OPTS = 17 +DHCPV6_OPTION_INTERFACE_ID = 18 +DHCPV6_OPTION_RECONF_MSG = 19 +DHCPV6_OPTION_RECONF_ACCEPT = 20 + + +class dhcp6(packet_base.PacketBase): + """DHCPv6 (RFC 3315) header encoder/decoder class. + + The serialized packet would looks like the ones described + in the following sections. + + * RFC 3315 DHCP packet format + + An instance has the following attributes at least. + Most of them are same to the on-wire counterparts but in host byte order. + __init__ takes the corresponding args in this order. + + + ============== ==================== + Attribute Description + ============== ==================== + msg_type Identifies the DHCP message type + transaction_id For unrelayed messages only: the transaction ID for\ + this message exchange. + hop_count For relayed messages only: number of relay agents that\ + have relayed this message. + link_address For relayed messages only: a global or site-local address\ + that will be used by the server to identify the link on\ + which the client is located. 
+ peer_address For relayed messages only: the address of the client or\ + relay agent from which the message to be relayed was\ + received. + options Options carried in this message + ============== ==================== + """ + _MIN_LEN = 8 + _DHCPV6_UNPACK_STR = '!I' + _DHCPV6_RELAY_UNPACK_STR = '!H16s16s' + _DHCPV6_UNPACK_STR_LEN = struct.calcsize(_DHCPV6_UNPACK_STR) + _DHCPV6_RELAY_UNPACK_STR_LEN = struct.calcsize(_DHCPV6_RELAY_UNPACK_STR) + _DHCPV6_PACK_STR = '!I' + _DHCPV6_RELAY_PACK_STR = '!H16s16s' + + def __init__(self, msg_type, options, transaction_id=None, hop_count=0, + link_address='::', peer_address='::'): + super(dhcp6, self).__init__() + self.msg_type = msg_type + self.options = options + if transaction_id is None: + self.transaction_id = random.randint(0, 0xffffff) + else: + self.transaction_id = transaction_id + self.hop_count = hop_count + self.link_address = link_address + self.peer_address = peer_address + + @classmethod + def parser(cls, buf): + (msg_type, ) = struct.unpack_from('!B', buf) + + buf = b'\x00' + buf[1:] # unpack xid as a 4-byte integer + if msg_type == DHCPV6_RELAY_FORW or msg_type == DHCPV6_RELAY_REPL: + (hop_count, link_address, peer_address) \ + = struct.unpack_from(cls._DHCPV6_RELAY_UNPACK_STR, buf) + length = struct.calcsize(cls._DHCPV6_RELAY_UNPACK_STR) + else: + (transaction_id, ) \ + = struct.unpack_from(cls._DHCPV6_UNPACK_STR, buf) + length = struct.calcsize(cls._DHCPV6_UNPACK_STR) + + if len(buf) > length: + parse_opt = options.parser(buf[length:]) + length += parse_opt.options_len + if msg_type == DHCPV6_RELAY_FORW or msg_type == DHCPV6_RELAY_REPL: + return (cls(msg_type, parse_opt, 0, hop_count, + addrconv.ipv6.bin_to_text(link_address), + addrconv.ipv6.bin_to_text(peer_address)), + None, buf[length:]) + else: + return (cls(msg_type, parse_opt, transaction_id), + None, buf[length:]) + else: + return None, None, buf + + def serialize(self, payload=None, prev=None): + seri_opt = self.options.serialize() + if (self.msg_type == DHCPV6_RELAY_FORW or + self.msg_type == DHCPV6_RELAY_REPL): + pack_str = '%s%ds' % (self._DHCPV6_RELAY_PACK_STR, + self.options.options_len) + buf = struct.pack(pack_str, self.hop_count, + addrconv.ipv6.text_to_bin(self.link_address), + addrconv.ipv6.text_to_bin(self.peer_address), + seri_opt) + else: + pack_str = '%s%ds' % (self._DHCPV6_PACK_STR, + self.options.options_len) + buf = struct.pack(pack_str, self.transaction_id, seri_opt) + return struct.pack('!B', self.msg_type) + buf[1:] + + +class options(stringify.StringifyMixin): + """DHCP (RFC 3315) options encoder/decoder class. + + This is used with ryu.lib.packet.dhcp6.dhcp6. + """ + + def __init__(self, option_list=None, options_len=0): + super(options, self).__init__() + if option_list is None: + self.option_list = [] + else: + self.option_list = option_list + self.options_len = options_len + + @classmethod + def parser(cls, buf): + opt_parse_list = [] + offset = 0 + while len(buf) > offset: + opt_buf = buf[offset:] + opt = option.parser(opt_buf) + opt_parse_list.append(opt) + offset += opt.length + 4 + return cls(opt_parse_list, len(buf)) + + def serialize(self): + seri_opt = bytes() + for opt in self.option_list: + seri_opt += opt.serialize() + if self.options_len == 0: + self.options_len = len(seri_opt) + return seri_opt + + +class option(stringify.StringifyMixin): + """DHCP (RFC 3315) options encoder/decoder class. + + This is used with ryu.lib.packet.dhcp6.dhcp6.options. + + An instance has the following attributes at least. 
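A minimal usage sketch of the new DHCPv6 classes introduced above (the Elapsed Time payload is illustrative only):

    from ryu.lib.packet import dhcp6

    # Build a SOLICIT carrying one Elapsed Time option (code 8, 2 octets).
    elapsed = dhcp6.option(code=dhcp6.DHCPV6_OPTION_ELAPSED_TIME,
                           data=b'\x00\x00')
    sol = dhcp6.dhcp6(msg_type=dhcp6.DHCPV6_SOLICIT,
                      options=dhcp6.options(option_list=[elapsed]))
    buf = sol.serialize()

    # parser() returns (message, next_protocol_cls, remaining_bytes).
    msg, _cls, rest = dhcp6.dhcp6.parser(buf)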
+ Most of them are same to the on-wire counterparts but in host byte order. + __init__ takes the corresponding args in this order. + + The format of DHCP options is:: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | option-code | option-len | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | option-data | + | (option-len octets) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + ============== ==================== + Attribute Description + ============== ==================== + option-code An unsigned integer identifying the specific option\ + type carried in this option. + option-len An unsigned integer giving the length of the\ + option-data field in this option in octets. + option-data The data for the option; the format of this data\ + depends on the definition of the option. + ============== ==================== + """ + _UNPACK_STR = '!H' + _UNPACK_STR_LEN = struct.calcsize(_UNPACK_STR) + _PACK_STR = '!HH%ds' + + def __init__(self, code, data, length=0): + super(option, self).__init__() + self.code = code + self.data = data + self.length = length + + @classmethod + def parser(cls, buf): + code = struct.unpack_from(cls._UNPACK_STR, buf)[0] + buf = buf[cls._UNPACK_STR_LEN:] + length = struct.unpack_from(cls._UNPACK_STR, buf)[0] + buf = buf[cls._UNPACK_STR_LEN:] + value_unpack_str = '%ds' % length + data = struct.unpack_from(value_unpack_str, buf)[0] + return cls(code, data, length) + + def serialize(self): + if self.length == 0: + self.length = len(self.data) + options_pack_str = self._PACK_STR % self.length + return struct.pack(options_pack_str, self.code, self.length, self.data) diff -Nru ryu-4.9/ryu/lib/packet/dhcp.py ryu-4.15/ryu/lib/packet/dhcp.py --- ryu-4.9/ryu/lib/packet/dhcp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/dhcp.py 2017-07-02 11:08:32.000000000 +0000 @@ -15,51 +15,49 @@ """ DHCP packet parser/serializer - -RFC 2131 -DHCP packet format - - 0 1 2 3 - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | op (1) | htype (1) | hlen (1) | hops (1) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | xid (4) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | secs (2) | flags (2) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | ciaddr (4) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | yiaddr (4) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | siaddr (4) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | giaddr (4) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | | - | chaddr (16) | - | | - | | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | | - | sname (64) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | | - | file (128) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | | - | options (variable) | - +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - """ -import binascii +# RFC 2131 +# DHCP packet format +# 0 1 2 3 +# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | op (1) | htype (1) | hlen (1) | hops (1) | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | xid (4) 
| +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | secs (2) | flags (2) | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | ciaddr (4) | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | yiaddr (4) | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | siaddr (4) | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | giaddr (4) | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | | +# | chaddr (16) | +# | | +# | | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | | +# | sname (64) | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | | +# | file (128) | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +# | | +# | options (variable) | +# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + import random import struct -from . import packet_base +import netaddr + from ryu.lib import addrconv from ryu.lib import stringify - +from . import packet_base DHCP_BOOT_REQUEST = 1 DHCP_BOOT_REPLY = 2 @@ -76,6 +74,8 @@ DHCP_GATEWAY_ADDR_OPT = 3 DHCP_DNS_SERVER_ADDR_OPT = 6 DHCP_HOST_NAME_OPT = 12 +DHCP_DOMAIN_NAME_OPT = 15 +DHCP_INTERFACE_MTU_OPT = 26 DHCP_REQUESTED_IP_ADDR_OPT = 50 DHCP_IP_ADDR_LEASE_TIME_OPT = 51 DHCP_MESSAGE_TYPE_OPT = 53 @@ -83,6 +83,7 @@ DHCP_PARAMETER_REQUEST_LIST_OPT = 55 DHCP_RENEWAL_TIME_OPT = 58 DHCP_REBINDING_TIME_OPT = 59 +DHCP_CLASSLESS_ROUTE_OPT = 121 DHCP_END_OPT = 255 @@ -133,31 +134,26 @@ every DHCP message). ============== ==================== """ - _MIN_LEN = 236 - _HLEN_UNPACK_STR = '!BBB' - _HLEN_UNPACK_LEN = struct.calcsize(_HLEN_UNPACK_STR) - _DHCP_UNPACK_STR = '!BIHH4s4s4s4s%ds%ds64s128s' _DHCP_PACK_STR = '!BBBBIHH4s4s4s4s16s64s128s' - _DHCP_CHADDR_LEN = 16 + _MIN_LEN = struct.calcsize(_DHCP_PACK_STR) + _MAC_ADDRESS_LEN = 6 _HARDWARE_TYPE_ETHERNET = 1 _class_prefixes = ['options'] _TYPE = { 'ascii': [ - 'ciaddr', 'yiaddr', 'siaddr', 'giaddr', 'chaddr', 'sname' + 'ciaddr', 'yiaddr', 'siaddr', 'giaddr', 'chaddr', + 'sname', 'boot_file' ] } - def __init__(self, op, chaddr, options, htype=_HARDWARE_TYPE_ETHERNET, + def __init__(self, op, chaddr, options=None, htype=_HARDWARE_TYPE_ETHERNET, hlen=0, hops=0, xid=None, secs=0, flags=0, ciaddr='0.0.0.0', yiaddr='0.0.0.0', siaddr='0.0.0.0', - giaddr='0.0.0.0', sname='', boot_file=b''): + giaddr='0.0.0.0', sname='', boot_file=''): super(dhcp, self).__init__() self.op = op self.htype = htype - if hlen == 0: - self.hlen = len(addrconv.mac.text_to_bin(chaddr)) - else: - self.hlen = hlen + self.hlen = hlen self.hops = hops if xid is None: self.xid = random.randint(0, 0xffffffff) @@ -175,47 +171,46 @@ self.options = options @classmethod - def _parser(cls, buf): - (op, htype, hlen) = struct.unpack_from(cls._HLEN_UNPACK_STR, buf) - buf = buf[cls._HLEN_UNPACK_LEN:] - unpack_str = cls._DHCP_UNPACK_STR % (hlen, - (cls._DHCP_CHADDR_LEN - hlen)) - min_len = struct.calcsize(unpack_str) - (hops, xid, secs, flags, ciaddr, yiaddr, siaddr, giaddr, chaddr, - dummy, sname, boot_file - ) = struct.unpack_from(unpack_str, buf) - length = min_len - if len(buf) > min_len: - parse_opt = options.parser(buf[min_len:]) + def parser(cls, buf): + (op, htype, hlen, hops, xid, secs, flags, + ciaddr, yiaddr, siaddr, giaddr, chaddr, sname, + boot_file) = struct.unpack_from(cls._DHCP_PACK_STR, buf) + + if hlen == cls._MAC_ADDRESS_LEN: + chaddr = addrconv.mac.bin_to_text(chaddr[:cls._MAC_ADDRESS_LEN]) + + length = cls._MIN_LEN + parse_opt = 
None + if len(buf) > length: + parse_opt = options.parser(buf[length:]) length += parse_opt.options_len - return (cls(op, addrconv.mac.bin_to_text(chaddr), parse_opt, + return (cls(op, chaddr, parse_opt, htype, hlen, hops, xid, secs, flags, addrconv.ipv4.bin_to_text(ciaddr), addrconv.ipv4.bin_to_text(yiaddr), addrconv.ipv4.bin_to_text(siaddr), addrconv.ipv4.bin_to_text(giaddr), - sname.decode('ascii'), boot_file), + sname.decode('ascii'), boot_file.decode('ascii')), None, buf[length:]) - @classmethod - def parser(cls, buf): - try: - return cls._parser(buf) - except: - return None, None, buf - - def serialize(self, payload, prev): - seri_opt = self.options.serialize() - pack_str = '%s%ds' % (self._DHCP_PACK_STR, - self.options.options_len) - return struct.pack(pack_str, self.op, self.htype, self.hlen, + def serialize(self, _payload=None, _prev=None): + opt_buf = bytearray() + if self.options is not None: + opt_buf = self.options.serialize() + if netaddr.valid_mac(self.chaddr): + chaddr = addrconv.mac.text_to_bin(self.chaddr) + else: + chaddr = self.chaddr + self.hlen = len(chaddr) + return struct.pack(self._DHCP_PACK_STR, self.op, self.htype, self.hlen, self.hops, self.xid, self.secs, self.flags, addrconv.ipv4.text_to_bin(self.ciaddr), addrconv.ipv4.text_to_bin(self.yiaddr), addrconv.ipv4.text_to_bin(self.siaddr), addrconv.ipv4.text_to_bin(self.giaddr), - addrconv.mac.text_to_bin(self.chaddr), - self.sname.encode('ascii'), self.boot_file, seri_opt) + chaddr, + self.sname.encode('ascii'), + self.boot_file.encode('ascii')) + opt_buf class options(stringify.StringifyMixin): @@ -255,10 +250,7 @@ def __init__(self, option_list=None, options_len=0, magic_cookie=_MAGIC_COOKIE): super(options, self).__init__() - if option_list is None: - self.option_list = [] - else: - self.option_list = option_list + self.option_list = option_list or [] self.options_len = options_len self.magic_cookie = magic_cookie @@ -269,7 +261,11 @@ magic_cookie = struct.unpack_from(cls._MAGIC_COOKIE_UNPACK_STR, buf)[0] while len(buf) > offset: opt_buf = buf[offset:] - opt = option.parser(opt_buf) + try: + opt = option.parser(opt_buf) + except struct.error: + opt_parse_list.append(opt_buf) + break if opt is None: break opt_parse_list.append(opt) @@ -280,10 +276,13 @@ def serialize(self): seri_opt = addrconv.ipv4.text_to_bin(self.magic_cookie) for opt in self.option_list: - seri_opt += opt.serialize() - seri_opt += binascii.a2b_hex('%x' % DHCP_END_OPT) - if self.options_len == 0: - self.options_len = len(seri_opt) + if isinstance(opt, option): + seri_opt += opt.serialize() + else: + seri_opt += opt + if isinstance(self.option_list[-1], option): + seri_opt += b'\xff' + self.options_len = len(seri_opt) return seri_opt @@ -330,7 +329,6 @@ return cls(tag, value, length) def serialize(self): - if self.length == 0: - self.length = len(self.value) + self.length = len(self.value) options_pack_str = '!BB%ds' % self.length return struct.pack(options_pack_str, self.tag, self.length, self.value) diff -Nru ryu-4.9/ryu/lib/packet/ether_types.py ryu-4.15/ryu/lib/packet/ether_types.py --- ryu-4.9/ryu/lib/packet/ether_types.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/ether_types.py 2017-07-02 11:08:32.000000000 +0000 @@ -16,6 +16,7 @@ ETH_TYPE_IP = 0x0800 ETH_TYPE_ARP = 0x0806 +ETH_TYPE_TEB = 0x6558 ETH_TYPE_8021Q = 0x8100 ETH_TYPE_IPV6 = 0x86dd ETH_TYPE_SLOW = 0x8809 diff -Nru ryu-4.9/ryu/lib/packet/geneve.py ryu-4.15/ryu/lib/packet/geneve.py --- ryu-4.9/ryu/lib/packet/geneve.py 1970-01-01 00:00:00.000000000 +0000 +++ 
ryu-4.15/ryu/lib/packet/geneve.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,189 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Geneve packet parser/serializer +""" + +import struct + +from ryu.lib import stringify +from ryu.lib import type_desc +from . import packet_base +from . import ether_types + + +UDP_DST_PORT = 6081 + + +class geneve(packet_base.PacketBase): + """Geneve (RFC draft-ietf-nvo3-geneve-03) header encoder/decoder class. + + An instance has the following attributes at least. + Most of them are same to the on-wire counterparts but in host byte order. + __init__ takes the corresponding args in this order. + + ============== ======================================================== + Attribute Description + ============== ======================================================== + version Version. + opt_len The length of the options fields. + flags Flag field for OAM packet and Critical options present. + protocol Protocol Type field. + The Protocol Type is defined as "ETHER TYPES". + vni Identifier for unique element of virtual network. + options List of ``Option*`` instance. + ============== ======================================================== + """ + _HEADER_FMT = "!BBHI" + _MIN_LEN = struct.calcsize(_HEADER_FMT) + + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # |Ver| Opt Len |O|C| Rsvd. | Protocol Type | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Virtual Network Identifier (VNI) | Reserved | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Variable Length Options | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + # Flags + OAM_PACKET_FLAG = 1 << 7 + CRITICAL_OPTIONS_FLAG = 1 << 6 + + def __init__(self, version=0, opt_len=0, flags=0, + protocol=ether_types.ETH_TYPE_TEB, vni=None, options=None): + super(geneve, self).__init__() + + self.version = version + self.opt_len = opt_len + assert (flags & 0x3F) == 0 + self.flags = flags + self.protocol = protocol + self.vni = vni + for o in options: + assert isinstance(o, Option) + self.options = options + + @classmethod + def parser(cls, buf): + (ver_opt_len, flags, protocol, + vni) = struct.unpack_from(cls._HEADER_FMT, buf) + version = ver_opt_len >> 6 + # The Opt Len field expressed in four byte multiples. + opt_len = (ver_opt_len & 0x3F) * 4 + + opt_bin = buf[cls._MIN_LEN:cls._MIN_LEN + opt_len] + options = [] + while opt_bin: + option, opt_bin = Option.parser(opt_bin) + options.append(option) + + msg = cls(version, opt_len, flags, protocol, vni >> 8, options) + + from . 
import ethernet + geneve._TYPES = ethernet.ethernet._TYPES + geneve.register_packet_type(ethernet.ethernet, + ether_types.ETH_TYPE_TEB) + + return (msg, geneve.get_packet_type(protocol), + buf[cls._MIN_LEN + opt_len:]) + + def serialize(self, payload=None, prev=None): + tunnel_options = bytearray() + for o in self.options: + tunnel_options += o.serialize() + self.opt_len = len(tunnel_options) + # The Opt Len field expressed in four byte multiples. + opt_len = self.opt_len // 4 + + return (struct.pack(self._HEADER_FMT, + (self.version << 6) | opt_len, + self.flags, self.protocol, self.vni << 8) + + tunnel_options) + + +class Option(stringify.StringifyMixin, type_desc.TypeDisp): + """ + Tunnel Options + """ + _OPTION_PACK_STR = "!HBB" + _OPTION_LEN = struct.calcsize(_OPTION_PACK_STR) + + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Option Class | Type |R|R|R| Length | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Variable Option Data | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + def __init__(self, option_class=None, type_=None, length=0): + super(Option, self).__init__() + if option_class is None or type_ is None: + (option_class, type_) = self._rev_lookup_type(self.__class__) + self.option_class = option_class + self.type = type_ + self.length = length + + @classmethod + def parse_value(cls, buf): + # Sub-classes should override this method, if needed. + return {} + + def serialize_value(self): + # Sub-classes should override this method, if needed. + return b'' + + @classmethod + def parser(cls, buf): + (option_class, type_, + length) = struct.unpack_from(cls._OPTION_PACK_STR, buf) + + # The Length field expressed in four byte multiples. + length *= 4 + subcls = Option._lookup_type((option_class, type_)) + + return ( + subcls(option_class=option_class, type_=type_, length=length, + **subcls.parse_value( + buf[cls._OPTION_LEN:cls._OPTION_LEN + length])), + buf[cls._OPTION_LEN + length:]) + + def serialize(self, _payload=None, _prev=None): + data = self.serialize_value() + self.length = len(data) + # The Length field expressed in four byte multiples. + length = self.length // 4 + + return (struct.pack(self._OPTION_PACK_STR, int(self.option_class), + self.type, length) + data) + + +@Option.register_unknown_type() +class OptionDataUnknown(Option): + """ + Unknown Option Class and Type specific Option + """ + def __init__(self, buf, option_class=None, type_=None, length=0): + super(OptionDataUnknown, self).__init__(option_class=option_class, + type_=type_, + length=length) + self.buf = buf + + @classmethod + def parse_value(cls, buf): + return {"buf": buf} + + def serialize_value(self): + return self.buf diff -Nru ryu-4.9/ryu/lib/packet/gre.py ryu-4.15/ryu/lib/packet/gre.py --- ryu-4.9/ryu/lib/packet/gre.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/gre.py 2017-07-02 11:08:32.000000000 +0000 @@ -15,10 +15,10 @@ import struct +from ryu.lib.pack_utils import msg_pack_into from . import packet_base from . import packet_utils -from . import ether_types as ether -from ryu.lib.pack_utils import msg_pack_into +from . import ether_types GRE_CHECKSUM_FLG = 1 << 7 @@ -36,6 +36,7 @@ ============== ======================================================== Attribute Description ============== ======================================================== + version Version. protocol Protocol Type field. 
The Protocol Type is defined as "ETHER TYPES". checksum Checksum field(optional). @@ -44,6 +45,12 @@ key Key field(optional) This field is intended to be used for identifying an individual traffic flow within a tunnel. + vsid Virtual Subnet ID field(optional) + This field is a 24-bit value that is used + to identify the NVGRE-based Virtual Layer 2 Network. + flow_id FlowID field(optional) + This field is an 8-bit value that is used to provide + per-flow entropy for flows in the same VSID. seq_number Sequence Number field(optional) ============== ======================================================== """ @@ -54,16 +61,76 @@ _MIN_LEN = struct.calcsize(_PACK_STR) _CHECKSUM_LEN = struct.calcsize(_CHECKSUM_PACK_STR) _KEY_LEN = struct.calcsize(_KEY_PACK_STR) + _SEQNUM_PACK_LEN = struct.calcsize(_SEQNUM_PACK_STR) - def __init__(self, protocol=ether.ETH_TYPE_IP, - checksum=None, key=None, seq_number=None): + # GRE header + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # |C| |K|S| Reserved0 | Ver | Protocol Type | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Checksum (optional) | Reserved1 (Optional) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Key (optional) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Sequence Number (Optional) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + def __init__(self, version=0, protocol=ether_types.ETH_TYPE_IP, + checksum=None, key=None, vsid=None, flow_id=None, + seq_number=None): super(gre, self).__init__() + self.version = version self.protocol = protocol self.checksum = checksum - self.key = key self.seq_number = seq_number + if key is not None: + self._key = key + self._vsid = self._key >> 8 + self._flow_id = self._key & 0xff + elif (vsid is not None) and (flow_id is not None): + self._key = vsid << 8 | flow_id + self._vsid = vsid + self._flow_id = flow_id + else: + self._key = None + self._vsid = None + self._flow_id = None + + @property + def key(self): + return self._key + + @key.setter + def key(self, key): + if key is not None: + self._key = key + self._vsid = self._key >> 8 + self._flow_id = self._key & 0xff + else: + self._key = None + self._vsid = None + self._flow_id = None + + @property + def vsid(self): + return self._vsid + + @vsid.setter + def vsid(self, vsid): + self._key = vsid << 8 | (self._key & 0xff) + self._vsid = vsid + + @property + def flow_id(self): + return self._flow_id + + @flow_id.setter + def flow_id(self, flow_id): + self._key = (self._key & 0xffffff00) | flow_id + self._flow_id = flow_id + @classmethod def parser(cls, buf): present, version, protocol = struct.unpack_from(cls._PACK_STR, buf) @@ -82,25 +149,25 @@ if present & GRE_SEQUENCE_NUM_FLG: seq_number, = struct.unpack_from(cls._SEQNUM_PACK_STR, buf, gre_offset) + gre_offset += cls._SEQNUM_PACK_LEN - msg = cls(protocol, checksum, key, seq_number) + msg = cls(version=version, protocol=protocol, checksum=checksum, + key=key, seq_number=seq_number) from . import ethernet - # Because the protocol type field could either Ethertype is set, - # Set the _TYPES of ethernet, which owns the Ethernet types - # available in Ryu. 
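A short sketch of how the new key/vsid/flow_id properties relate (values illustrative; ETH_TYPE_TEB is the transparent-Ethernet ethertype added to ether_types in this release, and the nvgre() helper added below wraps the same construction):

    from ryu.lib.packet import ether_types, gre

    g = gre.gre(protocol=ether_types.ETH_TYPE_TEB, vsid=5000, flow_id=1)
    assert g.key == (5000 << 8) | 1
    g.flow_id = 2          # the setters keep key, vsid and flow_id in sync
    assert g.key == (5000 << 8) | 2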
gre._TYPES = ethernet.ethernet._TYPES + gre.register_packet_type(ethernet.ethernet, + ether_types.ETH_TYPE_TEB) return msg, gre.get_packet_type(protocol), buf[gre_offset:] def serialize(self, payload=None, prev=None): present = 0 - version = 0 hdr = bytearray() optional = bytearray() - if self.checksum: - present += GRE_CHECKSUM_FLG + if self.checksum is not None: + present |= GRE_CHECKSUM_FLG # For purposes of computing the checksum, # the value of the checksum field is zero. @@ -108,16 +175,16 @@ # Set in conjunction with checksum. optional += b'\x00' * self._CHECKSUM_LEN - if self.key: - present += GRE_KEY_FLG - optional += struct.pack(self._KEY_PACK_STR, self.key) + if self._key is not None: + present |= GRE_KEY_FLG + optional += struct.pack(self._KEY_PACK_STR, self._key) - if self.seq_number: - present += GRE_SEQUENCE_NUM_FLG + if self.seq_number is not None: + present |= GRE_SEQUENCE_NUM_FLG optional += struct.pack(self._SEQNUM_PACK_STR, self.seq_number) - msg_pack_into(self._PACK_STR, hdr, 0, - present, version, self.protocol) + msg_pack_into(self._PACK_STR, hdr, 0, present, self.version, + self.protocol) hdr += optional @@ -127,3 +194,24 @@ self.checksum) return hdr + + +def nvgre(version=0, vsid=0, flow_id=0): + """ + Generate instance of GRE class with information for NVGRE (RFC7637). + + :param version: Version. + :param vsid: Virtual Subnet ID. + :param flow_id: FlowID. + :return: Instance of GRE class with information for NVGRE. + """ + + # NVGRE header + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # |0| |1|0| Reserved0 | Ver | Protocol Type 0x6558 | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Virtual Subnet ID (VSID) | FlowID | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + return gre(version=version, protocol=ether_types.ETH_TYPE_TEB, + vsid=vsid, flow_id=flow_id) diff -Nru ryu-4.9/ryu/lib/packet/igmp.py ryu-4.15/ryu/lib/packet/igmp.py --- ryu-4.9/ryu/lib/packet/igmp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/igmp.py 2017-07-02 11:08:32.000000000 +0000 @@ -16,8 +16,7 @@ """ Internet Group Management Protocol(IGMP) packet parser/serializer -RFC 1112 -IGMP v1 format +[RFC 1112] IGMP v1 format:: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 @@ -27,8 +26,7 @@ | Group Address | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -RFC 2236 -IGMP v2 format +[RFC 2236] IGMP v2 format:: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 @@ -38,8 +36,7 @@ | Group Address | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -RFC 3376 -IGMP v3 Membership Query format +[RFC 3376] IGMP v3 Membership Query format:: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 @@ -60,7 +57,7 @@ | Source Address [N] | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -IGMP v3 Membership Report format +IGMP v3 Membership Report format:: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 @@ -92,7 +89,7 @@ | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -where each Group Record has the following internal format: +Where each Group Record has the following internal format:: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Record Type | Aux Data Len | Number of Sources (N) | diff -Nru ryu-4.9/ryu/lib/packet/llc.py ryu-4.15/ryu/lib/packet/llc.py --- 
ryu-4.9/ryu/lib/packet/llc.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/llc.py 2017-07-02 11:08:32.000000000 +0000 @@ -19,7 +19,7 @@ http://standards.ieee.org/getieee802/download/802.2-1998.pdf -LLC format +LLC format:: +-----------------+--------------+ | DSAP address | 8 bits | @@ -30,7 +30,7 @@ +-----------------+--------------+ -DSAP address field +DSAP address field:: LSB +-----+---+---+---+---+---+---+---+ @@ -40,7 +40,7 @@ I/G bit = 1 : Group DSA D : DSAP address -SSAP address field +SSAP address field:: LSB +-----+---+---+---+---+---+---+---+ @@ -51,27 +51,30 @@ S : SSAP address -Control field +Control field: + +Information transfer +command/response +(I-format PDU):: - Information transfer - command/response - (I-format PDU) 1 2 3 4 5 6 7 8 9 10-16 +---+---+---+---+---+---+---+---+-----+------+ | 0 | N(S) | P/F | N(R) | +---+---+---+---+---+---+---+---+-----+------+ - Supervisory - commands/responses - (S-format PDUs) +Supervisory +commands/responses +(S-format PDUs):: + 1 2 3 4 5 6 7 8 9 10-16 +---+---+---+---+---+---+---+---+-----+------+ | 1 0 | S S | 0 0 0 0 | P/F | N(R) | +---+---+---+---+---+---+---+---+-----+------+ - Unnumbered - commands/responses - (U-format PDUs) +Unnumbered +commands/responses +(U-format PDUs):: + 1 2 3 4 5 6 7 8 +---+---+----+---+-----+---+----+---+ | 1 1 | M1 M1 | P/F | M2 M2 M2 | @@ -83,7 +86,6 @@ M1/M2: modifier function bit P/F : poll bit - command LLC PDUs final bit - response LLC PDUs - """ diff -Nru ryu-4.9/ryu/lib/packet/lldp.py ryu-4.15/ryu/lib/packet/lldp.py --- ryu-4.9/ryu/lib/packet/lldp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/lldp.py 2017-07-02 11:08:32.000000000 +0000 @@ -19,22 +19,32 @@ http://standards.ieee.org/getieee802/download/802.1AB-2009.pdf -basic TLV format +basic TLV format:: -octets | 1 | 2 | 3 ... n + 2 | - -------------------------------------------------------- - | TLV type | TLV information | TLV information string | - | (7bits) | string length | ( 0 <= n <= 511 octets) | - | | (9bits) | | - -------------------------------------------------------- -bits |8 2|1|8 1| - - -LLDPDU format - - ------------------------------------------------------------------------ - | Chassis ID | Port ID | TTL | optional TLV | ... | optional TLV | End | - ------------------------------------------------------------------------ + octets | 1 | 2 | 3 ... n + 2 | + -------------------------------------------------------- + | TLV type | TLV information | TLV information string | + | (7bits) | string length | (0-507 octets) | + | | (9bits) | | + -------------------------------------------------------- + bits |8 2|1|8 1| + + +Organizationally specific TLV format:: + + octets | 1 | 2 | 3 ... 5 | 6 | 7 ... n + 6 | + --------------------------------------------------------------- + | TLV type | Length | OUI | Subtype | Infomation | + | (7bits) | (9bits) | (24bits) | (8bits) | (0-507 octets) | + --------------------------------------------------------------- + bits |8 2|1|8 1| + + +LLDPDU format:: + + ------------------------------------------------------------------------ + | Chassis ID | Port ID | TTL | optional TLV | ... | optional TLV | End | + ------------------------------------------------------------------------ Chasis ID, Port ID, TTL, End are mandatory optional TLV may be inserted in any order @@ -106,6 +116,16 @@ class lldp(packet_base.PacketBase): + """LLDPDU encoder/decoder class. + + An instance has the following attributes at least. 
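An illustrative LLDPDU construction matching the attributes documented here; the subtype constants are the ones ryu's lldp module already defines, and the identifiers themselves are arbitrary:

    from ryu.lib.packet import lldp

    tlvs = (
        lldp.ChassisID(subtype=lldp.ChassisID.SUB_LOCALLY_ASSIGNED,
                       chassis_id=b'dpid:0000000000000001'),
        lldp.PortID(subtype=lldp.PortID.SUB_PORT_COMPONENT,
                    port_id=b'\x00\x00\x00\x01'),
        lldp.TTL(ttl=120),
        lldp.End(),
    )
    lldp_pkt = lldp.lldp(tlvs)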
+ + ============== ===================================== + Attribute Description + ============== ===================================== + tlvs List of TLV instance. + ============== ===================================== + """ _tlv_parsers = {} def __init__(self, tlvs): @@ -180,6 +200,14 @@ @lldp.set_tlv_type(LLDP_TLV_END) class End(LLDPBasicTLV): + """End TLV encoder/decoder class + + ============== ===================================== + Attribute Description + ============== ===================================== + buf Binary data to parse. + ============== ===================================== + """ def __init__(self, buf=None, *args, **kwargs): super(End, self).__init__(buf, *args, **kwargs) if buf: @@ -194,6 +222,17 @@ @lldp.set_tlv_type(LLDP_TLV_CHASSIS_ID) class ChassisID(LLDPBasicTLV): + """Chassis ID TLV encoder/decoder class + + ============== ===================================== + Attribute Description + ============== ===================================== + buf Binary data to parse. + subtype Subtype. + chassis_id Chassis id corresponding to subtype. + ============== ===================================== + """ + _PACK_STR = '!B' _PACK_SIZE = struct.calcsize(_PACK_STR) # subtype id(1 octet) + chassis id length(1 - 255 octet) @@ -228,6 +267,16 @@ @lldp.set_tlv_type(LLDP_TLV_PORT_ID) class PortID(LLDPBasicTLV): + """Port ID TLV encoder/decoder class + + ============== ===================================== + Attribute Description + ============== ===================================== + buf Binary data to parse. + subtype Subtype. + port_id Port ID corresponding to subtype. + ============== ===================================== + """ _PACK_STR = '!B' _PACK_SIZE = struct.calcsize(_PACK_STR) @@ -263,6 +312,15 @@ @lldp.set_tlv_type(LLDP_TLV_TTL) class TTL(LLDPBasicTLV): + """Time To Live TLV encoder/decoder class + + ============== ===================================== + Attribute Description + ============== ===================================== + buf Binary data to parse. + ttl Time To Live. + ============== ===================================== + """ _PACK_STR = '!H' _PACK_SIZE = struct.calcsize(_PACK_STR) _LEN_MIN = _PACK_SIZE @@ -285,6 +343,15 @@ @lldp.set_tlv_type(LLDP_TLV_PORT_DESCRIPTION) class PortDescription(LLDPBasicTLV): + """Port description TLV encoder/decoder class + + ================= ===================================== + Attribute Description + ================= ===================================== + buf Binary data to parse. + port_description Port description. + ================= ===================================== + """ _LEN_MAX = 255 def __init__(self, buf=None, *args, **kwargs): @@ -311,6 +378,15 @@ @lldp.set_tlv_type(LLDP_TLV_SYSTEM_NAME) class SystemName(LLDPBasicTLV): + """System name TLV encoder/decoder class + + ================= ===================================== + Attribute Description + ================= ===================================== + buf Binary data to parse. + system_name System name. + ================= ===================================== + """ _LEN_MAX = 255 def __init__(self, buf=None, *args, **kwargs): @@ -337,6 +413,15 @@ @lldp.set_tlv_type(LLDP_TLV_SYSTEM_DESCRIPTION) class SystemDescription(LLDPBasicTLV): + """System description TLV encoder/decoder class + + =================== ===================================== + Attribute Description + =================== ===================================== + buf Binary data to parse. + system_description System description. 
+ =================== ===================================== + """ _LEN_MAX = 255 def __init__(self, buf=None, *args, **kwargs): @@ -363,6 +448,17 @@ @lldp.set_tlv_type(LLDP_TLV_SYSTEM_CAPABILITIES) class SystemCapabilities(LLDPBasicTLV): + """System Capabilities TLV encoder/decoder class + + ================= ===================================== + Attribute Description + ================= ===================================== + buf Binary data to parse. + subtype Subtype. + system_cap System Capabilities. + enabled_cap Enabled Capabilities. + ================= ===================================== + """ # chassis subtype(1) + system cap(2) + enabled cap(2) _PACK_STR = '!BHH' _PACK_SIZE = struct.calcsize(_PACK_STR) @@ -402,6 +498,19 @@ @lldp.set_tlv_type(LLDP_TLV_MANAGEMENT_ADDRESS) class ManagementAddress(LLDPBasicTLV): + """Management Address TLV encoder/decoder class + + ================= ===================================== + Attribute Description + ================= ===================================== + buf Binary data to parse. + addr_subtype Address type. + addr Device address. + intf_subtype Interface type. + intf_num Interface number. + oid Object ID. + ================= ===================================== + """ _LEN_MIN = 9 _LEN_MAX = 167 @@ -463,12 +572,22 @@ self.addr_len <= self._ADDR_LEN_MAX) def _oid_len_valid(self): - return (self._OID_LEN_MIN <= self.oid_len and - self.oid_len <= self._OID_LEN_MAX) + return self._OID_LEN_MIN <= self.oid_len <= self._OID_LEN_MAX @lldp.set_tlv_type(LLDP_TLV_ORGANIZATIONALLY_SPECIFIC) class OrganizationallySpecific(LLDPBasicTLV): + """Organizationally Specific TLV encoder/decoder class + + ================= ============================================= + Attribute Description + ================= ============================================= + buf Binary data to parse. + oui Organizationally unique ID. + subtype Organizationally defined subtype. + info Organizationally defined information string. + ================= ============================================= + """ _PACK_STR = '!3sB' _PACK_SIZE = struct.calcsize(_PACK_STR) _LEN_MIN = _PACK_SIZE diff -Nru ryu-4.9/ryu/lib/packet/mpls.py ryu-4.15/ryu/lib/packet/mpls.py --- ryu-4.9/ryu/lib/packet/mpls.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/mpls.py 2017-07-02 11:08:32.000000000 +0000 @@ -73,6 +73,7 @@ def label_from_bin(buf): """ Converts binary representation label to integer. + :param buf: Binary representation of label. :return: MPLS Label and BoS bit. """ @@ -84,6 +85,7 @@ def label_to_bin(mpls_label, is_bos=True): """ Converts integer label to binary representation. + :param mpls_label: MPLS Label. :param is_bos: BoS bit. :return: Binary representation of label. diff -Nru ryu-4.9/ryu/lib/packet/openflow.py ryu-4.15/ryu/lib/packet/openflow.py --- ryu-4.9/ryu/lib/packet/openflow.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/openflow.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,113 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import struct + +from ryu.lib import stringify +from . import packet_base + + +class openflow(packet_base.PacketBase): + """OpenFlow message encoder/decoder class. + + An instance has the following attributes at least. + + ============== ========================================================= + Attribute Description + ============== ========================================================= + msg An instance of OpenFlow message (see :ref:`ofproto_ref`) + or an instance of OFPUnparseableMsg if failed to parse + packet as OpenFlow message. + ============== ========================================================= + """ + + PACK_STR = '!BBHI' + _MIN_LEN = struct.calcsize(PACK_STR) + + def __init__(self, msg): + super(openflow, self).__init__() + self.msg = msg + + @classmethod + def parser(cls, buf): + from ryu.ofproto import ofproto_parser + from ryu.ofproto import ofproto_protocol + + (version, msg_type, msg_len, xid) = ofproto_parser.header(buf) + + msg_parser = ofproto_parser._MSG_PARSERS.get(version) + if msg_parser is None: + msg = OFPUnparseableMsg( + None, version, msg_type, msg_len, xid, + buf[cls._MIN_LEN:msg_len]) + return cls(msg), cls, buf[msg_len:] + + datapath = ofproto_protocol.ProtocolDesc(version=version) + + try: + msg = msg_parser(datapath, version, msg_type, msg_len, xid, + buf[:msg_len]) + except: + msg = OFPUnparseableMsg( + datapath, version, msg_type, msg_len, xid, + buf[datapath.ofproto.OFP_HEADER_SIZE:msg_len]) + + return cls(msg), cls, buf[msg_len:] + + def serialize(self, _payload, _prev): + self.msg.serialize() + return self.msg.buf + + +class OFPUnparseableMsg(stringify.StringifyMixin): + """Unparseable OpenFlow message encoder class. + + An instance has the following attributes at least. + + ============== ====================================================== + Attribute Description + ============== ====================================================== + datapath A ryu.ofproto.ofproto_protocol.ProtocolDesc instance + for this message or None if OpenFlow protocol version + is unsupported version. + version OpenFlow protocol version + msg_type Type of OpenFlow message + msg_len Length of the message + xid Transaction id + body OpenFlow body data + ============== ====================================================== + + .. Note:: + + "datapath" attribute is different from + ryu.controller.controller.Datapath. + So you can not use "datapath" attribute to send OpenFlow messages. + For example, "datapath" attribute does not have send_msg method. 
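A minimal parsing sketch for the new module; the eight bytes below are a standard OpenFlow 1.3 HELLO header (version 0x04, type 0, length 8, xid 0):

    from ryu.lib.packet import openflow

    raw = b'\x04\x00\x00\x08\x00\x00\x00\x00'
    pkt, _cls, rest = openflow.openflow.parser(raw)
    # pkt.msg is an OFPHello here; a message that cannot be decoded
    # would yield an OFPUnparseableMsg instead.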
+ """ + + def __init__(self, datapath, version, msg_type, msg_len, xid, body): + self.datapath = datapath + self.version = version + self.msg_type = msg_type + self.msg_len = msg_len + self.xid = xid + self.body = body + self.buf = None + + def serialize(self): + self.buf = struct.pack( + openflow.PACK_STR, + self.version, self.msg_type, self.msg_len, self.xid) + self.buf += self.body diff -Nru ryu-4.9/ryu/lib/packet/ospf.py ryu-4.15/ryu/lib/packet/ospf.py --- ryu-4.9/ryu/lib/packet/ospf.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/ospf.py 2017-07-02 11:08:32.000000000 +0000 @@ -17,23 +17,21 @@ RFC 2328 OSPF version 2 """ -import six +from functools import reduce +import logging import struct -try: - # Python 3 - from functools import reduce -except ImportError: - # Python 2 - pass +import six -from ryu.lib.stringify import StringifyMixin +from ryu.lib import addrconv from ryu.lib.packet import packet_base from ryu.lib.packet import packet_utils from ryu.lib.packet import stream_parser +from ryu.lib.stringify import StringifyMixin +from ryu.lib import type_desc -from ryu.lib import addrconv -import logging + +LOG = logging.getLogger(__name__) _VERSION = 2 @@ -88,43 +86,6 @@ pass -class _TypeDisp(object): - _TYPES = {} - _REV_TYPES = None - _UNKNOWN_TYPE = None - - @classmethod - def register_unknown_type(cls): - def _register_type(subcls): - cls._UNKNOWN_TYPE = subcls - return subcls - return _register_type - - @classmethod - def register_type(cls, type_): - cls._TYPES = cls._TYPES.copy() - - def _register_type(subcls): - cls._TYPES[type_] = subcls - cls._REV_TYPES = None - return subcls - return _register_type - - @classmethod - def _lookup_type(cls, type_): - try: - return cls._TYPES[type_] - except KeyError: - return cls._UNKNOWN_TYPE - - @classmethod - def _rev_lookup_type(cls, targ_cls): - if cls._REV_TYPES is None: - rev = dict((v, k) for k, v in cls._TYPES.items()) - cls._REV_TYPES = rev - return cls._REV_TYPES[targ_cls] - - class LSAHeader(StringifyMixin): _HDR_PACK_STR = '!HBB4s4sIHH' _HDR_LEN = struct.calcsize(_HDR_PACK_STR) @@ -184,22 +145,34 @@ (id_,) = struct.unpack_from('4s', struct.pack('!I', id_)) adv_router = addrconv.ipv4.text_to_bin(self.adv_router) - return bytearray(struct.pack(self._HDR_PACK_STR, self.ls_age, - self.options, self.type_, id_, adv_router, - self.ls_seqnum, self.checksum, self.length)) + return bytearray( + struct.pack(self._HDR_PACK_STR, self.ls_age, + self.options, self.type_, id_, adv_router, + self.ls_seqnum, self.checksum, self.length)) -class LSA(_TypeDisp, StringifyMixin): +class LSA(type_desc.TypeDisp, StringifyMixin): def __init__(self, ls_age=0, options=0, type_=OSPF_UNKNOWN_LSA, id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0, checksum=0, length=0, opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN, opaque_id=0): if type_ < OSPF_OPAQUE_LINK_LSA: - self.header = LSAHeader(ls_age, options, type_, id_, adv_router, - ls_seqnum, 0, 0) + self.header = LSAHeader( + ls_age=ls_age, + options=options, + type_=type_, + id_=id_, + adv_router=adv_router, + ls_seqnum=ls_seqnum) else: - self.header = LSAHeader(ls_age, options, type_, 0, adv_router, - ls_seqnum, 0, 0, opaque_type, opaque_id) + self.header = LSAHeader( + ls_age=ls_age, + options=options, + type_=type_, + adv_router=adv_router, + ls_seqnum=ls_seqnum, + opaque_type=opaque_type, + opaque_id=opaque_id) if not (checksum or length): tail = self.serialize_tail() @@ -241,6 +214,10 @@ struct.pack_into("!H", head, 16, csum) return head + tail + def serialize_tail(self): + # should be 
implemented in subclass + return b'' + @LSA.register_type(OSPF_ROUTER_LSA) class RouterLSA(LSA): @@ -275,8 +252,9 @@ def serialize(self): id_ = addrconv.ipv4.text_to_bin(self.id_) data = addrconv.ipv4.text_to_bin(self.data) - return bytearray(struct.pack(self._PACK_STR, id_, data, self.type_, - self.tos, self.metric)) + return bytearray( + struct.pack(self._PACK_STR, id_, data, self.type_, self.tos, + self.metric)) def __init__(self, ls_age=0, options=0, type_=OSPF_ROUTER_LSA, id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0, @@ -293,20 +271,20 @@ links = [] hdr = buf[:cls._PACK_LEN] buf = buf[cls._PACK_LEN:] - (flags, padding, num) = struct.unpack_from(cls._PACK_STR, - six.binary_type(hdr)) + (flags, _, num) = struct.unpack_from(cls._PACK_STR, + six.binary_type(hdr)) while buf: link, buf = cls.Link.parser(buf) links.append(link) - assert(len(links) == num) + assert len(links) == num return { "flags": flags, "links": links, } def serialize_tail(self): - head = bytearray(struct.pack(self._PACK_STR, self.flags, 0, - len(self.links))) + head = bytearray( + struct.pack(self._PACK_STR, self.flags, 0, len(self.links))) try: return head + reduce(lambda a, b: a + b, (link.serialize() for link in self.links)) @@ -353,15 +331,15 @@ def serialize_tail(self): mask = addrconv.ipv4.text_to_bin(self.mask) - routers = [addrconv.ipv4.text_to_bin( - router) for router in self.routers] - return bytearray(struct.pack("!" + "4s" * (1 + len(routers)), mask, - *routers)) + routers = [addrconv.ipv4.text_to_bin(router) + for router in self.routers] + return bytearray( + struct.pack("!" + "4s" * (1 + len(routers)), mask, *routers)) @LSA.register_type(OSPF_SUMMARY_LSA) class SummaryLSA(LSA): - _PACK_STR = '!4sBBH' + _PACK_STR = '!4sB3s' _PACK_LEN = struct.calcsize(_PACK_STR) def __init__(self, ls_age=0, options=0, type_=OSPF_SUMMARY_LSA, @@ -378,12 +356,12 @@ def parser(cls, buf): if len(buf) < cls._PACK_LEN: raise stream_parser.StreamParser.TooSmallException( - '%d < %d' % (len(buf), cls_PACK_LEN)) + '%d < %d' % (len(buf), cls._PACK_LEN)) buf = buf[:cls._PACK_LEN] - (mask, tos, metric_fst, metric_lst) = struct.unpack_from( + (mask, tos, metric) = struct.unpack_from( cls._PACK_STR, six.binary_type(buf)) mask = addrconv.ipv4.bin_to_text(mask) - metric = metric_fst << 16 | (metric_lst & 0xffff) + metric = type_desc.Int3.to_user(metric) return { "mask": mask, "tos": tos, @@ -392,8 +370,7 @@ def serialize_tail(self): mask = addrconv.ipv4.text_to_bin(self.mask) - metric_fst = (self.metric >> 16) & 0xff - metric_lst = self.metric & 0xffff + metric = type_desc.Int3.from_user(self.metric) return bytearray(struct.pack(self._PACK_STR, mask, self.tos, metric)) @@ -405,7 +382,7 @@ @LSA.register_type(OSPF_AS_EXTERNAL_LSA) class ASExternalLSA(LSA): class ExternalNetwork(StringifyMixin): - _PACK_STR = '!4sBBH4sI' + _PACK_STR = '!4sB3s4sI' _PACK_LEN = struct.calcsize(_PACK_STR) def __init__(self, mask='0.0.0.0', flags=0, metric=0, @@ -423,21 +400,20 @@ '%d < %d' % (len(buf), cls._PACK_LEN)) ext_nw = buf[:cls._PACK_LEN] rest = buf[cls._PACK_LEN:] - (mask, flags, metric_fst, metric_lst, fwd_addr, + (mask, flags, metric, fwd_addr, tag) = struct.unpack_from(cls._PACK_STR, six.binary_type(ext_nw)) mask = addrconv.ipv4.bin_to_text(mask) - metric = metric_fst << 16 | (metric_lst & 0xffff) + metric = type_desc.Int3.to_user(metric) fwd_addr = addrconv.ipv4.bin_to_text(fwd_addr) return cls(mask, flags, metric, fwd_addr, tag), rest def serialize(self): mask = addrconv.ipv4.text_to_bin(self.mask) - metric_fst = (self.metric >> 16) & 0xff - 
metric_lst = self.metric & 0xffff + metric = type_desc.Int3.from_user(self.metric) fwd_addr = addrconv.ipv4.text_to_bin(self.fwd_addr) - return bytearray(struct.pack(self._PACK_STR, mask, self.flags, - metric_fst, metric_lst, fwd_addr, - self.tag)) + return bytearray( + struct.pack(self._PACK_STR, mask, self.flags, metric, + fwd_addr, self.tag)) def __init__(self, ls_age=0, options=0, type_=OSPF_AS_EXTERNAL_LSA, id_='0.0.0.0', adv_router='0.0.0.0', ls_seqnum=0, @@ -468,7 +444,7 @@ pass -class ExtendedPrefixTLV(StringifyMixin, _TypeDisp): +class ExtendedPrefixTLV(StringifyMixin, type_desc.TypeDisp): pass @@ -516,6 +492,7 @@ def __init__(self, type_=OSPF_EXTENDED_PREFIX_SID_SUBTLV, length=0, flags=0, mt_id=0, algorithm=0, range_size=0, index=0): + super(PrefixSIDSubTLV, self).__init__() self.type_ = type_ self.length = length self.flags = flags @@ -541,7 +518,11 @@ self.algorithm, 0, self.range_size, 0, self.index) -class OpaqueBody(StringifyMixin, _TypeDisp): +class ExtendedLinkTLV(StringifyMixin, type_desc.TypeDisp): + pass + + +class OpaqueBody(StringifyMixin, type_desc.TypeDisp): def __init__(self, tlvs=None): tlvs = tlvs if tlvs else [] self.tlvs = tlvs @@ -590,6 +571,11 @@ class OpaqueLSA(LSA): + + def __init__(self, data, *args, **kwargs): + super(OpaqueLSA, self).__init__(*args, **kwargs) + self.data = data + @classmethod def parser(cls, buf, opaque_type=OSPF_OPAQUE_TYPE_UNKNOWN): opaquecls = OpaqueBody._lookup_type(opaque_type) @@ -639,7 +625,7 @@ length, opaque_type, opaque_id) -class OSPFMessage(packet_base.PacketBase, _TypeDisp): +class OSPFMessage(packet_base.PacketBase, type_desc.TypeDisp): """Base class for OSPF version 2 messages. """ @@ -649,6 +635,7 @@ def __init__(self, type_, length=None, router_id='0.0.0.0', area_id='0.0.0.0', au_type=1, authentication=0, checksum=None, version=_VERSION): + super(OSPFMessage, self).__init__() self.version = version self.type_ = type_ self.length = length @@ -695,11 +682,12 @@ def serialize(self, payload=None, prev=None): tail = self.serialize_tail() self.length = self._HDR_LEN + len(tail) - head = bytearray(struct.pack(self._HDR_PACK_STR, self.version, - self.type_, self.length, - addrconv.ipv4.text_to_bin(self.router_id), - addrconv.ipv4.text_to_bin(self.area_id), 0, - self.au_type, self.authentication)) + head = bytearray( + struct.pack(self._HDR_PACK_STR, self.version, + self.type_, self.length, + addrconv.ipv4.text_to_bin(self.router_id), + addrconv.ipv4.text_to_bin(self.area_id), 0, + self.au_type, self.authentication)) buf = head + tail csum = packet_utils.checksum(buf[:12] + buf[14:16] + buf[self._HDR_LEN:]) @@ -763,16 +751,17 @@ } def serialize_tail(self): - head = bytearray(struct.pack(self._PACK_STR, - addrconv.ipv4.text_to_bin(self.mask), - self.hello_interval, self.options, self.priority, - self.dead_interval, - addrconv.ipv4.text_to_bin(self.designated_router), - addrconv.ipv4.text_to_bin(self.backup_router))) + head = bytearray( + struct.pack(self._PACK_STR, + addrconv.ipv4.text_to_bin(self.mask), + self.hello_interval, self.options, self.priority, + self.dead_interval, + addrconv.ipv4.text_to_bin(self.designated_router), + addrconv.ipv4.text_to_bin(self.backup_router))) try: return head + reduce(lambda a, b: a + b, - (addrconv.ipv4.text_to_bin( - n) for n in self.neighbors)) + (addrconv.ipv4.text_to_bin(n) + for n in self.neighbors)) except TypeError: return head @@ -826,9 +815,9 @@ flags = ((self.i_flag & 0x1) << 2) ^ \ ((self.m_flag & 0x1) << 1) ^ \ (self.ms_flag & 0x1) - head = bytearray(struct.pack(self._PACK_STR, 
self.mtu, - self.options, flags, - self.sequence_number)) + head = bytearray( + struct.pack(self._PACK_STR, self.mtu, self.options, flags, + self.sequence_number)) try: return head + reduce(lambda a, b: a + b, (hdr.serialize() for hdr in self.lsa_headers)) @@ -866,8 +855,7 @@ def serialize(self): id_ = addrconv.ipv4.text_to_bin(self.id) adv_router = addrconv.ipv4.text_to_bin(self.adv_router) - return bytearray(struct.pack(self._PACK_STR, self.type_, - id_, adv_router)) + return struct.pack(self._PACK_STR, self.type_, id_, adv_router) def __init__(self, length=None, router_id='0.0.0.0', area_id='0.0.0.0', au_type=1, authentication=0, checksum=None, version=_VERSION, @@ -918,7 +906,7 @@ while buf: lsa, _cls, buf = LSA.parser(buf) lsas.append(lsa) - assert(len(lsas) == num) + assert len(lsas) == num return { "lsas": lsas, } diff -Nru ryu-4.9/ryu/lib/packet/safi.py ryu-4.15/ryu/lib/packet/safi.py --- ryu-4.9/ryu/lib/packet/safi.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/safi.py 2017-07-02 11:08:32.000000000 +0000 @@ -25,3 +25,5 @@ EVPN = 70 # RFC 7432 MPLS_VPN = 128 # RFC 4364 ROUTE_TARGET_CONSTRAINTS = 132 # RFC 4684 +IP_FLOWSPEC = 133 # RFC 5575 +VPN_FLOWSPEC = 134 # RFC 5575 diff -Nru ryu-4.9/ryu/lib/packet/sctp.py ryu-4.15/ryu/lib/packet/sctp.py --- ryu-4.9/ryu/lib/packet/sctp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/sctp.py 2017-07-02 11:08:32.000000000 +0000 @@ -385,7 +385,9 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Payload Data (DATA) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -465,7 +467,9 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Initiation (INIT) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -515,7 +519,9 @@ sub encoder/decoder class for Initiation Acknowledgement (INIT ACK) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -565,7 +571,9 @@ sub encoder/decoder class for Selective Acknowledgement (SACK) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -660,7 +668,9 @@ sub encoder/decoder class for Heartbeat Request (HEARTBEAT) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -703,7 +713,9 @@ sub encoder/decoder class for Heartbeat Acknowledgement (HEARTBEAT ACK) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. 
Most of them are same to the on-wire counterparts but in host byte order. @@ -745,7 +757,9 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Abort Association (ABORT) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -822,7 +836,9 @@ sub encoder/decoder class for Shutdown Association (SHUTDOWN) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -875,7 +891,9 @@ sub encoder/decoder class for Shutdown Acknowledgement (SHUTDOWN ACK) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -902,7 +920,9 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Operation Error (ERROR) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -974,7 +994,9 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Cookie Echo (COOKIE ECHO) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1035,7 +1057,9 @@ sub encoder/decoder class for Cookie Acknowledgement (COOKIE ACK) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1062,7 +1086,9 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for ECN-Echo chunk (RFC 4960 Appendix A.). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1090,7 +1116,9 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for CWR chunk (RFC 4960 Appendix A.). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1119,7 +1147,9 @@ sub encoder/decoder class for Shutdown Complete (SHUTDOWN COMPLETE) chunk (RFC 4960). - This is used with ryu.lib.packet.sctp.sctp. + This class is used with the following. + + - ryu.lib.packet.sctp.sctp An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1239,8 +1269,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Invalid Stream Identifier (RFC 4960). 
- This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1286,8 +1318,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Missing Mandatory Parameter (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1356,8 +1390,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Stale Cookie Error (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1385,8 +1421,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Out of Resource (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1418,8 +1456,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Unresolvable Address (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1483,8 +1523,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Unrecognized Chunk Type (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1512,8 +1554,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Invalid Mandatory Parameter (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1545,8 +1589,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Unrecognized Parameters (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. 
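# Usage sketch for the relationship the rewritten docstrings spell out:
# cause_* instances are carried by chunk_abort/chunk_error, and chunks are
# carried by sctp.sctp.  Keyword names below are assumed from the attribute
# tables and have not been verified against the module.
from ryu.lib.packet import sctp

cause = sctp.cause_invalid_stream_id(value=4096)    # offending stream id (assumed kwarg)
abort_chunk = sctp.chunk_abort(causes=[cause])      # ABORT carrying the cause (assumed kwarg)
pkt = sctp.sctp(src_port=38000, dst_port=80, vtag=0, chunks=[abort_chunk])
buf = pkt.serialize(None, None)                     # raw SCTP bytes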
Most of them are same to the on-wire counterparts but in host byte order. @@ -1574,8 +1620,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for No User Data (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1605,8 +1653,10 @@ sub encoder/decoder class for Cookie Received While Shutting Down (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1639,8 +1689,10 @@ sub encoder/decoder class for Restart of an Association with New Addresses (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1712,8 +1764,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for User-Initiated Abort (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1741,8 +1795,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Protocol Violation (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_abort and - ryu.lib.packet.sctp.chunk_error. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_abort + - ryu.lib.packet.sctp.chunk_error An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1819,8 +1875,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Heartbeat Info Parameter (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_heartbeat and - ryu.lib.packet.sctp.chunk_heartbeat_ack. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_heartbeat + - ryu.lib.packet.sctp.chunk_heartbeat_ack An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1847,7 +1905,9 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for State Cookie Parameter (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_init_ack. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_init_ack An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1874,7 +1934,9 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Unrecognized Parameter (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_init_ack. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_init_ack An instance has the following attributes at least. 
Most of them are same to the on-wire counterparts but in host byte order. @@ -1901,7 +1963,9 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Cookie Preservative Parameter (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_init. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_init An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1947,8 +2011,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for ECN Parameter (RFC 4960 Appendix A.). - This is used with ryu.lib.packet.sctp.chunk_init and - ryu.lib.packet.sctp.chunk_init_ack. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_init + - ryu.lib.packet.sctp.chunk_init_ack An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -1983,8 +2049,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Host Name Address Parameter (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_init and - ryu.lib.packet.sctp.chunk_init_ack. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_init + - ryu.lib.packet.sctp.chunk_init_ack An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -2011,7 +2079,9 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for Supported Address Types Parameter (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_init. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_init An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -2075,8 +2145,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for IPv4 Address Parameter (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_init and - ryu.lib.packet.sctp.chunk_init_ack. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_init + - ryu.lib.packet.sctp.chunk_init_ack An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. @@ -2130,8 +2202,10 @@ """Stream Control Transmission Protocol (SCTP) sub encoder/decoder class for IPv6 Address Parameter (RFC 4960). - This is used with ryu.lib.packet.sctp.chunk_init and - ryu.lib.packet.sctp.chunk_init_ack. + This class is used with the following. + + - ryu.lib.packet.sctp.chunk_init + - ryu.lib.packet.sctp.chunk_init_ack An instance has the following attributes at least. Most of them are same to the on-wire counterparts but in host byte order. diff -Nru ryu-4.9/ryu/lib/packet/tcp.py ryu-4.15/ryu/lib/packet/tcp.py --- ryu-4.9/ryu/lib/packet/tcp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/tcp.py 2017-07-02 11:08:32.000000000 +0000 @@ -13,14 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -import six import struct import logging +import six + +from ryu.lib import stringify from . import packet_base from . import packet_utils from . import bgp -from ryu.lib import stringify +from . import openflow +from . 
import zebra LOG = logging.getLogger(__name__) @@ -111,8 +114,16 @@ @staticmethod def get_payload_type(src_port, dst_port): + from ryu.ofproto.ofproto_common import OFP_TCP_PORT, OFP_SSL_PORT_OLD if bgp.TCP_SERVER_PORT in [src_port, dst_port]: return bgp.BGPMessage + elif(src_port in [OFP_TCP_PORT, OFP_SSL_PORT_OLD] or + dst_port in [OFP_TCP_PORT, OFP_SSL_PORT_OLD]): + return openflow.openflow + elif src_port == zebra.ZEBRA_PORT: + return zebra._ZebraMessageFromZebra + elif dst_port == zebra.ZEBRA_PORT: + return zebra.ZebraMessage else: return None @@ -166,7 +177,7 @@ if len(h) < offset: h.extend(bytearray(offset - len(h))) - if 0 == self.offset: + if self.offset == 0: self.offset = len(h) >> 2 offset = self.offset << 4 struct.pack_into('!B', h, 12, offset) diff -Nru ryu-4.9/ryu/lib/packet/udp.py ryu-4.15/ryu/lib/packet/udp.py --- ryu-4.9/ryu/lib/packet/udp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/udp.py 2017-07-02 11:08:32.000000000 +0000 @@ -18,7 +18,9 @@ from . import packet_base from . import packet_utils from . import dhcp +from . import dhcp6 from . import vxlan +from . import geneve class udp(packet_base.PacketBase): @@ -52,14 +54,17 @@ @staticmethod def get_packet_type(src_port, dst_port): - if ((src_port == 68 and dst_port == 67) or - (src_port == 67 and dst_port == 68) or - (src_port == 67 and - dst_port == 67)): + if ((src_port in [67, 68] and dst_port == 67) or + (dst_port in [67, 68] and src_port == 67)): return dhcp.dhcp + if ((src_port in [546, 547] and dst_port == 547) or + (dst_port in [546, 547] and src_port == 547)): + return dhcp6.dhcp6 if (dst_port == vxlan.UDP_DST_PORT or dst_port == vxlan.UDP_DST_PORT_OLD): return vxlan.vxlan + if dst_port == geneve.UDP_DST_PORT: + return geneve.geneve return None @classmethod diff -Nru ryu-4.9/ryu/lib/packet/vrrp.py ryu-4.15/ryu/lib/packet/vrrp.py --- ryu-4.9/ryu/lib/packet/vrrp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/vrrp.py 2017-07-02 11:08:32.000000000 +0000 @@ -17,8 +17,8 @@ """ VRRP packet parser/serializer -RFC 3768 -VRRP v2 packet format +[RFC 3768] VRRP v2 packet format:: + 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -40,8 +40,8 @@ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -RFC 5798 -VRRP v3 packet format +[RFC 5798] VRRP v3 packet format:: + 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -64,7 +64,6 @@ + + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - """ import struct diff -Nru ryu-4.9/ryu/lib/packet/vxlan.py ryu-4.15/ryu/lib/packet/vxlan.py --- ryu-4.9/ryu/lib/packet/vxlan.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/vxlan.py 2017-07-02 11:08:32.000000000 +0000 @@ -13,32 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" -VXLAN packet parser/serializer - -RFC 7348 -VXLAN Header: -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -|R|R|R|R|I|R|R|R| Reserved | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ -| VXLAN Network Identifier (VNI) | Reserved | -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - -- Flags (8 bits): where the I flag MUST be set to 1 for a valid - VXLAN Network ID (VNI). 
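# The widened dispatch can be exercised directly; the expected results follow
# from the branches shown in the hunks above.
from ryu.lib.packet import tcp, udp, zebra

udp.udp.get_packet_type(src_port=546, dst_port=547)   # -> dhcp6.dhcp6
udp.udp.get_packet_type(src_port=68, dst_port=67)     # -> dhcp.dhcp
# Zebra messages coming *from* the daemon use the asymmetric parser:
tcp.tcp.get_payload_type(src_port=zebra.ZEBRA_PORT, dst_port=33000)
# -> zebra._ZebraMessageFromZebra
tcp.tcp.get_payload_type(src_port=33000, dst_port=zebra.ZEBRA_PORT)
# -> zebra.ZebraMessage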
The other 7 bits (designated "R") are - reserved fields and MUST be set to zero on transmission and - ignored on receipt. - -- VXLAN Segment ID/VXLAN Network Identifier (VNI): this is a - 24-bit value used to designate the individual VXLAN overlay - network on which the communicating VMs are situated. VMs in - different VXLAN overlay networks cannot communicate with each - other. - -- Reserved fields (24 bits and 8 bits): MUST be set to zero on - transmission and ignored on receipt. -""" - import struct import logging @@ -72,6 +46,13 @@ _PACK_STR = '!II' _MIN_LEN = struct.calcsize(_PACK_STR) + # VXLAN Header: + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # |R|R|R|R|I|R|R|R| Reserved | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | VXLAN Network Identifier (VNI) | Reserved | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + def __init__(self, vni): super(vxlan, self).__init__() self.vni = vni @@ -95,6 +76,7 @@ def vni_from_bin(buf): """ Converts binary representation VNI to integer. + :param buf: binary representation of VNI. :return: VNI integer. """ @@ -104,6 +86,7 @@ def vni_to_bin(vni): """ Converts integer VNI to binary representation. + :param vni: integer of VNI :return: binary representation of VNI. """ diff -Nru ryu-4.9/ryu/lib/packet/zebra.py ryu-4.15/ryu/lib/packet/zebra.py --- ryu-4.9/ryu/lib/packet/zebra.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/lib/packet/zebra.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,2016 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Zebra protocol parser/serializer + +Zebra Protocol is used to communicate with the zebra daemon. +""" + +import abc +import socket +import struct +import logging + +import netaddr +import six + +from ryu.lib import addrconv +from ryu.lib import ip +from ryu.lib import stringify +from ryu.lib import type_desc +from . import packet_base +from . import bgp +from . 
import safi as packet_safi + + +LOG = logging.getLogger(__name__) + + +# Constants in quagga/lib/zebra.h + +# Default Zebra TCP port +ZEBRA_PORT = 2600 + +# Zebra message types +ZEBRA_INTERFACE_ADD = 1 +ZEBRA_INTERFACE_DELETE = 2 +ZEBRA_INTERFACE_ADDRESS_ADD = 3 +ZEBRA_INTERFACE_ADDRESS_DELETE = 4 +ZEBRA_INTERFACE_UP = 5 +ZEBRA_INTERFACE_DOWN = 6 +ZEBRA_IPV4_ROUTE_ADD = 7 +ZEBRA_IPV4_ROUTE_DELETE = 8 +ZEBRA_IPV6_ROUTE_ADD = 9 +ZEBRA_IPV6_ROUTE_DELETE = 10 +ZEBRA_REDISTRIBUTE_ADD = 11 +ZEBRA_REDISTRIBUTE_DELETE = 12 +ZEBRA_REDISTRIBUTE_DEFAULT_ADD = 13 +ZEBRA_REDISTRIBUTE_DEFAULT_DELETE = 14 +ZEBRA_IPV4_NEXTHOP_LOOKUP = 15 +ZEBRA_IPV6_NEXTHOP_LOOKUP = 16 +ZEBRA_IPV4_IMPORT_LOOKUP = 17 +ZEBRA_IPV6_IMPORT_LOOKUP = 18 +ZEBRA_INTERFACE_RENAME = 19 +ZEBRA_ROUTER_ID_ADD = 20 +ZEBRA_ROUTER_ID_DELETE = 21 +ZEBRA_ROUTER_ID_UPDATE = 22 +ZEBRA_HELLO = 23 +ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB = 24 +ZEBRA_VRF_UNREGISTER = 25 +ZEBRA_INTERFACE_LINK_PARAMS = 26 +ZEBRA_NEXTHOP_REGISTER = 27 +ZEBRA_NEXTHOP_UNREGISTER = 28 +ZEBRA_NEXTHOP_UPDATE = 29 +ZEBRA_MESSAGE_MAX = 30 + +# Zebra route types +ZEBRA_ROUTE_SYSTEM = 0 +ZEBRA_ROUTE_KERNEL = 1 +ZEBRA_ROUTE_CONNECT = 2 +ZEBRA_ROUTE_STATIC = 3 +ZEBRA_ROUTE_RIP = 4 +ZEBRA_ROUTE_RIPNG = 5 +ZEBRA_ROUTE_OSPF = 6 +ZEBRA_ROUTE_OSPF6 = 7 +ZEBRA_ROUTE_ISIS = 8 +ZEBRA_ROUTE_BGP = 9 +ZEBRA_ROUTE_PIM = 10 +ZEBRA_ROUTE_HSLS = 11 +ZEBRA_ROUTE_OLSR = 12 +ZEBRA_ROUTE_BABEL = 13 +ZEBRA_ROUTE_MAX = 14 + +# Zebra message flags +ZEBRA_FLAG_INTERNAL = 0x01 +ZEBRA_FLAG_SELFROUTE = 0x02 +ZEBRA_FLAG_BLACKHOLE = 0x04 +ZEBRA_FLAG_IBGP = 0x08 +ZEBRA_FLAG_SELECTED = 0x10 +ZEBRA_FLAG_FIB_OVERRIDE = 0x20 +ZEBRA_FLAG_STATIC = 0x40 +ZEBRA_FLAG_REJECT = 0x80 + +# Zebra nexthop flags +ZEBRA_NEXTHOP_IFINDEX = 1 +ZEBRA_NEXTHOP_IFNAME = 2 +ZEBRA_NEXTHOP_IPV4 = 3 +ZEBRA_NEXTHOP_IPV4_IFINDEX = 4 +ZEBRA_NEXTHOP_IPV4_IFNAME = 5 +ZEBRA_NEXTHOP_IPV6 = 6 +ZEBRA_NEXTHOP_IPV6_IFINDEX = 7 +ZEBRA_NEXTHOP_IPV6_IFNAME = 8 +ZEBRA_NEXTHOP_BLACKHOLE = 9 + + +# Constants in quagga/lib/zclient.h + +# Zebra API message flags +ZAPI_MESSAGE_NEXTHOP = 0x01 +ZAPI_MESSAGE_IFINDEX = 0x02 +ZAPI_MESSAGE_DISTANCE = 0x04 +ZAPI_MESSAGE_METRIC = 0x08 +ZAPI_MESSAGE_MTU = 0x10 +ZAPI_MESSAGE_TAG = 0x20 + + +# Constants in quagga/lib/if.h + +# Interface name length +# Linux define value in /usr/include/linux/if.h. +# #define IFNAMSIZ 16 +# FreeBSD define value in /usr/include/net/if.h. 
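# The ZAPI_MESSAGE_* constants above are bit flags: the "message" field of the
# ZEBRA_IPV*_ROUTE_* bodies advertises which optional fields follow.  A mask
# built by hand looks like this (serialize() also ORs flags in automatically).
from ryu.lib.packet import zebra

message = zebra.ZAPI_MESSAGE_NEXTHOP | zebra.ZAPI_MESSAGE_METRIC
assert message == 0x09
assert not message & zebra.ZAPI_MESSAGE_DISTANCE   # no distance field present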
+# #define IFNAMSIZ 16 +INTERFACE_NAMSIZE = 20 +INTERFACE_HWADDR_MAX = 20 + +# Zebra internal interface status +ZEBRA_INTERFACE_ACTIVE = 1 << 0 +ZEBRA_INTERFACE_SUB = 1 << 1 +ZEBRA_INTERFACE_LINKDETECTION = 1 << 2 + +# Zebra link layer types +ZEBRA_LLT_UNKNOWN = 0 +ZEBRA_LLT_ETHER = 1 +ZEBRA_LLT_EETHER = 2 +ZEBRA_LLT_AX25 = 3 +ZEBRA_LLT_PRONET = 4 +ZEBRA_LLT_IEEE802 = 5 +ZEBRA_LLT_ARCNET = 6 +ZEBRA_LLT_APPLETLK = 7 +ZEBRA_LLT_DLCI = 8 +ZEBRA_LLT_ATM = 9 +ZEBRA_LLT_METRICOM = 10 +ZEBRA_LLT_IEEE1394 = 11 +ZEBRA_LLT_EUI64 = 12 +ZEBRA_LLT_INFINIBAND = 13 +ZEBRA_LLT_SLIP = 14 +ZEBRA_LLT_CSLIP = 15 +ZEBRA_LLT_SLIP6 = 16 +ZEBRA_LLT_CSLIP6 = 17 +ZEBRA_LLT_RSRVD = 18 +ZEBRA_LLT_ADAPT = 19 +ZEBRA_LLT_ROSE = 20 +ZEBRA_LLT_X25 = 21 +ZEBRA_LLT_PPP = 22 +ZEBRA_LLT_CHDLC = 23 +ZEBRA_LLT_LAPB = 24 +ZEBRA_LLT_RAWHDLC = 25 +ZEBRA_LLT_IPIP = 26 +ZEBRA_LLT_IPIP6 = 27 +ZEBRA_LLT_FRAD = 28 +ZEBRA_LLT_SKIP = 29 +ZEBRA_LLT_LOOPBACK = 30 +ZEBRA_LLT_LOCALTLK = 31 +ZEBRA_LLT_FDDI = 32 +ZEBRA_LLT_SIT = 33 +ZEBRA_LLT_IPDDP = 34 +ZEBRA_LLT_IPGRE = 35 +ZEBRA_LLT_IP6GRE = 36 +ZEBRA_LLT_PIMREG = 37 +ZEBRA_LLT_HIPPI = 38 +ZEBRA_LLT_ECONET = 39 +ZEBRA_LLT_IRDA = 40 +ZEBRA_LLT_FCPP = 41 +ZEBRA_LLT_FCAL = 42 +ZEBRA_LLT_FCPL = 43 +ZEBRA_LLT_FCFABRIC = 44 +ZEBRA_LLT_IEEE802_TR = 45 +ZEBRA_LLT_IEEE80211 = 46 +ZEBRA_LLT_IEEE80211_RADIOTAP = 47 +ZEBRA_LLT_IEEE802154 = 48 +ZEBRA_LLT_IEEE802154_PHY = 49 + +# "non-official" architectural constants +MAX_CLASS_TYPE = 8 + + +# Utility functions/classes + +IPv4Prefix = bgp.IPAddrPrefix +IPv6Prefix = bgp.IP6AddrPrefix + + +def _parse_ip_prefix(family, buf): + if family == socket.AF_INET: + prefix, rest = bgp.IPAddrPrefix.parser(buf) + elif family == socket.AF_INET6: + prefix, rest = IPv6Prefix.parser(buf) + else: + raise struct.error('Unsupported family: %d' % family) + + return prefix.prefix, rest + + +def _serialize_ip_prefix(prefix): + if ip.valid_ipv4(prefix): + prefix_addr, prefix_num = prefix.split('/') + return bgp.IPAddrPrefix(int(prefix_num), prefix_addr).serialize() + elif ip.valid_ipv6(prefix): + prefix_addr, prefix_num = prefix.split('/') + return IPv6Prefix(int(prefix_num), prefix_addr).serialize() + else: + raise ValueError('Invalid prefix: %s' % prefix) + + +class InterfaceLinkParams(stringify.StringifyMixin): + """ + Interface Link Parameters class for if_link_params structure. 
+ """ + # Interface Link Parameters structure: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Status of Link Parameters | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Traffic Engineering metric | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (float) Maximum Bandwidth | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (float) Maximum Reservable Bandwidth | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (float) Unreserved Bandwidth per Class Type * MAX_CLASS_TYPE | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Administrative group | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Remote AS number | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Remote IP address | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Link Average Delay | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Link Min Delay | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Link Max Delay | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Link Delay Variation | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (float) Link Packet Loss | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (float) Residual Bandwidth | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (float) Available Bandwidth | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (float) Utilized Bandwidth | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!IIff' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + _REPEATED_FMT = '!f' + REPEATED_SIZE = struct.calcsize(_REPEATED_FMT) + _FOOTER_FMT = '!II4sIIIIffff' + FOOTER_SIZE = struct.calcsize(_FOOTER_FMT) + + def __init__(self, lp_status, te_metric, max_bw, max_reserved_bw, + unreserved_bw, admin_group, remote_as, remote_ip, + average_delay, min_delay, max_delay, delay_var, pkt_loss, + residual_bw, average_bw, utilized_bw): + super(InterfaceLinkParams, self).__init__() + self.lp_status = lp_status + self.te_metric = te_metric + self.max_bw = max_bw + self.max_reserved_bw = max_reserved_bw + assert isinstance(unreserved_bw, (list, tuple)) + assert len(unreserved_bw) == MAX_CLASS_TYPE + self.unreserved_bw = unreserved_bw + self.admin_group = admin_group + self.remote_as = remote_as + assert netaddr.valid_ipv4(remote_ip) + self.remote_ip = remote_ip + self.average_delay = average_delay + self.min_delay = min_delay + self.max_delay = max_delay + self.delay_var = delay_var + self.pkt_loss = pkt_loss + self.residual_bw = residual_bw + self.average_bw = average_bw + self.utilized_bw = utilized_bw + + @classmethod + def parse(cls, buf): + (lp_status, te_metric, max_bw, + max_reserved_bw) = struct.unpack_from(cls._HEADER_FMT, buf) + offset = cls.HEADER_SIZE + + unreserved_bw = [] + for _ in range(MAX_CLASS_TYPE): + (u_bw,) = struct.unpack_from(cls._REPEATED_FMT, buf, offset) + unreserved_bw.append(u_bw) + offset += cls.REPEATED_SIZE + + (admin_group, remote_as, remote_ip, average_delay, min_delay, + max_delay, delay_var, pkt_loss, residual_bw, average_bw, + utilized_bw) = struct.unpack_from( + cls._FOOTER_FMT, buf, offset) + offset += cls.FOOTER_SIZE + + remote_ip = 
addrconv.ipv4.bin_to_text(remote_ip) + + return cls(lp_status, te_metric, max_bw, max_reserved_bw, + unreserved_bw, admin_group, remote_as, remote_ip, + average_delay, min_delay, max_delay, delay_var, pkt_loss, + residual_bw, average_bw, utilized_bw), buf[offset:] + + def serialize(self): + buf = struct.pack( + self._HEADER_FMT, self.lp_status, self.te_metric, self.max_bw, + self.max_reserved_bw) + + for u_bw in self.unreserved_bw: + buf += struct.pack(self._REPEATED_FMT, u_bw) + + remote_ip = addrconv.ipv4.text_to_bin(self.remote_ip) + + buf += struct.pack( + self._FOOTER_FMT, self.admin_group, self.remote_as, remote_ip, + self.average_delay, self.min_delay, self.max_delay, + self.delay_var, self.pkt_loss, self.residual_bw, self.average_bw, + self.utilized_bw) + + return buf + + +@six.add_metaclass(abc.ABCMeta) +class _NextHop(type_desc.TypeDisp, stringify.StringifyMixin): + """ + Base class for Zebra Nexthop structure. + """ + # Zebra Nexthop structure: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Nexthop Type | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | IPv4/v6 address or Interface Index number (Variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!B' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + def __init__(self, ifindex=None, ifname=None, addr=None, type_=None): + super(_NextHop, self).__init__() + self.ifindex = ifindex + self.ifname = ifname + self.addr = addr + if type_ is None: + type_ = self._rev_lookup_type(self.__class__) + self.type = type_ + + @classmethod + @abc.abstractmethod + def parse(cls, buf): + (type_,) = struct.unpack_from(cls._HEADER_FMT, buf) + rest = buf[cls.HEADER_SIZE:] + + subcls = cls._lookup_type(type_) + if subcls is None: + raise struct.error('unsupported Nexthop type: %d' % type_) + + return subcls.parse(rest) + + @abc.abstractmethod + def _serialize(self): + return b'' + + def serialize(self): + return struct.pack(self._HEADER_FMT, self.type) + self._serialize() + + +_NEXTHOP_COUNT_FMT = '!B' # nexthop_count +_NEXTHOP_COUNT_SIZE = struct.calcsize(_NEXTHOP_COUNT_FMT) + + +def _parse_nexthops(buf): + (nexthop_count,) = struct.unpack_from(_NEXTHOP_COUNT_FMT, buf) + rest = buf[_NEXTHOP_COUNT_SIZE:] + + nexthops = [] + for _ in range(nexthop_count): + nexthop, rest = _NextHop.parse(rest) + nexthops.append(nexthop) + + return nexthops, rest + + +def _serialize_nexthops(nexthops): + nexthop_count = len(nexthops) + buf = struct.pack(_NEXTHOP_COUNT_FMT, nexthop_count) + + if nexthop_count == 0: + return buf + + for nexthop in nexthops: + buf += nexthop.serialize() + + return buf + + +@_NextHop.register_type(ZEBRA_NEXTHOP_IFINDEX) +class NextHopIFIndex(_NextHop): + """ + Nexthop class for ZEBRA_NEXTHOP_IFINDEX type. + """ + _BODY_FMT = '!I' # ifindex + BODY_SIZE = struct.calcsize(_BODY_FMT) + + @classmethod + def parse(cls, buf): + (ifindex,) = struct.unpack_from(cls._BODY_FMT, buf) + rest = buf[cls.BODY_SIZE:] + + return cls(ifindex=ifindex), rest + + def _serialize(self): + return struct.pack(self._BODY_FMT, self.ifindex) + + +@_NextHop.register_type(ZEBRA_NEXTHOP_IFNAME) +class NextHopIFName(_NextHop): + """ + Nexthop class for ZEBRA_NEXTHOP_IFNAME type. 
+ """ + _BODY_FMT = '!I' # ifindex + BODY_SIZE = struct.calcsize(_BODY_FMT) + + @classmethod + def parse(cls, buf): + (ifindex,) = struct.unpack_from(cls._BODY_FMT, buf) + rest = buf[cls.BODY_SIZE:] + + return cls(ifindex=ifindex), rest + + def _serialize(self): + return struct.pack(self._BODY_FMT, self.ifindex) + + +@_NextHop.register_type(ZEBRA_NEXTHOP_IPV4) +class NextHopIPv4(_NextHop): + """ + Nexthop class for ZEBRA_NEXTHOP_IPV4 type. + """ + _BODY_FMT = '!4s' # addr(IPv4) + BODY_SIZE = struct.calcsize(_BODY_FMT) + + @classmethod + def parse(cls, buf): + addr = addrconv.ipv4.bin_to_text(buf[:cls.BODY_SIZE]) + rest = buf[cls.BODY_SIZE:] + + return cls(addr=addr), rest + + def _serialize(self): + return addrconv.ipv4.text_to_bin(self.addr) + + +@_NextHop.register_type(ZEBRA_NEXTHOP_IPV4_IFINDEX) +class NextHopIPv4IFIndex(_NextHop): + """ + Nexthop class for ZEBRA_NEXTHOP_IPV4_IFINDEX type. + """ + _BODY_FMT = '!4sI' # addr(IPv4), ifindex + BODY_SIZE = struct.calcsize(_BODY_FMT) + + @classmethod + def parse(cls, buf): + (addr, ifindex) = struct.unpack_from(cls._BODY_FMT, buf) + addr = addrconv.ipv4.bin_to_text(addr) + rest = buf[cls.BODY_SIZE:] + + return cls(ifindex=ifindex, addr=addr), rest + + def _serialize(self): + addr = addrconv.ipv4.text_to_bin(self.addr) + + return struct.pack(self._BODY_FMT, addr, self.ifindex) + + +@_NextHop.register_type(ZEBRA_NEXTHOP_IPV4_IFNAME) +class NextHopIPv4IFName(_NextHop): + """ + Nexthop class for ZEBRA_NEXTHOP_IPV4_IFNAME type. + """ + _BODY_FMT = '!4sI' # addr(IPv4), ifindex + BODY_SIZE = struct.calcsize(_BODY_FMT) + + @classmethod + def parse(cls, buf): + (addr, ifindex) = struct.unpack_from(cls._BODY_FMT, buf) + addr = addrconv.ipv4.bin_to_text(addr) + rest = buf[cls.BODY_SIZE:] + + return cls(ifindex=ifindex, addr=addr), rest + + def _serialize(self): + addr = addrconv.ipv4.text_to_bin(self.addr) + + return struct.pack(self._BODY_FMT, addr, self.ifindex) + + +@_NextHop.register_type(ZEBRA_NEXTHOP_IPV6) +class NextHopIPv6(_NextHop): + """ + Nexthop class for ZEBRA_NEXTHOP_IPV6 type. + """ + _BODY_FMT = '!16s' # addr(IPv6) + BODY_SIZE = struct.calcsize(_BODY_FMT) + + @classmethod + def parse(cls, buf): + addr = addrconv.ipv6.bin_to_text(buf[:cls.BODY_SIZE]) + rest = buf[cls.BODY_SIZE:] + + return cls(addr=addr), rest + + def _serialize(self): + return addrconv.ipv6.text_to_bin(self.addr) + + +@_NextHop.register_type(ZEBRA_NEXTHOP_IPV6_IFINDEX) +class NextHopIPv6IFIndex(_NextHop): + """ + Nexthop class for ZEBRA_NEXTHOP_IPV6_IFINDEX type. + """ + _BODY_FMT = '!16sI' # addr(IPv6), ifindex + BODY_SIZE = struct.calcsize(_BODY_FMT) + + @classmethod + def parse(cls, buf): + (addr, ifindex) = struct.unpack_from(cls._BODY_FMT, buf) + addr = addrconv.ipv6.bin_to_text(addr) + rest = buf[cls.BODY_SIZE:] + + return cls(ifindex=ifindex, addr=addr), rest + + def _serialize(self): + addr = addrconv.ipv6.text_to_bin(self.addr) + + return struct.pack(self._BODY_FMT, addr, self.ifindex) + + +@_NextHop.register_type(ZEBRA_NEXTHOP_IPV6_IFNAME) +class NextHopIPv6IFName(_NextHop): + """ + Nexthop class for ZEBRA_NEXTHOP_IPV6_IFNAME type. 
+ """ + _BODY_FMT = '!16sI' # addr(IPv6), ifindex + BODY_SIZE = struct.calcsize(_BODY_FMT) + + @classmethod + def parse(cls, buf): + (addr, ifindex) = struct.unpack_from(cls._BODY_FMT, buf) + addr = addrconv.ipv6.bin_to_text(addr) + rest = buf[cls.BODY_SIZE:] + + return cls(ifindex=ifindex, addr=addr), rest + + def _serialize(self): + addr = addrconv.ipv6.text_to_bin(self.addr) + + return struct.pack(self._BODY_FMT, addr, self.ifindex) + + +@_NextHop.register_type(ZEBRA_NEXTHOP_BLACKHOLE) +class NextHopBlackhole(_NextHop): + """ + Nexthop class for ZEBRA_NEXTHOP_BLACKHOLE type. + """ + + @classmethod + def parse(cls, buf): + return cls(), buf + + def _serialize(self): + return b'' + + +class RegisteredNexthop(stringify.StringifyMixin): + """ + Unit of ZEBRA_NEXTHOP_REGISTER message body. + """ + # Unit of Zebra Nexthop Register message body: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Connected | Family | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | IPv4/v6 Prefix (Variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!?H' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + def __init__(self, connected, family, prefix): + super(RegisteredNexthop, self).__init__() + self.connected = connected + self.family = family + if isinstance(prefix, (IPv4Prefix, IPv6Prefix)): + prefix = prefix.prefix + self.prefix = prefix + + @classmethod + def parse(cls, buf): + (connected, family) = struct.unpack_from(cls._HEADER_FMT, buf) + rest = buf[cls.HEADER_SIZE:] + + prefix, rest = _parse_ip_prefix(family, rest) + + return cls(connected, family, prefix), rest + + def serialize(self): + buf = struct.pack(self._HEADER_FMT, self.connected, self.family) + + return buf + _serialize_ip_prefix(self.prefix) + + +# Zebra message class + +class ZebraMessage(packet_base.PacketBase): + """ + Zebra protocol parser/serializer class. + + An instance has the following attributes at least. + Most of them are same to the on-wire counterparts but in host byte order. + __init__ takes the corresponding args in this order. + + ============== ========================================================== + Attribute Description + ============== ========================================================== + length Total packet length including this header. + The minimum length is 3 bytes for version 0 messages, + 6 bytes for version 1/2 messages and 8 bytes for version + 3 messages. + version Version number of the Zebra protocol message. + To instantiate messages with other than the default + version, ``version`` must be specified. + vrf_id VRF ID for the route contained in message. + Not present in version 0/1/2 messages in the on-wire + structure, and always 0 for theses version. + command Zebra Protocol command, which denotes message type. + body Messages body. + An instance of subclass of ``_ZebraMessageBody`` named + like "Zebra + " (e.g., ``ZebraHello``). + Or ``None`` if message does not contain any body. + ============== ========================================================== + + .. Note:: + + To instantiate Zebra messages, ``command`` can be omitted when the + valid ``body`` is specified. 
+ + :: + + >>> from ryu.lib.packet import zebra + >>> zebra.ZebraMessage(body=zebra.ZebraHello()) + ZebraMessage(body=ZebraHello(route_type=14),command=23, + length=None,version=3,vrf_id=0) + + On the other hand, if ``body`` is omitted, ``command`` must be + specified. + + :: + + >>> zebra.ZebraMessage(command=zebra.ZEBRA_INTERFACE_ADD) + ZebraMessage(body=None,command=1,length=None,version=3,vrf_id=0) + """ + + # Zebra Protocol Common Header (version 0): + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Length | Command | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _V0_HEADER_FMT = '!HB' + V0_HEADER_SIZE = struct.calcsize(_V0_HEADER_FMT) + _MIN_LEN = V0_HEADER_SIZE + + # Zebra Protocol Common Header (version 1): + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Length | Marker | Version | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Command | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _V1_HEADER_FMT = '!HBBH' + V1_HEADER_SIZE = struct.calcsize(_V1_HEADER_FMT) + + # Zebra Protocol Common Header (version 3): + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Length | Marker | Version | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | VRF ID | Command | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _V3_HEADER_FMT = '!HBBHH' + V3_HEADER_SIZE = struct.calcsize(_V3_HEADER_FMT) + + # Default Zebra protocol version + _DEFAULT_VERSION = 3 + + # Note: Marker should be 0xff(=255) in the version>=1 header. 
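# Minimal framing sketch: a body-less request is just the common header, and
# the length field is fixed up by serialize_header() (8 bytes for the default
# version 3 header).
from ryu.lib.packet import zebra

msg = zebra.ZebraMessage(command=zebra.ZEBRA_ROUTER_ID_ADD)
buf = msg.serialize()
assert msg.length == zebra.ZebraMessage.V3_HEADER_SIZE == len(buf)

parsed, _cls, rest = zebra.ZebraMessage.parser(buf)
assert parsed.command == zebra.ZEBRA_ROUTER_ID_ADD and rest == b''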
+ _MARKER = 0xff + + def __init__(self, length=None, version=_DEFAULT_VERSION, + vrf_id=0, command=None, body=None): + super(ZebraMessage, self).__init__() + self.length = length + self.version = version + self.vrf_id = vrf_id + if body is None: + assert command is not None + else: + assert isinstance(body, _ZebraMessageBody) + if command is None: + command = _ZebraMessageBody.rev_lookup_command(body.__class__) + self.command = command + self.body = body + + @classmethod + def get_header_size(cls, version): + if version == 0: + return cls.V0_HEADER_SIZE + elif version in [1, 2]: + return cls.V1_HEADER_SIZE + elif version == 3: + return cls.V3_HEADER_SIZE + else: + raise ValueError( + 'Unsupported Zebra protocol version: %d' + % version) + + @classmethod + def parse_header(cls, buf): + (length, marker) = struct.unpack_from(cls._V0_HEADER_FMT, buf) + if marker != cls._MARKER: + command = marker + body_buf = buf[cls.V0_HEADER_SIZE:length] + # version=0, vrf_id=0 + return length, 0, 0, command, body_buf + + (length, marker, version, command) = struct.unpack_from( + cls._V1_HEADER_FMT, buf) + if version in [1, 2]: + body_buf = buf[cls.V1_HEADER_SIZE:length] + # vrf_id=0 + return length, version, 0, command, body_buf + + (length, marker, version, vrf_id, command) = struct.unpack_from( + cls._V3_HEADER_FMT, buf) + if version == 3: + body_buf = buf[cls.V3_HEADER_SIZE:length] + return length, version, vrf_id, command, body_buf + + raise struct.error( + 'Failed to parse Zebra protocol header: ' + 'marker=%d, version=%d' % (marker, version)) + + @classmethod + def _parser_impl(cls, buf, body_parser='parse'): + buf = six.binary_type(buf) + (length, version, vrf_id, command, + body_buf) = cls.parse_header(buf) + + if body_buf: + body_cls = _ZebraMessageBody.lookup_command(command) + _parser = getattr(body_cls, body_parser) + body = _parser(body_buf) + else: + body = None + + rest = buf[length:] + + return cls(length, version, vrf_id, command, body), cls, rest + + @classmethod + def parser(cls, buf): + return cls._parser_impl(buf) + + def serialize_header(self, body_len): + if self.version == 0: + self.length = self.V0_HEADER_SIZE + body_len # fixup + return struct.pack( + self._V0_HEADER_FMT, + self.length, self.command) + elif self.version in [1, 2]: + self.length = self.V1_HEADER_SIZE + body_len # fixup + return struct.pack( + self._V1_HEADER_FMT, + self.length, self._MARKER, self.version, + self.command) + elif self.version == 3: + self.length = self.V3_HEADER_SIZE + body_len # fixup + return struct.pack( + self._V3_HEADER_FMT, + self.length, self._MARKER, self.version, + self.vrf_id, self.command) + else: + raise ValueError( + 'Unsupported Zebra protocol version: %d' + % self.version) + + def serialize(self, _payload=None, _prev=None): + if isinstance(self.body, _ZebraMessageBody): + body = self.body.serialize() + else: + body = b'' + + return self.serialize_header(len(body)) + body + + +class _ZebraMessageFromZebra(ZebraMessage): + """ + This class is corresponding to the message sent from Zebra daemon. + """ + + @classmethod + def parser(cls, buf): + return cls._parser_impl(buf, body_parser='parse_from_zebra') + + +# Alias +zebra = ZebraMessage + + +# Zebra message body classes + +class _ZebraMessageBody(type_desc.TypeDisp, stringify.StringifyMixin): + """ + Base class for Zebra message body. 
+ """ + + @classmethod + def lookup_command(cls, command): + return cls._lookup_type(command) + + @classmethod + def rev_lookup_command(cls, body_cls): + return cls._rev_lookup_type(body_cls) + + @classmethod + def parse(cls, buf): + return cls() + + @classmethod + def parse_from_zebra(cls, buf): + return cls.parse(buf) + + def serialize(self): + return b'' + + +@_ZebraMessageBody.register_unknown_type() +class ZebraUnknownMessage(_ZebraMessageBody): + """ + Message body class for Unknown command. + """ + + def __init__(self, buf): + super(ZebraUnknownMessage, self).__init__() + self.buf = buf + + @classmethod + def parse(cls, buf): + return cls(buf) + + def serialize(self): + return self.buf + + +@six.add_metaclass(abc.ABCMeta) +class _ZebraInterface(_ZebraMessageBody): + """ + Base class for ZEBRA_INTERFACE_ADD, ZEBRA_INTERFACE_DELETE, + ZEBRA_INTERFACE_UP and ZEBRA_INTERFACE_DOWN message body. + """ + # Zebra Interface Add/Delete message body: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Interface Name (INTERFACE_NAMSIZE bytes length) | + # | | + # | | + # | | + # | | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Interface index | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | status | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Interface flags | + # | | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Metric | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Interface's MTU for IPv4 | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Interface's MTU for IPv6 | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Bandwidth | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (Link Layer Type) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Hardware Address Length | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Hardware Address if HW length different from 0 | + # | ... max is INTERFACE_HWADDR_MAX | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | link_params? | Whether a link-params follows: 1 or 0. + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Link params 0 or 1 INTERFACE_LINK_PARAMS_SIZE sized | + # | .... (struct if_link_params). | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!%dsIBQIIIIII' % INTERFACE_NAMSIZE + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + _HEADER_SHORT_FMT = '!%dsIBQIIIII' % INTERFACE_NAMSIZE + HEADER_SHORT_SIZE = struct.calcsize(_HEADER_SHORT_FMT) + + # link_params_state (whether a link-params follows) + _LP_STATE_FMT = '!?' 
+ LP_STATE_SIZE = struct.calcsize(_LP_STATE_FMT) + # See InterfaceLinkParams class for Link params structure + + def __init__(self, ifname, ifindex, status, if_flags, + metric, ifmtu, ifmtu6, bandwidth, + ll_type=None, hw_addr_len=0, hw_addr=None, + link_params=None): + super(_ZebraInterface, self).__init__() + self.ifname = ifname + self.ifindex = ifindex + self.status = status + self.if_flags = if_flags + self.metric = metric + self.ifmtu = ifmtu + self.ifmtu6 = ifmtu6 + self.bandwidth = bandwidth + self.ll_type = ll_type + self.hw_addr_lenght = hw_addr_len + hw_addr = hw_addr or b'' + self.hw_addr = hw_addr + assert (isinstance(link_params, InterfaceLinkParams) + or link_params is None) + self.link_params = link_params + + @classmethod + def parse(cls, buf): + ll_type = None + if (len(buf) == cls.HEADER_SHORT_SIZE + 6 # with MAC addr + or len(buf) == cls.HEADER_SHORT_SIZE): # without MAC addr + # Assumption: Case for version<=2 + (ifname, ifindex, status, if_flags, metric, + ifmtu, ifmtu6, bandwidth, + hw_addr_len) = struct.unpack_from(cls._HEADER_SHORT_FMT, buf) + rest = buf[cls.HEADER_SHORT_SIZE:] + else: + (ifname, ifindex, status, if_flags, metric, + ifmtu, ifmtu6, bandwidth, ll_type, + hw_addr_len) = struct.unpack_from(cls._HEADER_FMT, buf) + rest = buf[cls.HEADER_SIZE:] + ifname = str(six.text_type(ifname.strip(b'\x00'), 'ascii')) + + hw_addr_len = min(hw_addr_len, INTERFACE_HWADDR_MAX) + hw_addr_bin = rest[:hw_addr_len] + rest = rest[hw_addr_len:] + if 0 < hw_addr_len < 7: + # Assuming MAC address + hw_addr = addrconv.mac.bin_to_text( + hw_addr_bin + b'\x00' * (6 - hw_addr_len)) + else: + # Unknown hardware address + hw_addr = hw_addr_bin + + if not rest: + return cls(ifname, ifindex, status, if_flags, metric, + ifmtu, ifmtu6, bandwidth, ll_type, + hw_addr_len, hw_addr) + + (link_param_state,) = struct.unpack_from(cls._LP_STATE_FMT, rest) + rest = rest[cls.LP_STATE_SIZE:] + + if link_param_state: + link_params, rest = InterfaceLinkParams.parse(rest) + else: + link_params = None + + return cls(ifname, ifindex, status, if_flags, metric, ifmtu, ifmtu6, + bandwidth, ll_type, hw_addr_len, hw_addr, link_params) + + def serialize(self): + # fixup + if netaddr.valid_mac(self.hw_addr): + # MAC address + hw_addr_len = 6 + hw_addr = addrconv.mac.text_to_bin(self.hw_addr) + else: + # Unknown hardware address + hw_addr_len = len(self.hw_addr) + hw_addr = self.hw_addr + + if self.ll_type: + # Assumption: version<=2 + buf = struct.pack( + self._HEADER_FMT, + self.ifname.encode('ascii'), self.ifindex, self.status, + self.if_flags, self.metric, self.ifmtu, self.ifmtu6, + self.bandwidth, self.ll_type, hw_addr_len) + hw_addr + else: + buf = struct.pack( + self._HEADER_SHORT_FMT, + self.ifname.encode('ascii'), self.ifindex, self.status, + self.if_flags, self.metric, self.ifmtu, self.ifmtu6, + self.bandwidth, hw_addr_len) + hw_addr + + if isinstance(self.link_params, InterfaceLinkParams): + buf += struct.pack(self._LP_STATE_FMT, True) + buf += self.link_params.serialize() + elif self.ll_type is None: + # Assumption: version<=2 + pass + else: + buf += struct.pack(self._LP_STATE_FMT, False) + + return buf + + +@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_ADD) +class ZebraInterfaceAdd(_ZebraInterface): + """ + Message body class for ZEBRA_INTERFACE_ADD. + """ + + +@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_DELETE) +class ZebraInterfaceDelete(_ZebraInterface): + """ + Message body class for ZEBRA_INTERFACE_DELETE. 
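# Sketch of a ZEBRA_INTERFACE_ADD body in the version<=2 layout (no link-layer
# type and no link params, so the short header format is used).  Field values
# are illustrative only.
from ryu.lib.packet import zebra

iface = zebra.ZebraInterfaceAdd(
    ifname='eth0', ifindex=2, status=zebra.ZEBRA_INTERFACE_ACTIVE,
    if_flags=0, metric=1, ifmtu=1500, ifmtu6=1500, bandwidth=0,
    hw_addr='aa:bb:cc:dd:ee:ff')
buf = zebra.ZebraMessage(body=iface, version=2).serialize()
# The command (ZEBRA_INTERFACE_ADD) is derived from the body class.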
+ """ + + +@six.add_metaclass(abc.ABCMeta) +class _ZebraInterfaceAddress(_ZebraMessageBody): + """ + Base class for ZEBRA_INTERFACE_ADDRESS_ADD and + ZEBRA_INTERFACE_ADDRESS_DELETE message body. + """ + # Zebra Interface Address Add/Delete message body: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Interface index | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | IFC Flags | flags for connected address + # +-+-+-+-+-+-+-+-+ + # | Family | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | IPv4/v6 Prefix (Variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Prefix len | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | IPv4/v6 Destination Address (Variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!IBB' # ifindex, ifc_flags, family + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + _IPV4_BODY_FMT = '!4sB4s' # prefix, prefix_len, dest + _IPV6_BODY_FMT = '!16sB16s' + + def __init__(self, ifindex, ifc_flags, family, prefix, dest): + super(_ZebraInterfaceAddress, self).__init__() + self.ifindex = ifindex + self.ifc_flags = ifc_flags + self.family = family + if isinstance(prefix, (IPv4Prefix, IPv6Prefix)): + prefix = prefix.prefix + self.prefix = prefix + assert netaddr.valid_ipv4(dest) or netaddr.valid_ipv6(dest) + self.dest = dest + + @classmethod + def parse(cls, buf): + (ifindex, ifc_flags, + family) = struct.unpack_from(cls._HEADER_FMT, buf) + rest = buf[cls.HEADER_SIZE:] + + if socket.AF_INET == family: + (prefix, p_len, + dest) = struct.unpack_from(cls._IPV4_BODY_FMT, rest) + prefix = '%s/%d' % (addrconv.ipv4.bin_to_text(prefix), p_len) + dest = addrconv.ipv4.bin_to_text(dest) + elif socket.AF_INET6 == family: + (prefix, p_len, + dest) = struct.unpack_from(cls._IPV6_BODY_FMT, rest) + prefix = '%s/%d' % (addrconv.ipv6.bin_to_text(prefix), p_len) + dest = addrconv.ipv6.bin_to_text(dest) + else: + raise struct.error('Unsupported family: %d' % family) + + return cls(ifindex, ifc_flags, family, prefix, dest) + + def serialize(self): + if ip.valid_ipv4(self.prefix): + self.family = socket.AF_INET # fixup + prefix_addr, prefix_num = self.prefix.split('/') + body_bin = struct.pack( + self._IPV4_BODY_FMT, + addrconv.ipv4.text_to_bin(prefix_addr), + int(prefix_num), + addrconv.ipv4.text_to_bin(self.dest)) + elif ip.valid_ipv6(self.prefix): + self.family = socket.AF_INET6 # fixup + prefix_addr, prefix_num = self.prefix.split('/') + body_bin = struct.pack( + self._IPV6_BODY_FMT, + addrconv.ipv6.text_to_bin(prefix_addr), + int(prefix_num), + addrconv.ipv6.text_to_bin(self.dest)) + else: + raise ValueError( + 'Invalid address family for prefix=%s and dest=%s' + % (self.prefix, self.dest)) + + buf = struct.pack(self._HEADER_FMT, + self.ifindex, self.ifc_flags, self.family) + + return buf + body_bin + + +@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_ADDRESS_ADD) +class ZebraInterfaceAddressAdd(_ZebraInterfaceAddress): + """ + Message body class for ZEBRA_INTERFACE_ADDRESS_ADD. + """ + + +@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_ADDRESS_DELETE) +class ZebraInterfaceAddressDelete(_ZebraInterfaceAddress): + """ + Message body class for ZEBRA_INTERFACE_ADDRESS_DELETE. + """ + + +@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_UP) +class ZebraInterfaceUp(_ZebraInterface): + """ + Message body class for ZEBRA_INTERFACE_UP. 
+ """ + + +@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_DOWN) +class ZebraInterfaceDown(_ZebraInterface): + """ + Message body class for ZEBRA_INTERFACE_DOWN. + """ + + +@six.add_metaclass(abc.ABCMeta) +class _ZebraIPRoute(_ZebraMessageBody): + """ + Base class for ZEBRA_IPV4_ROUTE_* and ZEBRA_IPV6_ROUTE_* + message body. + + .. Note:: + + Zebra IPv4/IPv6 Route message have asymmetric structure. + If the message sent from Zebra Daemon, set 'from_zebra=True' to + create an instance of this class. + """ + # Zebra IPv4/IPv6 Route message body (Protocol Daemons -> Zebra Daemon): + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Route Type | Flags | Message | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | SAFI | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | IPv4/v6 Prefix (Variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Nexthop Num | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Nexthops (Variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (Distance) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (Metric) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (MTU) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (TAG) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # + # Zebra IPv4/IPv6 Route message body (Zebra Daemon -> Protocol Daemons): + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Route Type | Flags | Message | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | IPv4/v6 Prefix (Variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (Nexthop Num) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (Nexthops (Variable)) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (IFIndex Num) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (Interface indexes) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (Distance) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (Metric) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (MTU) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | (TAG) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!BBB' # type, flags, message + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + _SAFI_FMT = '!H' # safi + SAFI_SIZE = struct.calcsize(_SAFI_FMT) + _NUM_FMT = '!B' # nexthop_num or ifindex_num + NUM_SIZE = struct.calcsize(_NUM_FMT) + _IFINDEX_FMT = '!I' # ifindex + IFINDEX_SIZE = struct.calcsize(_IFINDEX_FMT) + + # API type specific constants + _FAMILY = None # either socket.AF_INET or socket.AF_INET6 + + def __init__(self, route_type, flags, message, safi=None, prefix=None, + nexthops=None, ifindexes=None, + distance=None, metric=None, mtu=None, tag=None, + from_zebra=False): + super(_ZebraIPRoute, self).__init__() + self.route_type = route_type + self.flags = flags + self.message = message + + # SAFI should be included if this message sent to Zebra. 
+ if from_zebra: + self.safi = None + else: + self.safi = safi or packet_safi.UNICAST + + assert prefix is not None + if isinstance(prefix, (IPv4Prefix, IPv6Prefix)): + prefix = prefix.prefix + self.prefix = prefix + + # Nexthops should be a list of str representations of IP address + # if this message sent from Zebra, otherwise a list of _Nexthop + # subclasses. + nexthops = nexthops or [] + if from_zebra: + for nexthop in nexthops: + assert (netaddr.valid_ipv4(nexthop) + or netaddr.valid_ipv6(nexthop)) + else: + for nexthop in nexthops: + assert isinstance(nexthop, _NextHop) + self.nexthops = nexthops + + # Interface indexes should be included if this message sent from + # Zebra. + if from_zebra: + ifindexes = ifindexes or [] + for ifindex in ifindexes: + assert isinstance(ifindex, six.integer_types) + self.ifindexes = ifindexes + else: + self.ifindexes = None + + self.distance = distance + self.metric = metric + self.mtu = mtu + self.tag = tag + + # is this message sent from Zebra message or not. + self.from_zebra = from_zebra + + @classmethod + def _parse_message_option(cls, message, flag, fmt, buf): + if message & flag: + (option,) = struct.unpack_from(fmt, buf) + return option, buf[struct.calcsize(fmt):] + else: + return None, buf + + @classmethod + def _parse_impl(cls, buf, from_zebra=False): + (route_type, flags, message,) = struct.unpack_from( + cls._HEADER_FMT, buf) + rest = buf[cls.HEADER_SIZE:] + + if from_zebra: + safi = None + else: + (safi,) = struct.unpack_from(cls._SAFI_FMT, rest) + rest = rest[cls.SAFI_SIZE:] + + prefix, rest = _parse_ip_prefix(cls._FAMILY, rest) + + if from_zebra and message & ZAPI_MESSAGE_NEXTHOP: + nexthops = [] + (nexthop_num,) = struct.unpack_from(cls._NUM_FMT, rest) + rest = rest[cls.NUM_SIZE:] + if cls._FAMILY == socket.AF_INET: + for _ in range(nexthop_num): + nexthop = addrconv.ipv4.bin_to_text(rest[:4]) + nexthops.append(nexthop) + rest = rest[4:] + else: # cls._FAMILY == socket.AF_INET6: + for _ in range(nexthop_num): + nexthop = addrconv.ipv6.bin_to_text(rest[:16]) + nexthops.append(nexthop) + rest = rest[16:] + else: + nexthops, rest = _parse_nexthops(rest) + + ifindexes = [] + if from_zebra and message & ZAPI_MESSAGE_IFINDEX: + (ifindex_num,) = struct.unpack_from(cls._NUM_FMT, rest) + rest = rest[cls.NUM_SIZE:] + for _ in range(ifindex_num): + (ifindex,) = struct.unpack_from(cls._IFINDEX_FMT, rest) + ifindexes.append(ifindex) + rest = rest[cls.IFINDEX_SIZE:] + + distance, rest = cls._parse_message_option( + message, ZAPI_MESSAGE_DISTANCE, '!B', rest) + metric, rest = cls._parse_message_option( + message, ZAPI_MESSAGE_METRIC, '!I', rest) + mtu, rest = cls._parse_message_option( + message, ZAPI_MESSAGE_MTU, '!I', rest) + tag, rest = cls._parse_message_option( + message, ZAPI_MESSAGE_TAG, '!I', rest) + + return cls(route_type, flags, message, safi, prefix, + nexthops, ifindexes, + distance, metric, mtu, tag, + from_zebra=from_zebra) + + @classmethod + def parse(cls, buf): + return cls._parse_impl(buf) + + @classmethod + def parse_from_zebra(cls, buf): + return cls._parse_impl(buf, from_zebra=True) + + def _serialize_message_option(self, option, flag, fmt): + if option is None: + return b'' + + # fixup + self.message |= flag + + return struct.pack(fmt, option) + + def serialize(self): + prefix = _serialize_ip_prefix(self.prefix) + + nexthops = b'' + if self.nexthops: + self.message |= ZAPI_MESSAGE_NEXTHOP # fixup + if self.from_zebra: + nexthops += struct.pack(self._NUM_FMT, len(self.nexthops)) + for nexthop in self.nexthops: + nexthops += 
ip.text_to_bin(nexthop) + else: + nexthops = _serialize_nexthops(self.nexthops) + + ifindexes = b'' + if self.ifindexes and self.from_zebra: + self.message |= ZAPI_MESSAGE_IFINDEX # fixup + ifindexes += struct.pack(self._NUM_FMT, len(self.ifindexes)) + for ifindex in self.ifindexes: + ifindexes += struct.pack(self._IFINDEX_FMT, ifindex) + + options = self._serialize_message_option( + self.distance, ZAPI_MESSAGE_DISTANCE, '!B') + options += self._serialize_message_option( + self.metric, ZAPI_MESSAGE_METRIC, '!I') + options += self._serialize_message_option( + self.mtu, ZAPI_MESSAGE_MTU, '!I') + options += self._serialize_message_option( + self.tag, ZAPI_MESSAGE_TAG, '!I') + + header = struct.pack( + self._HEADER_FMT, self.route_type, self.flags, self.message) + if not self.from_zebra: + header += struct.pack(self._SAFI_FMT, self.safi) + + return header + prefix + nexthops + ifindexes + options + + +class _ZebraIPv4Route(_ZebraIPRoute): + """ + Base class for ZEBRA_IPV4_ROUTE_* message body. + """ + _FAMILY = socket.AF_INET + + +@_ZebraMessageBody.register_type(ZEBRA_IPV4_ROUTE_ADD) +class ZebraIPv4RouteAdd(_ZebraIPv4Route): + """ + Message body class for ZEBRA_IPV4_ROUTE_ADD. + """ + + +@_ZebraMessageBody.register_type(ZEBRA_IPV4_ROUTE_DELETE) +class ZebraIPv4RouteDelete(_ZebraIPv4Route): + """ + Message body class for ZEBRA_IPV4_ROUTE_DELETE. + """ + + +class _ZebraIPv6Route(_ZebraIPRoute): + """ + Base class for ZEBRA_IPV6_ROUTE_* message body. + """ + _FAMILY = socket.AF_INET6 + + +@_ZebraMessageBody.register_type(ZEBRA_IPV6_ROUTE_ADD) +class ZebraIPv6RouteAdd(_ZebraIPv6Route): + """ + Message body class for ZEBRA_IPV6_ROUTE_ADD. + """ + + +@_ZebraMessageBody.register_type(ZEBRA_IPV6_ROUTE_DELETE) +class ZebraIPv6RouteDelete(_ZebraIPv6Route): + """ + Message body class for ZEBRA_IPV6_ROUTE_DELETE. + """ + + +@six.add_metaclass(abc.ABCMeta) +class _ZebraRedistribute(_ZebraMessageBody): + """ + Base class for ZEBRA_REDISTRIBUTE_ADD and ZEBRA_REDISTRIBUTE_DELETE + message body. + """ + # Zebra Redistribute message body: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Route Type | + # +-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!B' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + def __init__(self, route_type): + super(_ZebraRedistribute, self).__init__() + self.route_type = route_type + + @classmethod + def parse(cls, buf): + (route_type,) = struct.unpack_from(cls._HEADER_FMT, buf) + + return cls(route_type) + + def serialize(self): + return struct.pack(self._HEADER_FMT, self.route_type) + + +@_ZebraMessageBody.register_type(ZEBRA_REDISTRIBUTE_ADD) +class ZebraRedistributeAdd(_ZebraRedistribute): + """ + Message body class for ZEBRA_REDISTRIBUTE_ADD. + """ + + +@_ZebraMessageBody.register_type(ZEBRA_REDISTRIBUTE_DELETE) +class ZebraRedistributeDelete(_ZebraRedistribute): + """ + Message body class for ZEBRA_REDISTRIBUTE_DELETE. + """ + + +@six.add_metaclass(abc.ABCMeta) +class _ZebraRedistributeDefault(_ZebraMessageBody): + """ + Base class for ZEBRA_REDISTRIBUTE_DEFAULT_ADD and + ZEBRA_REDISTRIBUTE_DEFAULT_DELETE message body. + """ + + +@_ZebraMessageBody.register_type(ZEBRA_REDISTRIBUTE_DEFAULT_ADD) +class ZebraRedistributeDefaultAdd(_ZebraRedistribute): + """ + Message body class for ZEBRA_REDISTRIBUTE_DEFAULT_ADD. 
+ """ + + +@_ZebraMessageBody.register_type(ZEBRA_REDISTRIBUTE_DEFAULT_DELETE) +class ZebraRedistributeDefaultDelete(_ZebraRedistribute): + """ + Message body class for ZEBRA_REDISTRIBUTE_DEFAULT_DELETE. + """ + + +@six.add_metaclass(abc.ABCMeta) +class _ZebraIPNexthopLookup(_ZebraMessageBody): + """ + Base class for ZEBRA_IPV4_NEXTHOP_LOOKUP and + ZEBRA_IPV6_NEXTHOP_LOOKUP message body. + """ + # Zebra IPv4/v6 Nexthop Lookup message body: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | IPv4/v6 address | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Metric | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Nexthop Num | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Nexthops (Variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _METRIC_FMT = '!I' # metric + METRIC_SIZE = struct.calcsize(_METRIC_FMT) + + # Message type specific constants + ADDR_CLS = None # either addrconv.ipv4 or addrconv.ipv6 + ADDR_LEN = None # IP address length in bytes + + def __init__(self, addr, metric=None, nexthops=None): + super(_ZebraIPNexthopLookup, self).__init__() + assert netaddr.valid_ipv4(addr) or netaddr.valid_ipv6(addr) + self.addr = addr + self.metric = metric + nexthops = nexthops or [] + for nexthop in nexthops: + assert isinstance(nexthop, _NextHop) + self.nexthops = nexthops + + @classmethod + def parse(cls, buf): + addr = cls.ADDR_CLS.bin_to_text(buf[:cls.ADDR_LEN]) + rest = buf[cls.ADDR_LEN:] + + metric = None + if rest: + # Note: Case for ZEBRA_IPV4_NEXTHOP_LOOKUP request + (metric,) = struct.unpack_from(cls._METRIC_FMT, rest) + rest = rest[cls.METRIC_SIZE:] + + nexthops = None + if rest: + nexthops, rest = _parse_nexthops(rest) + + return cls(addr, metric, nexthops) + + def serialize(self): + buf = self.ADDR_CLS.text_to_bin(self.addr) + + if self.metric is None: + return buf + + buf += struct.pack(self._METRIC_FMT, self.metric) + + return buf + _serialize_nexthops(self.nexthops) + + +@_ZebraMessageBody.register_type(ZEBRA_IPV4_NEXTHOP_LOOKUP) +class ZebraIPv4NexthopLookup(_ZebraIPNexthopLookup): + """ + Message body class for ZEBRA_IPV4_NEXTHOP_LOOKUP. + """ + ADDR_CLS = addrconv.ipv4 + ADDR_LEN = 4 + + +@_ZebraMessageBody.register_type(ZEBRA_IPV6_NEXTHOP_LOOKUP) +class ZebraIPv6NexthopLookup(_ZebraIPNexthopLookup): + """ + Message body class for ZEBRA_IPV6_NEXTHOP_LOOKUP. + """ + ADDR_CLS = addrconv.ipv6 + ADDR_LEN = 16 + + +@six.add_metaclass(abc.ABCMeta) +class _ZebraIPImportLookup(_ZebraMessageBody): + """ + Base class for ZEBRA_IPV4_IMPORT_LOOKUP and + ZEBRA_IPV6_IMPORT_LOOKUP message body. 
+ """ + # Zebra IPv4/v6 Import Lookup message body: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | IPv4/v6 prefix | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Metric | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Nexthop Num | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Nexthops (Variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _METRIC_FMT = '!I' # metric + METRIC_SIZE = struct.calcsize(_METRIC_FMT) + + # Message type specific constants + PREFIX_CLS = None # either addrconv.ipv4 or addrconv.ipv6 + PREFIX_LEN = None # IP prefix length in bytes + + def __init__(self, prefix, metric=None, nexthops=None): + super(_ZebraIPImportLookup, self).__init__() + assert netaddr.valid_ipv4(prefix) or netaddr.valid_ipv6(prefix) + self.prefix = prefix + self.metric = metric + nexthops = nexthops or [] + for nexthop in nexthops: + assert isinstance(nexthop, _NextHop) + self.nexthops = nexthops + + @classmethod + def parse(cls, buf): + prefix = cls.PREFIX_CLS.bin_to_text(buf[:cls.PREFIX_LEN]) + rest = buf[cls.PREFIX_LEN:] + + metric = None + if rest: + (metric,) = struct.unpack_from(cls._METRIC_FMT, rest) + rest = rest[cls.METRIC_SIZE:] + + nexthops = None + if rest: + nexthops, rest = _parse_nexthops(rest) + + return cls(prefix, metric, nexthops) + + def serialize(self): + buf = self.PREFIX_CLS.text_to_bin(self.prefix) + + if self.metric is None: + return buf + + buf += struct.pack(self._METRIC_FMT, self.metric) + + return buf + _serialize_nexthops(self.nexthops) + + +@_ZebraMessageBody.register_type(ZEBRA_IPV4_IMPORT_LOOKUP) +class ZebraIPv4ImportLookup(_ZebraIPImportLookup): + """ + Message body class for ZEBRA_IPV4_IMPORT_LOOKUP. + """ + PREFIX_CLS = addrconv.ipv4 + PREFIX_LEN = 4 + + +@_ZebraMessageBody.register_type(ZEBRA_IPV6_IMPORT_LOOKUP) +class ZebraIPv6ImportLookup(_ZebraIPImportLookup): + """ + Message body class for ZEBRA_IPV6_IMPORT_LOOKUP. + """ + PREFIX_CLS = addrconv.ipv6 + PREFIX_LEN = 16 + + +# Note: Not implemented in quagga/zebra/zserv.c +# @_ZebraMessageBody.register_type(ZEBRA_INTERFACE_RENAME) +# class ZebraInterfaceRename(_ZebraMessageBody): + + +@_ZebraMessageBody.register_type(ZEBRA_ROUTER_ID_ADD) +class ZebraRouterIDAdd(_ZebraMessageBody): + """ + Message body class for ZEBRA_ROUTER_ID_ADD. + """ + + +@_ZebraMessageBody.register_type(ZEBRA_ROUTER_ID_DELETE) +class ZebraRouterIDDelete(_ZebraMessageBody): + """ + Message body class for ZEBRA_ROUTER_ID_DELETE. + """ + + +@_ZebraMessageBody.register_type(ZEBRA_ROUTER_ID_UPDATE) +class ZebraRouterIDUpdate(_ZebraMessageBody): + """ + Message body class for ZEBRA_ROUTER_ID_UPDATE. 
+ """ + # Zebra Router ID Update message body: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Family | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | IPv4/v6 prefix | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Prefix len | + # +-+-+-+-+-+-+-+-+ + _FAMILY_FMT = '!B' + FAMILY_SIZE = struct.calcsize(_FAMILY_FMT) + _IPV4_BODY_FMT = '!4sB' # prefix, prefix_len + _IPV6_BODY_FMT = '!16sB' + + def __init__(self, family, prefix): + super(ZebraRouterIDUpdate, self).__init__() + self.family = family + if isinstance(prefix, (IPv4Prefix, IPv6Prefix)): + prefix = prefix.prefix + self.prefix = prefix + + @classmethod + def parse(cls, buf): + (family,) = struct.unpack_from(cls._FAMILY_FMT, buf) + rest = buf[cls.FAMILY_SIZE:] + + if socket.AF_INET == family: + (prefix, p_len) = struct.unpack_from(cls._IPV4_BODY_FMT, rest) + prefix = '%s/%d' % (addrconv.ipv4.bin_to_text(prefix), p_len) + elif socket.AF_INET6 == family: + (prefix, p_len) = struct.unpack_from(cls._IPV6_BODY_FMT, rest) + prefix = '%s/%d' % (addrconv.ipv6.bin_to_text(prefix), p_len) + else: + raise struct.error('Unsupported family: %d' % family) + + return cls(family, prefix) + + def serialize(self): + if ip.valid_ipv4(self.prefix): + self.family = socket.AF_INET # fixup + prefix_addr, prefix_num = self.prefix.split('/') + body_bin = struct.pack( + self._IPV4_BODY_FMT, + addrconv.ipv4.text_to_bin(prefix_addr), + int(prefix_num)) + elif ip.valid_ipv6(self.prefix): + self.family = socket.AF_INET6 # fixup + prefix_addr, prefix_num = self.prefix.split('/') + body_bin = struct.pack( + self._IPV6_BODY_FMT, + addrconv.ipv6.text_to_bin(prefix_addr), + int(prefix_num)) + else: + raise ValueError('Invalid prefix: %s' % self.prefix) + + return struct.pack(self._FAMILY_FMT, self.family) + body_bin + + +@_ZebraMessageBody.register_type(ZEBRA_HELLO) +class ZebraHello(_ZebraMessageBody): + """ + Message body class for ZEBRA_HELLO. + """ + # Zebra Hello message body: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Route Type | + # +-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!B' + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + + def __init__(self, route_type=ZEBRA_ROUTE_MAX): + super(ZebraHello, self).__init__() + self.route_type = route_type + + @classmethod + def parse(cls, buf): + route_type = None + if buf: + (route_type,) = struct.unpack_from(cls._HEADER_FMT, buf) + + return cls(route_type) + + def serialize(self): + return struct.pack(self._HEADER_FMT, self.route_type) + + +@six.add_metaclass(abc.ABCMeta) +class _ZebraIPNexthopLookupMRib(_ZebraMessageBody): + """ + Base class for ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB (and + ZEBRA_IPV6_NEXTHOP_LOOKUP_MRIB) message body. 
+ """ + # Zebra IPv4/v6 Nexthop Lookup MRIB message body: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | IPv4/v6 address | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Distance | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Metric | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Nexthop Num | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Nexthops (Variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _DISTANCE_METRIC_FMT = '!I' # metric + DISTANCE_METRIC_SIZE = struct.calcsize(_DISTANCE_METRIC_FMT) + + # Message type specific constants + ADDR_CLS = None # either addrconv.ipv4 or addrconv.ipv6 + ADDR_LEN = None # IP address length in bytes + + def __init__(self, addr, distance, metric, nexthops=None): + super(_ZebraIPNexthopLookupMRib, self).__init__() + assert netaddr.valid_ipv4(addr) or netaddr.valid_ipv6(addr) + self.addr = addr + self.distance = distance + self.metric = metric + nexthops = nexthops or [] + for nexthop in nexthops: + assert isinstance(nexthop, _NextHop) + self.nexthops = nexthops + + @classmethod + def parse(cls, buf): + addr = cls.ADDR_CLS.bin_to_text(buf[:cls.ADDR_LEN]) + rest = buf[cls.ADDR_LEN:] + + (metric,) = struct.unpack_from(cls._DISTANCE_METRIC_FMT, rest) + rest = rest[cls.DISTANCE_METRIC_SIZE:] + + nexthops, rest = _parse_nexthops(rest) + + return cls(addr, metric, nexthops) + + def serialize(self): + buf = self.ADDR_CLS.text_to_bin(self.addr) + + buf += struct.pack( + self._DISTANCE_METRIC_FMT, self.distance, self.metric) + + return buf + self._serialize_nexthops(self.nexthops) + + +@_ZebraMessageBody.register_type(ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB) +class ZebraIPv4NexthopLookupMRib(_ZebraIPNexthopLookupMRib): + """ + Message body class for ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB. + """ + ADDR_CLS = addrconv.ipv4 + ADDR_LEN = 4 + + +@_ZebraMessageBody.register_type(ZEBRA_VRF_UNREGISTER) +class ZebraVrfUnregister(_ZebraMessageBody): + """ + Message body class for ZEBRA_VRF_UNREGISTER. + """ + + +@_ZebraMessageBody.register_type(ZEBRA_INTERFACE_LINK_PARAMS) +class ZebraInterfaceLinkParams(_ZebraMessageBody): + """ + Message body class for ZEBRA_INTERFACE_LINK_PARAMS. 
+ """ + # Zebra Interface Link Parameters message body: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Interface Index | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Interface Link Parameters | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _HEADER_FMT = '!I' # ifindex + HEADER_SIZE = struct.calcsize(_HEADER_FMT) + # See InterfaceLinkParams class for Interface Link Parameters structure + + def __init__(self, ifindex, link_params): + super(ZebraInterfaceLinkParams, self).__init__() + self.ifindex = ifindex + assert isinstance(link_params, InterfaceLinkParams) + self.link_params = link_params + + @classmethod + def parse(cls, buf): + (ifindex,) = struct.unpack_from(cls._HEADER_FMT, buf) + rest = buf[cls.HEADER_SIZE:] + + link_params, rest = InterfaceLinkParams.parse(rest) + + return cls(ifindex, link_params) + + def serialize(self): + buf = struct.pack(self._HEADER_FMT, self.ifindex) + + return buf + self.link_params.serialize() + + +class _ZebraNexthopRegister(_ZebraMessageBody): + """ + Base class for ZEBRA_NEXTHOP_REGISTER and ZEBRA_NEXTHOP_UNREGISTER + message body. + """ + # Zebra Nexthop Register message body: + # (Repeat of RegisteredNexthop class) + + def __init__(self, nexthops): + super(_ZebraNexthopRegister, self).__init__() + nexthops = nexthops or [] + for nexthop in nexthops: + assert isinstance(nexthop, RegisteredNexthop) + self.nexthops = nexthops + + @classmethod + def parse(cls, buf): + nexthops = [] + while buf: + nexthop, buf = RegisteredNexthop.parse(buf) + nexthops.append(nexthop) + + return cls(nexthops) + + def serialize(self): + buf = b'' + for nexthop in self.nexthops: + buf += nexthop.serialize() + + return buf + + +@_ZebraMessageBody.register_type(ZEBRA_NEXTHOP_REGISTER) +class ZebraNexthopRegister(_ZebraNexthopRegister): + """ + Message body class for ZEBRA_NEXTHOP_REGISTER. + """ + + +@_ZebraMessageBody.register_type(ZEBRA_NEXTHOP_UNREGISTER) +class ZebraNexthopUnregister(_ZebraNexthopRegister): + """ + Message body class for ZEBRA_NEXTHOP_UNREGISTER. + """ + + +@_ZebraMessageBody.register_type(ZEBRA_NEXTHOP_UPDATE) +class ZebraNexthopUpdate(_ZebraMessageBody): + """ + Message body class for ZEBRA_NEXTHOP_UPDATE. 
+ """ + # Zebra IPv4/v6 Nexthop Update message body: + # 0 1 2 3 + # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Family | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | IPv4/v6 prefix | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Metric | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Nexthop Num | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + # | Nexthops (Variable) | + # +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + _FAMILY_FMT = '!H' # family + FAMILY_SIZE = struct.calcsize(_FAMILY_FMT) + _METRIC_FMT = '!I' # metric + METRIC_SIZE = struct.calcsize(_METRIC_FMT) + + def __init__(self, family, prefix, metric, nexthops=None): + super(ZebraNexthopUpdate, self).__init__() + self.family = family + if isinstance(prefix, (IPv4Prefix, IPv6Prefix)): + prefix = prefix.prefix + self.prefix = prefix + self.metric = metric + nexthops = nexthops or [] + for nexthop in nexthops: + assert isinstance(nexthop, _NextHop) + self.nexthops = nexthops + + @classmethod + def parse(cls, buf): + (family,) = struct.unpack_from(cls._FAMILY_FMT, buf) + rest = buf[cls.FAMILY_SIZE:] + + prefix, rest = _parse_ip_prefix(family, rest) + + (metric,) = struct.unpack_from(cls._METRIC_FMT, rest) + rest = rest[cls.METRIC_SIZE:] + + nexthops, rest = _parse_nexthops(rest) + + return cls(family, prefix, metric, nexthops) + + def serialize(self): + # fixup + if ip.valid_ipv4(self.prefix): + self.family = socket.AF_INET + elif ip.valid_ipv6(self.prefix): + self.family = socket.AF_INET6 + else: + raise ValueError('Invalid prefix: %s' % self.prefix) + + buf = struct.pack(self._FAMILY_FMT, self.family) + + buf += _serialize_ip_prefix(self.prefix) + + buf += struct.pack(self._METRIC_FMT, self.metric) + + return buf + _serialize_nexthops(self.nexthops) diff -Nru ryu-4.9/ryu/lib/stringify.py ryu-4.15/ryu/lib/stringify.py --- ryu-4.9/ryu/lib/stringify.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/stringify.py 2017-07-02 11:08:32.000000000 +0000 @@ -144,6 +144,15 @@ _class_prefixes = [] _class_suffixes = [] + # List of attributes ignored in the str and json representations. + _base_attributes = [] + + # Optional attributes included in the str and json representations. + # e.g.) In case of attributes are property, the attributes will be + # skipped in the str and json representations. + # Then, please specify the attributes into this list. 
+ _opt_attributes = [] + def stringify_attrs(self): """an override point for sub classes""" return obj_python_attrs(self) @@ -368,14 +377,17 @@ yield(k, getattr(msg_, k)) return base = getattr(msg_, '_base_attributes', []) + opt = getattr(msg_, '_opt_attributes', []) for k, v in inspect.getmembers(msg_): - if k.startswith('_'): + if k in opt: + pass + elif k.startswith('_'): continue - if callable(v): + elif callable(v): continue - if k in base: + elif k in base: continue - if hasattr(msg_.__class__, k): + elif hasattr(msg_.__class__, k): continue yield (k, v) diff -Nru ryu-4.9/ryu/lib/type_desc.py ryu-4.15/ryu/lib/type_desc.py --- ryu-4.9/ryu/lib/type_desc.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/lib/type_desc.py 2017-07-02 11:08:32.000000000 +0000 @@ -122,3 +122,40 @@ return base64.b64encode(data) from_user = staticmethod(base64.b64decode) + + +class TypeDisp(object): + _TYPES = {} + _REV_TYPES = None + _UNKNOWN_TYPE = None + + @classmethod + def register_unknown_type(cls): + def _register_type(subcls): + cls._UNKNOWN_TYPE = subcls + return subcls + return _register_type + + @classmethod + def register_type(cls, type_): + cls._TYPES = cls._TYPES.copy() + + def _register_type(subcls): + cls._TYPES[type_] = subcls + cls._REV_TYPES = None + return subcls + return _register_type + + @classmethod + def _lookup_type(cls, type_): + try: + return cls._TYPES[type_] + except KeyError: + return cls._UNKNOWN_TYPE + + @classmethod + def _rev_lookup_type(cls, targ_cls): + if cls._REV_TYPES is None: + rev = dict((v, k) for k, v in cls._TYPES.items()) + cls._REV_TYPES = rev + return cls._REV_TYPES[targ_cls] diff -Nru ryu-4.9/ryu/ofproto/nicira_ext.py ryu-4.15/ryu/ofproto/nicira_ext.py --- ryu-4.9/ryu/ofproto/nicira_ext.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/ofproto/nicira_ext.py 2017-07-02 11:08:32.000000000 +0000 @@ -269,6 +269,11 @@ NX_LEARN_DST_RESERVED = 3 << 11 # Not yet defined. NX_LEARN_DST_MASK = 3 << 11 +# nx_ct constants +NX_CT_F_COMMIT = 1 << 0 +NX_CT_F_FORCE = 1 << 1 +NX_CT_RECIRC_NONE = 0xff # OFPTT_ALL + # nx_nat constants NX_NAT_RANGE_IPV4_MIN = 1 << 0 NX_NAT_RANGE_IPV4_MAX = 1 << 1 @@ -277,6 +282,18 @@ NX_NAT_RANGE_PROTO_MIN = 1 << 4 NX_NAT_RANGE_PROTO_MAX = 1 << 5 +# nx ip_frag constants +FLOW_NW_FRAG_ANY = 1 << 0 # Set for any IP frag. +FLOW_NW_FRAG_LATER = 1 << 1 # Set for IP frag with nonzero offset. +FLOW_NW_FRAG_MASK = FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER + +# nx ip_frag match values +NXM_IP_FRAG_NO = (0, FLOW_NW_FRAG_MASK) +NXM_IP_FRAG_YES = (FLOW_NW_FRAG_ANY, FLOW_NW_FRAG_ANY) +NXM_IP_FRAG_FIRST = (FLOW_NW_FRAG_ANY, FLOW_NW_FRAG_MASK) +NXM_IP_FRAG_LATER = (FLOW_NW_FRAG_LATER, FLOW_NW_FRAG_LATER) +NXM_IP_FRAG_NOT_LATER = (0, FLOW_NW_FRAG_LATER) + def ofs_nbits(start, end): """ diff -Nru ryu-4.9/ryu/ofproto/ofproto_parser.py ryu-4.15/ryu/ofproto/ofproto_parser.py --- ryu-4.9/ryu/ofproto/ofproto_parser.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/ofproto/ofproto_parser.py 2017-07-02 11:08:32.000000000 +0000 @@ -125,6 +125,58 @@ return cls.from_jsondict(v, datapath=dp) +def ofp_instruction_from_jsondict(dp, jsonlist, encap=True): + """ + This function is intended to be used with + ryu.lib.ofctl_string.ofp_instruction_from_str. + It is very similar to ofp_msg_from_jsondict, but works on + a list of OFPInstructions/OFPActions. It also encapsulates + OFPAction into OFPInstructionActions, as >OF1.0 OFPFlowMod + requires that. + + This function takes the following arguments. 
+ + ======== ================================================== + Argument Description + ======== ================================================== + dp An instance of ryu.controller.Datapath. + jsonlist A list of JSON style dictionaries. + encap Encapsulate OFPAction into OFPInstructionActions. + Must be false for OF10. + ======== ================================================== + """ + proto = dp.ofproto + parser = dp.ofproto_parser + result = [] + for jsondict in jsonlist: + assert len(jsondict) == 1 + k, v = list(jsondict.items())[0] + cls = getattr(parser, k) + if not issubclass(cls, parser.OFPAction): + ofpinst = getattr(parser, 'OFPInstruction', None) + if not ofpinst or not issubclass(cls, ofpinst): + raise ValueError("Supplied jsondict is of wrong type: %s", + jsondict) + result.append(cls.from_jsondict(v)) + + if not encap: + return result + insts = [] + actions = [] + result.append(None) # sentinel + for act_or_inst in result: + if isinstance(act_or_inst, parser.OFPAction): + actions.append(act_or_inst) + else: + if actions: + insts.append(parser.OFPInstructionActions( + proto.OFPIT_APPLY_ACTIONS, actions)) + actions = [] + if act_or_inst is not None: + insts.append(act_or_inst) + return insts + + class StringifyMixin(stringify.StringifyMixin): _class_prefixes = ["OFP", "ONF", "MT", "NX"] diff -Nru ryu-4.9/ryu/ofproto/oxx_fields.py ryu-4.15/ryu/ofproto/oxx_fields.py --- ryu-4.9/ryu/ofproto/oxx_fields.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/ofproto/oxx_fields.py 2017-07-02 11:08:32.000000000 +0000 @@ -72,6 +72,9 @@ value = t.from_user(value) if mask is not None: mask = t.from_user(mask) + elif isinstance(value, tuple): + # This hack is to accomodate CIDR notations with IPv[46]Addr. + value, mask = value return num, value, mask diff -Nru ryu-4.9/ryu/services/protocols/bgp/api/base.py ryu-4.15/ryu/services/protocols/bgp/api/base.py --- ryu-4.9/ryu/services/protocols/bgp/api/base.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/api/base.py 2017-07-02 11:08:32.000000000 +0000 @@ -45,6 +45,7 @@ EVPN_ROUTE_TYPE = 'route_type' EVPN_ESI = 'esi' EVPN_ETHERNET_TAG_ID = 'ethernet_tag_id' +REDUNDANCY_MODE = 'redundancy_mode' MAC_ADDR = 'mac_addr' IP_ADDR = 'ip_addr' IP_PREFIX = 'ip_prefix' @@ -53,6 +54,9 @@ TUNNEL_TYPE = 'tunnel_type' EVPN_VNI = 'vni' PMSI_TUNNEL_TYPE = 'pmsi_tunnel_type' +FLOWSPEC_FAMILY = 'flowspec_family' +FLOWSPEC_RULES = 'rules' +FLOWSPEC_ACTIONS = 'actions' # API call registry _CALL_REGISTRY = {} diff -Nru ryu-4.9/ryu/services/protocols/bgp/api/prefix.py ryu-4.15/ryu/services/protocols/bgp/api/prefix.py --- ryu-4.9/ryu/services/protocols/bgp/api/prefix.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/api/prefix.py 2017-07-02 11:08:32.000000000 +0000 @@ -18,13 +18,30 @@ """ import logging +from ryu.lib.packet.bgp import EvpnEsi +from ryu.lib.packet.bgp import EvpnNLRI +from ryu.lib.packet.bgp import EvpnEthernetAutoDiscoveryNLRI from ryu.lib.packet.bgp import EvpnMacIPAdvertisementNLRI from ryu.lib.packet.bgp import EvpnInclusiveMulticastEthernetTagNLRI +from ryu.lib.packet.bgp import EvpnEthernetSegmentNLRI from ryu.lib.packet.bgp import EvpnIpPrefixNLRI from ryu.lib.packet.bgp import BGPPathAttributePmsiTunnel +from ryu.lib.packet.bgp import FlowSpecIPv4NLRI +from ryu.lib.packet.bgp import FlowSpecIPv6NLRI +from ryu.lib.packet.bgp import FlowSpecVPNv4NLRI +from ryu.lib.packet.bgp import FlowSpecVPNv6NLRI +from ryu.lib.packet.bgp import FlowSpecL2VPNNLRI +from ryu.lib.packet.bgp import 
BGPFlowSpecTrafficRateCommunity
+from ryu.lib.packet.bgp import BGPFlowSpecTrafficActionCommunity
+from ryu.lib.packet.bgp import BGPFlowSpecRedirectCommunity
+from ryu.lib.packet.bgp import BGPFlowSpecTrafficMarkingCommunity
+from ryu.lib.packet.bgp import BGPFlowSpecVlanActionCommunity
+from ryu.lib.packet.bgp import BGPFlowSpecTPIDActionCommunity
+
 from ryu.services.protocols.bgp.api.base import EVPN_ROUTE_TYPE
 from ryu.services.protocols.bgp.api.base import EVPN_ESI
 from ryu.services.protocols.bgp.api.base import EVPN_ETHERNET_TAG_ID
+from ryu.services.protocols.bgp.api.base import REDUNDANCY_MODE
 from ryu.services.protocols.bgp.api.base import MAC_ADDR
 from ryu.services.protocols.bgp.api.base import IP_ADDR
 from ryu.services.protocols.bgp.api.base import IP_PREFIX
@@ -38,6 +55,9 @@
 from ryu.services.protocols.bgp.api.base import EVPN_VNI
 from ryu.services.protocols.bgp.api.base import TUNNEL_TYPE
 from ryu.services.protocols.bgp.api.base import PMSI_TUNNEL_TYPE
+from ryu.services.protocols.bgp.api.base import FLOWSPEC_FAMILY
+from ryu.services.protocols.bgp.api.base import FLOWSPEC_RULES
+from ryu.services.protocols.bgp.api.base import FLOWSPEC_ACTIONS
 from ryu.services.protocols.bgp.base import add_bgp_error_metadata
 from ryu.services.protocols.bgp.base import PREFIX_ERROR_CODE
 from ryu.services.protocols.bgp.base import validate
@@ -50,19 +70,83 @@
 from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_L2_EVPN
 from ryu.services.protocols.bgp.utils import validation
-
 LOG = logging.getLogger('bgpspeaker.api.prefix')
+# Maximum value of the Ethernet Tag ID
+EVPN_MAX_ET = EvpnNLRI.MAX_ET
+
+# ESI Types
+ESI_TYPE_ARBITRARY = EvpnEsi.ARBITRARY
+ESI_TYPE_LACP = EvpnEsi.LACP
+ESI_TYPE_L2_BRIDGE = EvpnEsi.L2_BRIDGE
+ESI_TYPE_MAC_BASED = EvpnEsi.MAC_BASED
+ESI_TYPE_ROUTER_ID = EvpnEsi.ROUTER_ID
+ESI_TYPE_AS_BASED = EvpnEsi.AS_BASED
+SUPPORTED_ESI_TYPES = [
+ ESI_TYPE_ARBITRARY,
+ ESI_TYPE_LACP,
+ ESI_TYPE_L2_BRIDGE,
+ ESI_TYPE_MAC_BASED,
+ ESI_TYPE_ROUTER_ID,
+ ESI_TYPE_AS_BASED,
+]
+
 # Constants used in API calls for EVPN
+EVPN_ETH_AUTO_DISCOVERY = EvpnEthernetAutoDiscoveryNLRI.ROUTE_TYPE_NAME
 EVPN_MAC_IP_ADV_ROUTE = EvpnMacIPAdvertisementNLRI.ROUTE_TYPE_NAME
-EVPN_MULTICAST_ETAG_ROUTE = EvpnInclusiveMulticastEthernetTagNLRI.ROUTE_TYPE_NAME
+EVPN_MULTICAST_ETAG_ROUTE = (
+ EvpnInclusiveMulticastEthernetTagNLRI.ROUTE_TYPE_NAME)
+EVPN_ETH_SEGMENT = EvpnEthernetSegmentNLRI.ROUTE_TYPE_NAME
 EVPN_IP_PREFIX_ROUTE = EvpnIpPrefixNLRI.ROUTE_TYPE_NAME
 SUPPORTED_EVPN_ROUTE_TYPES = [
+ EVPN_ETH_AUTO_DISCOVERY,
 EVPN_MAC_IP_ADV_ROUTE,
 EVPN_MULTICAST_ETAG_ROUTE,
+ EVPN_ETH_SEGMENT,
 EVPN_IP_PREFIX_ROUTE,
 ]
+# Constants used in API calls for Flow Specification
+FLOWSPEC_FAMILY_IPV4 = FlowSpecIPv4NLRI.FLOWSPEC_FAMILY
+FLOWSPEC_FAMILY_IPV6 = FlowSpecIPv6NLRI.FLOWSPEC_FAMILY
+FLOWSPEC_FAMILY_VPNV4 = FlowSpecVPNv4NLRI.FLOWSPEC_FAMILY
+FLOWSPEC_FAMILY_VPNV6 = FlowSpecVPNv6NLRI.FLOWSPEC_FAMILY
+FLOWSPEC_FAMILY_L2VPN = FlowSpecL2VPNNLRI.FLOWSPEC_FAMILY
+SUPPORTED_FLOWSPEC_FAMILIES = (
+ FLOWSPEC_FAMILY_IPV4,
+ FLOWSPEC_FAMILY_IPV6,
+ FLOWSPEC_FAMILY_VPNV4,
+ FLOWSPEC_FAMILY_VPNV6,
+ FLOWSPEC_FAMILY_L2VPN,
+)
+
+# Constants for the Traffic Filtering Actions of Flow Specification.
+FLOWSPEC_ACTION_TRAFFIC_RATE = BGPFlowSpecTrafficRateCommunity.ACTION_NAME +FLOWSPEC_ACTION_TRAFFIC_ACTION = BGPFlowSpecTrafficActionCommunity.ACTION_NAME +FLOWSPEC_ACTION_REDIRECT = BGPFlowSpecRedirectCommunity.ACTION_NAME +FLOWSPEC_ACTION_TRAFFIC_MARKING = BGPFlowSpecTrafficMarkingCommunity.ACTION_NAME +FLOWSPEC_ACTION_VLAN = BGPFlowSpecVlanActionCommunity.ACTION_NAME +FLOWSPEC_ACTION_TPID = BGPFlowSpecTPIDActionCommunity.ACTION_NAME + +SUPPORTTED_FLOWSPEC_ACTIONS = ( + FLOWSPEC_ACTION_TRAFFIC_RATE, + FLOWSPEC_ACTION_TRAFFIC_ACTION, + FLOWSPEC_ACTION_REDIRECT, + FLOWSPEC_ACTION_TRAFFIC_MARKING, + FLOWSPEC_ACTION_VLAN, + FLOWSPEC_ACTION_TPID, +) + + +# Constants for ESI Label extended community +REDUNDANCY_MODE_ALL_ACTIVE = 'all_active' +REDUNDANCY_MODE_SINGLE_ACTIVE = 'single_active' +SUPPORTED_REDUNDANCY_MODES = [ + REDUNDANCY_MODE_ALL_ACTIVE, + REDUNDANCY_MODE_SINGLE_ACTIVE, +] + # Constants for BGP Tunnel Encapsulation Attribute TUNNEL_TYPE_VXLAN = 'vxlan' TUNNEL_TYPE_NVGRE = 'nvgre' @@ -134,6 +218,13 @@ conf_value=ethernet_tag_id) +@validate(name=REDUNDANCY_MODE) +def is_valid_redundancy_mode(redundancy_mode): + if redundancy_mode not in SUPPORTED_REDUNDANCY_MODES: + raise ConfigValueError(conf_name=REDUNDANCY_MODE, + conf_value=redundancy_mode) + + @validate(name=MAC_ADDR) def is_valid_mac_addr(addr): if not validation.is_valid_mac(addr): @@ -143,7 +234,10 @@ @validate(name=IP_ADDR) def is_valid_ip_addr(addr): - if not (validation.is_valid_ipv4(addr) + # Note: Allows empty IP Address (means length=0). + # e.g.) L2VPN MAC advertisement of Cisco NX-OS + if not (addr is None + or validation.is_valid_ipv4(addr) or validation.is_valid_ipv6(addr)): raise ConfigValueError(conf_name=IP_ADDR, conf_value=addr) @@ -193,6 +287,28 @@ conf_value=pmsi_tunnel_type) +@validate(name=FLOWSPEC_FAMILY) +def is_valid_flowspec_family(flowspec_family): + if flowspec_family not in SUPPORTED_FLOWSPEC_FAMILIES: + raise ConfigValueError(conf_name=FLOWSPEC_FAMILY, + conf_value=flowspec_family) + + +@validate(name=FLOWSPEC_RULES) +def is_valid_flowspec_rules(rules): + if not isinstance(rules, dict): + raise ConfigValueError(conf_name=FLOWSPEC_RULES, + conf_value=rules) + + +@validate(name=FLOWSPEC_ACTIONS) +def is_valid_flowspec_actions(actions): + for k in actions: + if k not in SUPPORTTED_FLOWSPEC_ACTIONS: + raise ConfigValueError(conf_name=FLOWSPEC_ACTIONS, + conf_value=actions) + + @RegisterWithArgChecks(name='prefix.add_local', req_args=[ROUTE_DISTINGUISHER, PREFIX, NEXT_HOP], opt_args=[VRF_RF]) @@ -241,12 +357,19 @@ @RegisterWithArgChecks(name='evpn_prefix.add_local', req_args=[EVPN_ROUTE_TYPE, ROUTE_DISTINGUISHER, NEXT_HOP], - opt_args=[EVPN_ESI, EVPN_ETHERNET_TAG_ID, MAC_ADDR, - IP_ADDR, IP_PREFIX, GW_IP_ADDR, EVPN_VNI, - TUNNEL_TYPE, PMSI_TUNNEL_TYPE]) + opt_args=[EVPN_ESI, EVPN_ETHERNET_TAG_ID, + REDUNDANCY_MODE, MAC_ADDR, IP_ADDR, IP_PREFIX, + GW_IP_ADDR, EVPN_VNI, TUNNEL_TYPE, + PMSI_TUNNEL_TYPE]) def add_evpn_local(route_type, route_dist, next_hop, **kwargs): """Adds EVPN route from VRF identified by *route_dist*. """ + + if(route_type in [EVPN_ETH_AUTO_DISCOVERY, EVPN_ETH_SEGMENT] + and kwargs['esi'] == 0): + raise ConfigValueError(conf_name=EVPN_ESI, + conf_value=kwargs['esi']) + try: # Create new path and insert into appropriate VRF table. 
tm = CORE_MANAGER.get_core_service().table_manager @@ -285,3 +408,52 @@ VRF_RF: VRF_RF_L2_EVPN}.update(kwargs)] except BgpCoreError as e: raise PrefixError(desc=e) + + +# ============================================================================= +# BGP Flow Specification Routes related APIs +# ============================================================================= + +@RegisterWithArgChecks( + name='flowspec.add_local', + req_args=[FLOWSPEC_FAMILY, ROUTE_DISTINGUISHER, FLOWSPEC_RULES], + opt_args=[FLOWSPEC_ACTIONS]) +def add_flowspec_local(flowspec_family, route_dist, rules, **kwargs): + """Adds Flow Specification route from VRF identified by *route_dist*. + """ + try: + # Create new path and insert into appropriate VRF table. + tm = CORE_MANAGER.get_core_service().table_manager + tm.update_flowspec_vrf_table( + flowspec_family=flowspec_family, route_dist=route_dist, + rules=rules, **kwargs) + + # Send success response. + return [{FLOWSPEC_FAMILY: flowspec_family, + ROUTE_DISTINGUISHER: route_dist, + FLOWSPEC_RULES: rules}.update(kwargs)] + + except BgpCoreError as e: + raise PrefixError(desc=e) + + +@RegisterWithArgChecks( + name='flowspec.del_local', + req_args=[FLOWSPEC_FAMILY, ROUTE_DISTINGUISHER, FLOWSPEC_RULES]) +def del_flowspec_local(flowspec_family, route_dist, rules): + """Deletes/withdraws Flow Specification route from VRF identified + by *route_dist*. + """ + try: + tm = CORE_MANAGER.get_core_service().table_manager + tm.update_flowspec_vrf_table( + flowspec_family=flowspec_family, route_dist=route_dist, + rules=rules, is_withdraw=True) + + # Send success response. + return [{FLOWSPEC_FAMILY: flowspec_family, + ROUTE_DISTINGUISHER: route_dist, + FLOWSPEC_RULES: rules}] + + except BgpCoreError as e: + raise PrefixError(desc=e) diff -Nru ryu-4.9/ryu/services/protocols/bgp/api/rpc_log_handler.py ryu-4.15/ryu/services/protocols/bgp/api/rpc_log_handler.py --- ryu-4.9/ryu/services/protocols/bgp/api/rpc_log_handler.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/api/rpc_log_handler.py 2017-07-02 11:08:32.000000000 +0000 @@ -20,7 +20,7 @@ import logging from ryu.services.protocols.bgp.net_ctrl import NET_CONTROLLER -from ryu.services.protocols.bgp.net_ctrl import NOTF_LOG +from ryu.services.protocols.bgp.net_ctrl import NOTIFICATION_LOG class RpcLogHandler(logging.Handler): @@ -28,7 +28,7 @@ def emit(self, record): msg = self.format(record) NET_CONTROLLER.send_rpc_notification( - NOTF_LOG, + NOTIFICATION_LOG, { 'level': record.levelname, 'msg': msg diff -Nru ryu-4.9/ryu/services/protocols/bgp/api/rtconf.py ryu-4.15/ryu/services/protocols/bgp/api/rtconf.py --- ryu-4.9/ryu/services/protocols/bgp/api/rtconf.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/api/rtconf.py 2017-07-02 11:08:32.000000000 +0000 @@ -20,6 +20,9 @@ from ryu.services.protocols.bgp.api.base import register from ryu.services.protocols.bgp.api.base import RegisterWithArgChecks +from ryu.services.protocols.bgp.api.base import FLOWSPEC_FAMILY +from ryu.services.protocols.bgp.api.base import FLOWSPEC_RULES +from ryu.services.protocols.bgp.api.base import FLOWSPEC_ACTIONS from ryu.services.protocols.bgp.core_manager import CORE_MANAGER from ryu.services.protocols.bgp.rtconf.base import ConfWithId from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError @@ -297,3 +300,26 @@ def bmp_stop(host, port): core = CORE_MANAGER.get_core_service() return core.stop_bmp(host, port) + + +# 
============================================================================= +# BGP Flow Specification Routes related APIs +# ============================================================================= + +@RegisterWithArgChecks( + name='flowspec.add', + req_args=[FLOWSPEC_FAMILY, FLOWSPEC_RULES], + opt_args=[FLOWSPEC_ACTIONS]) +def add_flowspec(flowspec_family, rules, **kwargs): + tm = CORE_MANAGER.get_core_service().table_manager + tm.update_flowspec_global_table(flowspec_family, rules, **kwargs) + return True + + +@RegisterWithArgChecks( + name='flowspec.del', + req_args=[FLOWSPEC_FAMILY, FLOWSPEC_RULES]) +def del_flowspec(flowspec_family, rules): + tm = CORE_MANAGER.get_core_service().table_manager + tm.update_flowspec_global_table(flowspec_family, rules, is_withdraw=True) + return True diff -Nru ryu-4.9/ryu/services/protocols/bgp/application.py ryu-4.15/ryu/services/protocols/bgp/application.py --- ryu-4.9/ryu/services/protocols/bgp/application.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/application.py 2017-07-02 11:08:32.000000000 +0000 @@ -24,6 +24,7 @@ from ryu.lib import hub from ryu.utils import load_source from ryu.base.app_manager import RyuApp +from ryu.controller.event import EventBase from ryu.services.protocols.bgp.base import add_bgp_error_metadata from ryu.services.protocols.bgp.base import BGPSException from ryu.services.protocols.bgp.base import BIN_ERROR @@ -42,6 +43,8 @@ from ryu.services.protocols.bgp.rtconf.common import REFRESH_MAX_EOR_TIME from ryu.services.protocols.bgp.rtconf.common import REFRESH_STALEPATH_TIME from ryu.services.protocols.bgp.rtconf.common import ROUTER_ID +from ryu.services.protocols.bgp.rtconf.common import LOCAL_PREF +from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LOCAL_PREF from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4 from ryu.services.protocols.bgp.utils.validation import is_valid_ipv6 @@ -87,7 +90,100 @@ raise ApplicationException(desc=str(e)) +class EventBestPathChanged(EventBase): + """ + Event called when any best remote path is changed due to UPDATE messages + or remote peer's down. + + This event is the wrapper for ``best_path_change_handler`` of + ``bgpspeaker.BGPSpeaker``. + + ``path`` attribute contains an instance of ``info_base.base.Path`` + subclasses. + + If ``is_withdraw`` attribute is ``True``, ``path`` attribute has the + information of the withdraw route. + """ + + def __init__(self, path, is_withdraw): + super(EventBestPathChanged, self).__init__() + self.path = path + self.is_withdraw = is_withdraw + + +class EventPeerDown(EventBase): + """ + Event called when the session to the remote peer goes down. + + This event is the wrapper for ``peer_down_handler`` of + ``bgpspeaker.BGPSpeaker``. + + ``remote_ip`` attribute is the IP address of the remote peer. + + ``remote_as`` attribute is the AS number of the remote peer. + """ + + def __init__(self, remote_ip, remote_as): + super(EventPeerDown, self).__init__() + self.remote_ip = remote_ip + self.remote_as = remote_as + + +class EventPeerUp(EventBase): + """ + Event called when the session to the remote peer goes up. + + This event is the wrapper for ``peer_up_handler`` of + ``bgpspeaker.BGPSpeaker``. + + ``remote_ip`` attribute is the IP address of the remote peer. + + ``remote_as`` attribute is the AS number of the remote peer. 
+ """ + + def __init__(self, remote_ip, remote_as): + super(EventPeerUp, self).__init__() + self.remote_ip = remote_ip + self.remote_as = remote_as + + class RyuBGPSpeaker(RyuApp): + """ + Base application for implementing BGP applications. + + This application will notifies + - ``EventBestPathChanged`` + - ``EventPeerDown`` + - ``EventPeerUp`` + to other BGP applications. + To catch these events, specify ``@set_ev_cls()`` decorator to the event + handlers in the Ryu applications. + + Example:: + + ... + from ryu.base import app_manager + from ryu.controller.handler import set_ev_cls + from ryu.services.protocols.bgp import application as bgp_application + ... + + class MyBGPApp(app_manager.RyuApp): + _CONTEXTS = { + 'ryubgpspeaker': bgp_application.RyuBGPSpeaker, + } + + ... + @set_ev_cls(bgp_application.EventBestPathChanged) + def _best_patch_changed_handler(self, ev): + self.logger.info( + 'Best path changed: is_withdraw=%s, path=%s', + ev.is_withdraw, ev.path) + """ + _EVENTS = [ + EventBestPathChanged, + EventPeerDown, + EventPeerUp, + ] def __init__(self, *args, **kwargs): super(RyuBGPSpeaker, self).__init__(*args, **kwargs) @@ -149,6 +245,14 @@ raise ApplicationException( desc='Required BGP configuration missing: %s' % e) + # Set event notify handlers if no corresponding handler specified. + bgp_settings['best_path_change_handler'] = settings.get( + 'best_path_change_handler', self._notify_best_path_changed_event) + bgp_settings['peer_down_handler'] = settings.get( + 'peer_down_handler', self._notify_peer_down_event) + bgp_settings['peer_up_handler'] = settings.get( + 'peer_up_handler', self._notify_peer_up_event) + # Get optional settings. bgp_settings[BGP_SERVER_PORT] = settings.get( BGP_SERVER_PORT, DEFAULT_BGP_SERVER_PORT) @@ -158,6 +262,10 @@ REFRESH_MAX_EOR_TIME, DEFAULT_REFRESH_MAX_EOR_TIME) bgp_settings[LABEL_RANGE] = settings.get( LABEL_RANGE, DEFAULT_LABEL_RANGE) + bgp_settings['allow_local_as_in_count'] = settings.get( + 'allow_local_as_in_count', 0) + bgp_settings[LOCAL_PREF] = settings.get( + LOCAL_PREF, DEFAULT_LOCAL_PREF) # Create BGPSpeaker instance. LOG.debug('Starting BGPSpeaker...') @@ -175,6 +283,18 @@ LOG.debug('Adding routes...') self._add_routes(settings.get('routes', [])) + def _notify_best_path_changed_event(self, ev): + ev = EventBestPathChanged(ev.path, ev.is_withdraw) + self.send_event_to_observers(ev) + + def _notify_peer_down_event(self, remote_ip, remote_as): + ev = EventPeerDown(remote_ip, remote_as) + self.send_event_to_observers(ev) + + def _notify_peer_up_event(self, remote_ip, remote_as): + ev = EventPeerUp(remote_ip, remote_as) + self.send_event_to_observers(ev) + def _add_neighbors(self, settings): """ Add BGP neighbors from the given settings. 
@@ -215,6 +335,8 @@ prefix_add = self.speaker.prefix_add elif 'route_type' in route_settings: prefix_add = self.speaker.evpn_prefix_add + elif 'flowspec_family' in route_settings: + prefix_add = self.speaker.flowspec_prefix_add else: LOG.debug('Skip invalid route settings: %s', route_settings) continue diff -Nru ryu-4.9/ryu/services/protocols/bgp/base.py ryu-4.15/ryu/services/protocols/bgp/base.py --- ryu-4.9/ryu/services/protocols/bgp/base.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/base.py 2017-07-02 11:08:32.000000000 +0000 @@ -36,6 +36,11 @@ from ryu.lib.packet.bgp import RF_IPv4_VPN from ryu.lib.packet.bgp import RF_IPv6_VPN from ryu.lib.packet.bgp import RF_L2_EVPN +from ryu.lib.packet.bgp import RF_IPv4_FLOWSPEC +from ryu.lib.packet.bgp import RF_IPv6_FLOWSPEC +from ryu.lib.packet.bgp import RF_VPNv4_FLOWSPEC +from ryu.lib.packet.bgp import RF_VPNv6_FLOWSPEC +from ryu.lib.packet.bgp import RF_L2VPN_FLOWSPEC from ryu.lib.packet.bgp import RF_RTC_UC from ryu.services.protocols.bgp.utils.circlist import CircularListType from ryu.services.protocols.bgp.utils.evtlet import LoopingCall @@ -56,6 +61,11 @@ RF_RTC_UC, RF_IPv6_VPN, RF_L2_EVPN, + RF_IPv4_FLOWSPEC, + RF_IPv6_FLOWSPEC, + RF_VPNv4_FLOWSPEC, + RF_VPNv6_FLOWSPEC, + RF_L2VPN_FLOWSPEC, } @@ -257,21 +267,24 @@ """ hub.sleep(seconds) - def _stop_child_activities(self): + def _stop_child_activities(self, name=None): """Stop all child activities spawn by this activity. """ # Makes a list copy of items() to avoid dictionary size changed # during iteration for child_name, child in list(self._child_activity_map.items()): + if name is not None and name != child_name: + continue LOG.debug('%s: Stopping child activity %s ', self.name, child_name) if child.started: child.stop() + self._child_activity_map.pop(child_name, None) def _stop_child_threads(self, name=None): """Stops all threads spawn by this activity. 
""" for thread_name, thread in list(self._child_thread_map.items()): - if not name or thread_name is name: + if name is not None and thread_name is name: LOG.debug('%s: Stopping child thread %s', self.name, thread_name) thread.kill() diff -Nru ryu-4.9/ryu/services/protocols/bgp/bgp_sample_conf.py ryu-4.15/ryu/services/protocols/bgp/bgp_sample_conf.py --- ryu-4.9/ryu/services/protocols/bgp/bgp_sample_conf.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/bgp_sample_conf.py 2017-07-02 11:08:32.000000000 +0000 @@ -3,11 +3,33 @@ from ryu.services.protocols.bgp.bgpspeaker import RF_VPN_V4 from ryu.services.protocols.bgp.bgpspeaker import RF_VPN_V6 from ryu.services.protocols.bgp.bgpspeaker import RF_L2_EVPN +from ryu.services.protocols.bgp.bgpspeaker import RF_VPNV4_FLOWSPEC +from ryu.services.protocols.bgp.bgpspeaker import RF_VPNV6_FLOWSPEC +from ryu.services.protocols.bgp.bgpspeaker import RF_L2VPN_FLOWSPEC +from ryu.services.protocols.bgp.bgpspeaker import EVPN_MAX_ET +from ryu.services.protocols.bgp.bgpspeaker import ESI_TYPE_LACP +from ryu.services.protocols.bgp.bgpspeaker import ESI_TYPE_MAC_BASED +from ryu.services.protocols.bgp.bgpspeaker import EVPN_ETH_AUTO_DISCOVERY from ryu.services.protocols.bgp.bgpspeaker import EVPN_MAC_IP_ADV_ROUTE from ryu.services.protocols.bgp.bgpspeaker import TUNNEL_TYPE_VXLAN from ryu.services.protocols.bgp.bgpspeaker import EVPN_MULTICAST_ETAG_ROUTE +from ryu.services.protocols.bgp.bgpspeaker import EVPN_ETH_SEGMENT from ryu.services.protocols.bgp.bgpspeaker import EVPN_IP_PREFIX_ROUTE - +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_FAMILY_IPV4 +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_FAMILY_IPV6 +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_FAMILY_VPNV4 +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_FAMILY_VPNV6 +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_FAMILY_L2VPN +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_TA_SAMPLE +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_TA_TERMINAL +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_VLAN_POP +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_VLAN_PUSH +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_VLAN_SWAP +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_VLAN_RW_INNER +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_VLAN_RW_OUTER +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_TPID_TI +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_TPID_TO +from ryu.services.protocols.bgp.bgpspeaker import REDUNDANCY_MODE_SINGLE_ACTIVE # ============================================================================= # BGP configuration. @@ -20,6 +42,9 @@ # BGP Router ID. 'router_id': '172.17.0.1', + # Default local preference + 'local_pref': 100, + # List of BGP neighbors. # The parameters for each neighbor are the same as the arguments of # BGPSpeaker.neighbor_add() method. @@ -37,6 +62,15 @@ 'remote_as': 65001, 'enable_evpn': True, }, + { + 'address': '172.17.0.4', + 'remote_as': 65001, + 'enable_ipv4fs': True, + 'enable_ipv6fs': True, + 'enable_vpnv4fs': True, + 'enable_vpnv6fs': True, + 'enable_l2vpnfs': True, + }, ], # List of BGP VRF tables. 
@@ -64,11 +98,35 @@ 'export_rts': ['65001:200'], 'route_family': RF_L2_EVPN, }, + # Example of VRF for IPv4 FlowSpec + { + 'route_dist': '65001:250', + 'import_rts': ['65001:250'], + 'export_rts': ['65001:250'], + 'route_family': RF_VPNV4_FLOWSPEC, + }, + # Example of VRF for IPv6 FlowSpec + { + 'route_dist': '65001:300', + 'import_rts': ['65001:300'], + 'export_rts': ['65001:300'], + 'route_family': RF_VPNV6_FLOWSPEC, + }, + # Example of VRF for L2VPN FlowSpec + { + 'route_dist': '65001:350', + 'import_rts': ['65001:350'], + 'export_rts': ['65001:350'], + 'route_family': RF_L2VPN_FLOWSPEC, + }, ], # List of BGP routes. # The parameters for each route are the same as the arguments of - # BGPSpeaker.prefix_add() or BGPSpeaker.evpn_prefix_add() method. + # the following methods: + # - BGPSpeaker.prefix_add() + # - BGPSpeaker.evpn_prefix_add() + # - BGPSpeaker.flowspec_prefix_add() 'routes': [ # Example of IPv4 prefix { @@ -92,6 +150,17 @@ }, # Example of EVPN prefix { + 'route_type': EVPN_ETH_AUTO_DISCOVERY, + 'route_dist': '65001:200', + 'esi': { + 'type': ESI_TYPE_LACP, + 'mac_addr': 'aa:bb:cc:dd:ee:ff', + 'port_key': 100, + }, + 'ethernet_tag_id': EVPN_MAX_ET, + 'redundancy_mode': REDUNDANCY_MODE_SINGLE_ACTIVE, + }, + { 'route_type': EVPN_MAC_IP_ADV_ROUTE, 'route_dist': '65001:200', 'esi': 0, @@ -110,6 +179,16 @@ 'ip_addr': '10.40.1.1', }, { + 'route_type': EVPN_ETH_SEGMENT, + 'route_dist': '65001:200', + 'esi': { + 'type': ESI_TYPE_MAC_BASED, + 'mac_addr': 'aa:bb:cc:dd:ee:ff', + 'local_disc': 100, + }, + 'ip_addr': '172.17.0.1', + }, + { 'route_type': EVPN_IP_PREFIX_ROUTE, 'route_dist': '65001:200', 'esi': 0, @@ -117,6 +196,193 @@ 'ip_prefix': '10.50.1.0/24', 'gw_ip_addr': '172.16.0.1', }, + # Example of Flow Specification IPv4 prefix + { + 'flowspec_family': FLOWSPEC_FAMILY_IPV4, + 'rules': { + 'dst_prefix': '10.60.1.0/24', + 'src_prefix': '172.17.0.0/24', + 'ip_proto': 6, + 'port': '80 | 8000', + 'dst_port': '>9000 & <9050', + 'src_port': '>=8500 & <=9000', + 'icmp_type': 0, + 'icmp_code': 6, + 'tcp_flags': 'SYN+ACK & !=URGENT', + 'packet_len': 1000, + 'dscp': '22 | 24', + 'fragment': 'LF | ==FF', + }, + 'actions': { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + 'traffic_action': { + 'action': FLOWSPEC_TA_SAMPLE | FLOWSPEC_TA_TERMINAL, + }, + 'redirect': { + 'as_number': 10, + 'local_administrator': 100, + }, + 'traffic_marking': { + 'dscp': 24, + } + }, + }, + # Example of Flow Specification VPNv4 prefix + { + 'flowspec_family': FLOWSPEC_FAMILY_VPNV4, + 'route_dist': '65001:250', + 'rules': { + 'dst_prefix': '10.70.1.0/24', + 'src_prefix': '172.18.0.0/24', + 'ip_proto': 6, + 'port': '80 | 8000', + 'dst_port': '>9000 & <9050', + 'src_port': '>=8500 & <=9000', + 'icmp_type': 0, + 'icmp_code': 6, + 'tcp_flags': 'SYN+ACK & !=URGENT', + 'packet_len': 1000, + 'dscp': '22 | 24', + 'fragment': 'LF | ==FF', + }, + 'actions': { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + 'traffic_action': { + 'action': FLOWSPEC_TA_SAMPLE | FLOWSPEC_TA_TERMINAL, + }, + 'redirect': { + 'as_number': 10, + 'local_administrator': 100, + }, + 'traffic_marking': { + 'dscp': 24, + } + }, + }, + # Example of Flow Specification IPv6 prefix + { + 'flowspec_family': FLOWSPEC_FAMILY_IPV6, + 'rules': { + 'dst_prefix': '2001::1/128/32', + 'src_prefix': '3001::2/128', + 'next_header': 6, + 'port': '80 | 8000', + 'dst_port': '>9000 & <9050', + 'src_port': '>=8500 & <=9000', + 'icmp_type': 0, + 'icmp_code': 6, + 'tcp_flags': 'SYN+ACK & !=URGENT', + 'packet_len': 1000, + 'dscp': '22 | 
24', + 'fragment': 'LF | ==FF', + 'flow_label': 100, + }, + 'actions': { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + 'traffic_action': { + 'action': FLOWSPEC_TA_SAMPLE | FLOWSPEC_TA_TERMINAL, + }, + 'redirect': { + 'as_number': 10, + 'local_administrator': 100, + }, + 'traffic_marking': { + 'dscp': 24, + } + }, + }, + # Example of Flow Specification VPNv6 prefix + { + 'flowspec_family': FLOWSPEC_FAMILY_VPNV6, + 'route_dist': '65001:300', + 'rules': { + 'dst_prefix': '2001::1/128/32', + 'src_prefix': '3001::2/128', + 'next_header': 6, + 'port': '80 | 8000', + 'dst_port': '>9000 & <9050', + 'src_port': '>=8500 & <=9000', + 'icmp_type': 0, + 'icmp_code': 6, + 'tcp_flags': 'SYN+ACK & !=URGENT', + 'packet_len': 1000, + 'dscp': '22 | 24', + 'fragment': 'LF | ==FF', + 'flow_label': 100, + }, + 'actions': { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + 'traffic_action': { + 'action': FLOWSPEC_TA_SAMPLE | FLOWSPEC_TA_TERMINAL, + }, + 'redirect': { + 'as_number': 10, + 'local_administrator': 100, + }, + 'traffic_marking': { + 'dscp': 24, + } + }, + }, + # Example of Flow Specification L2VPN prefix + { + 'flowspec_family': FLOWSPEC_FAMILY_L2VPN, + 'route_dist': '65001:350', + 'rules': { + 'ether_type': 0x0800, + 'src_mac': '12:34:56:78:90:AB', + 'dst_mac': 'BE:EF:C0:FF:EE:DD', + 'llc_dsap': 0x42, + 'llc_ssap': 0x42, + 'llc_control': 100, + 'snap': 0x12345, + 'vlan_id': '>4000', + 'vlan_cos': '>=3', + 'inner_vlan_id': '<3000', + 'inner_vlan_cos': '<=5', + }, + 'actions': { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + 'traffic_action': { + 'action': FLOWSPEC_TA_SAMPLE | FLOWSPEC_TA_TERMINAL, + }, + 'redirect': { + 'as_number': 10, + 'local_administrator': 100, + }, + 'traffic_marking': { + 'dscp': 24, + }, + 'vlan_action': { + 'actions_1': FLOWSPEC_VLAN_POP | FLOWSPEC_VLAN_PUSH, + 'vlan_1': 3000, + 'cos_1': 3, + 'actions_2': FLOWSPEC_VLAN_SWAP, + 'vlan_2': 4000, + 'cos_2': 2, + }, + 'tpid_action': { + 'actions': FLOWSPEC_TPID_TI | FLOWSPEC_TPID_TO, + 'tpid_1': 200, + 'tpid_2': 300, + } + }, + } ], } diff -Nru ryu-4.9/ryu/services/protocols/bgp/bgpspeaker.py ryu-4.15/ryu/services/protocols/bgp/bgpspeaker.py --- ryu-4.9/ryu/services/protocols/bgp/bgpspeaker.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/bgpspeaker.py 2017-07-02 11:08:32.000000000 +0000 @@ -18,6 +18,11 @@ import netaddr from ryu.lib import hub +from ryu.lib.packet.bgp import ( + BGPFlowSpecTrafficActionCommunity, + BGPFlowSpecVlanActionCommunity, + BGPFlowSpecTPIDActionCommunity, +) from ryu.services.protocols.bgp.core_manager import CORE_MANAGER from ryu.services.protocols.bgp.signals.emit import BgpSignalBus @@ -26,6 +31,7 @@ from ryu.services.protocols.bgp.api.base import EVPN_ROUTE_TYPE from ryu.services.protocols.bgp.api.base import EVPN_ESI from ryu.services.protocols.bgp.api.base import EVPN_ETHERNET_TAG_ID +from ryu.services.protocols.bgp.api.base import REDUNDANCY_MODE from ryu.services.protocols.bgp.api.base import IP_ADDR from ryu.services.protocols.bgp.api.base import MAC_ADDR from ryu.services.protocols.bgp.api.base import NEXT_HOP @@ -36,16 +42,34 @@ from ryu.services.protocols.bgp.api.base import EVPN_VNI from ryu.services.protocols.bgp.api.base import TUNNEL_TYPE from ryu.services.protocols.bgp.api.base import PMSI_TUNNEL_TYPE +from ryu.services.protocols.bgp.api.prefix import EVPN_MAX_ET +from ryu.services.protocols.bgp.api.prefix import ESI_TYPE_LACP +from ryu.services.protocols.bgp.api.prefix import ESI_TYPE_L2_BRIDGE +from 
ryu.services.protocols.bgp.api.prefix import ESI_TYPE_MAC_BASED +from ryu.services.protocols.bgp.api.prefix import EVPN_ETH_AUTO_DISCOVERY from ryu.services.protocols.bgp.api.prefix import EVPN_MAC_IP_ADV_ROUTE from ryu.services.protocols.bgp.api.prefix import EVPN_MULTICAST_ETAG_ROUTE +from ryu.services.protocols.bgp.api.prefix import EVPN_ETH_SEGMENT from ryu.services.protocols.bgp.api.prefix import EVPN_IP_PREFIX_ROUTE +from ryu.services.protocols.bgp.api.prefix import REDUNDANCY_MODE_ALL_ACTIVE +from ryu.services.protocols.bgp.api.prefix import REDUNDANCY_MODE_SINGLE_ACTIVE from ryu.services.protocols.bgp.api.prefix import TUNNEL_TYPE_VXLAN from ryu.services.protocols.bgp.api.prefix import TUNNEL_TYPE_NVGRE from ryu.services.protocols.bgp.api.prefix import ( PMSI_TYPE_NO_TUNNEL_INFO, PMSI_TYPE_INGRESS_REP) +from ryu.services.protocols.bgp.api.prefix import ( + FLOWSPEC_FAMILY, + FLOWSPEC_FAMILY_IPV4, + FLOWSPEC_FAMILY_VPNV4, + FLOWSPEC_FAMILY_IPV6, + FLOWSPEC_FAMILY_VPNV6, + FLOWSPEC_FAMILY_L2VPN, + FLOWSPEC_RULES, + FLOWSPEC_ACTIONS) from ryu.services.protocols.bgp.rtconf.common import LOCAL_AS from ryu.services.protocols.bgp.rtconf.common import ROUTER_ID +from ryu.services.protocols.bgp.rtconf.common import CLUSTER_ID from ryu.services.protocols.bgp.rtconf.common import BGP_SERVER_PORT from ryu.services.protocols.bgp.rtconf.common import DEFAULT_BGP_SERVER_PORT from ryu.services.protocols.bgp.rtconf.common import ( @@ -54,6 +78,9 @@ from ryu.services.protocols.bgp.rtconf.common import REFRESH_MAX_EOR_TIME from ryu.services.protocols.bgp.rtconf.common import REFRESH_STALEPATH_TIME from ryu.services.protocols.bgp.rtconf.common import LABEL_RANGE +from ryu.services.protocols.bgp.rtconf.common import ALLOW_LOCAL_AS_IN_COUNT +from ryu.services.protocols.bgp.rtconf.common import LOCAL_PREF +from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LOCAL_PREF from ryu.services.protocols.bgp.rtconf import neighbors from ryu.services.protocols.bgp.rtconf import vrfs from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV4 @@ -61,22 +88,38 @@ from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV4 from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV6 from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_EVPN +from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV4FS +from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV6FS +from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV4FS +from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV6FS +from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_L2VPNFS from ryu.services.protocols.bgp.rtconf.base import CAP_ENHANCED_REFRESH from ryu.services.protocols.bgp.rtconf.base import CAP_FOUR_OCTET_AS_NUMBER from ryu.services.protocols.bgp.rtconf.base import MULTI_EXIT_DISC from ryu.services.protocols.bgp.rtconf.base import SITE_OF_ORIGINS -from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CAP_MBGP_IPV4 -from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CAP_MBGP_IPV6 -from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CAP_MBGP_VPNV4 -from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CAP_MBGP_VPNV6 -from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CAP_MBGP_EVPN +from ryu.services.protocols.bgp.rtconf.neighbors import ( + DEFAULT_CAP_MBGP_IPV4, + DEFAULT_CAP_MBGP_IPV6, + DEFAULT_CAP_MBGP_VPNV4, + DEFAULT_CAP_MBGP_VPNV6, + DEFAULT_CAP_MBGP_EVPN, + DEFAULT_CAP_MBGP_IPV4FS, + DEFAULT_CAP_MBGP_IPV6FS, + 
DEFAULT_CAP_MBGP_VPNV4FS, + DEFAULT_CAP_MBGP_VPNV6FS, + DEFAULT_CAP_MBGP_L2VPNFS, +) from ryu.services.protocols.bgp.rtconf.neighbors import ( DEFAULT_CAP_ENHANCED_REFRESH, DEFAULT_CAP_FOUR_OCTET_AS_NUMBER) from ryu.services.protocols.bgp.rtconf.neighbors import DEFAULT_CONNECT_MODE from ryu.services.protocols.bgp.rtconf.neighbors import PEER_NEXT_HOP from ryu.services.protocols.bgp.rtconf.neighbors import PASSWORD -from ryu.services.protocols.bgp.rtconf.neighbors import IS_ROUTE_SERVER_CLIENT -from ryu.services.protocols.bgp.rtconf.neighbors import IS_NEXT_HOP_SELF +from ryu.services.protocols.bgp.rtconf.neighbors import ( + DEFAULT_IS_ROUTE_SERVER_CLIENT, IS_ROUTE_SERVER_CLIENT) +from ryu.services.protocols.bgp.rtconf.neighbors import ( + DEFAULT_IS_ROUTE_REFLECTOR_CLIENT, IS_ROUTE_REFLECTOR_CLIENT) +from ryu.services.protocols.bgp.rtconf.neighbors import ( + DEFAULT_IS_NEXT_HOP_SELF, IS_NEXT_HOP_SELF) from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE from ryu.services.protocols.bgp.rtconf.neighbors import LOCAL_ADDRESS from ryu.services.protocols.bgp.rtconf.neighbors import LOCAL_PORT @@ -93,6 +136,24 @@ RF_VPN_V4 = vrfs.VRF_RF_IPV4 RF_VPN_V6 = vrfs.VRF_RF_IPV6 RF_L2_EVPN = vrfs.VRF_RF_L2_EVPN +RF_VPNV4_FLOWSPEC = vrfs.VRF_RF_IPV4_FLOWSPEC +RF_VPNV6_FLOWSPEC = vrfs.VRF_RF_IPV6_FLOWSPEC +RF_L2VPN_FLOWSPEC = vrfs.VRF_RF_L2VPN_FLOWSPEC + +# Constants for the Traffic Filtering Actions of Flow Specification. +FLOWSPEC_TA_SAMPLE = BGPFlowSpecTrafficActionCommunity.SAMPLE +FLOWSPEC_TA_TERMINAL = BGPFlowSpecTrafficActionCommunity.TERMINAL + +# Constants for the VLAN Actions of Flow Specification. +FLOWSPEC_VLAN_POP = BGPFlowSpecVlanActionCommunity.POP +FLOWSPEC_VLAN_PUSH = BGPFlowSpecVlanActionCommunity.PUSH +FLOWSPEC_VLAN_SWAP = BGPFlowSpecVlanActionCommunity.SWAP +FLOWSPEC_VLAN_RW_INNER = BGPFlowSpecVlanActionCommunity.REWRITE_INNER +FLOWSPEC_VLAN_RW_OUTER = BGPFlowSpecVlanActionCommunity.REWRITE_OUTER + +# Constants for the TPID Actions of Flow Specification. +FLOWSPEC_TPID_TI = BGPFlowSpecTPIDActionCommunity.TI +FLOWSPEC_TPID_TO = BGPFlowSpecTPIDActionCommunity.TO class EventPrefix(object): @@ -165,7 +226,10 @@ peer_up_handler=None, ssh_console=False, ssh_port=None, ssh_host=None, ssh_host_key=None, - label_range=DEFAULT_LABEL_RANGE): + label_range=DEFAULT_LABEL_RANGE, + allow_local_as_in_count=0, + cluster_id=None, + local_pref=DEFAULT_LOCAL_PREF): """Create a new BGPSpeaker object with as_number and router_id to listen on bgp_server_port. @@ -213,7 +277,21 @@ ``label_range`` specifies the range of MPLS labels generated automatically. + + ``allow_local_as_in_count`` maximum number of local AS number + occurrences in AS_PATH. This option is useful for e.g. auto RD/RT + configurations in leaf/spine architecture with shared AS numbers. + The default is 0 and means "local AS number is not allowed in + AS_PATH". To allow local AS, 3 is recommended (Cisco's default). + + ``cluster_id`` specifies the cluster identifier for Route Reflector. + It must be the string representation of an IPv4 address. + If omitted, "router_id" is used for this field. + + ``local_pref`` specifies the default local preference. It must be an + integer. 
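        A minimal illustration of combining these options (the AS number,
        router ID and option values below are examples only)::

            >>> speaker = BGPSpeaker(as_number=65001,
            ...                      router_id='172.17.0.1',
            ...                      cluster_id='172.17.0.1',
            ...                      allow_local_as_in_count=3,
            ...                      local_pref=200)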
""" + super(BGPSpeaker, self).__init__() settings = { @@ -223,6 +301,9 @@ REFRESH_STALEPATH_TIME: refresh_stalepath_time, REFRESH_MAX_EOR_TIME: refresh_max_eor_time, LABEL_RANGE: label_range, + ALLOW_LOCAL_AS_IN_COUNT: allow_local_as_in_count, + CLUSTER_ID: cluster_id, + LOCAL_PREF: local_pref, } self._core_start(settings) self._init_signal_listeners() @@ -300,11 +381,19 @@ enable_vpnv4=DEFAULT_CAP_MBGP_VPNV4, enable_vpnv6=DEFAULT_CAP_MBGP_VPNV6, enable_evpn=DEFAULT_CAP_MBGP_EVPN, + enable_ipv4fs=DEFAULT_CAP_MBGP_IPV4FS, + enable_ipv6fs=DEFAULT_CAP_MBGP_IPV6FS, + enable_vpnv4fs=DEFAULT_CAP_MBGP_VPNV4FS, + enable_vpnv6fs=DEFAULT_CAP_MBGP_VPNV6FS, + enable_l2vpnfs=DEFAULT_CAP_MBGP_L2VPNFS, enable_enhanced_refresh=DEFAULT_CAP_ENHANCED_REFRESH, enable_four_octet_as_number=DEFAULT_CAP_FOUR_OCTET_AS_NUMBER, next_hop=None, password=None, multi_exit_disc=None, - site_of_origins=None, is_route_server_client=False, - is_next_hop_self=False, local_address=None, + site_of_origins=None, + is_route_server_client=DEFAULT_IS_ROUTE_SERVER_CLIENT, + is_route_reflector_client=DEFAULT_IS_ROUTE_REFLECTOR_CLIENT, + is_next_hop_self=DEFAULT_IS_NEXT_HOP_SELF, + local_address=None, local_port=None, local_as=None, connect_mode=DEFAULT_CONNECT_MODE): """ This method registers a new neighbor. The BGP speaker tries to @@ -319,25 +408,40 @@ an integer between 1 and 65535. ``enable_ipv4`` enables IPv4 address family for this - neighbor. The default is True. + neighbor. ``enable_ipv6`` enables IPv6 address family for this - neighbor. The default is False. + neighbor. ``enable_vpnv4`` enables VPNv4 address family for this - neighbor. The default is False. + neighbor. ``enable_vpnv6`` enables VPNv6 address family for this - neighbor. The default is False. + neighbor. ``enable_evpn`` enables Ethernet VPN address family for this - neighbor. The default is False. + neighbor. + + ``enable_ipv4fs`` enables IPv4 Flow Specification address family + for this neighbor. + + ``enable_ipv6fs`` enables IPv6 Flow Specification address family + for this neighbor. + + ``enable_vpnv4fs`` enables VPNv4 Flow Specification address family + for this neighbor. + + ``enable_vpnv6fs`` enables VPNv6 Flow Specification address family + for this neighbor. + + ``enable_l2vpnfs`` enables L2VPN Flow Specification address family + for this neighbor. ``enable_enhanced_refresh`` enables Enhanced Route Refresh for this - neighbor. The default is False. + neighbor. ``enable_four_octet_as_number`` enables Four-Octet AS Number - capability for this neighbor. The default is True. + capability for this neighbor. ``next_hop`` specifies the next hop IP address. If not specified, host's ip address to access to a peer is used. @@ -345,9 +449,9 @@ ``password`` is used for the MD5 authentication if it's specified. By default, the MD5 authentication is disabled. - ``multi_exit_disc`` specifies multi exit discriminator (MED) value. - The default is None and if not specified, MED value is - not sent to the neighbor. It must be an integer. + ``multi_exit_disc`` specifies multi exit discriminator (MED) value + as an int type value. + If omitted, MED is not sent to the neighbor. ``site_of_origins`` specifies site_of_origin values. This parameter must be a list of string. @@ -355,6 +459,9 @@ ``is_route_server_client`` specifies whether this neighbor is a router server's client or not. + ``is_route_reflector_client`` specifies whether this neighbor is a + router reflector's client or not. 
+ ``is_next_hop_self`` specifies whether the BGP speaker announces its own ip address to iBGP neighbor or not as path's next_hop address. @@ -364,13 +471,14 @@ ``local_port`` specifies source TCP port for iBGP peering. ``local_as`` specifies local AS number per-peer. - The default is the AS number of BGPSpeaker instance. + If omitted, the AS number of BGPSpeaker instance is used. ``connect_mode`` specifies how to connect to this neighbor. - CONNECT_MODE_ACTIVE tries to connect from us. - CONNECT_MODE_PASSIVE just listens and wait for the connection. - CONNECT_MODE_BOTH use both methods. - The default is CONNECT_MODE_BOTH. + This parameter must be one of the following. + + - CONNECT_MODE_ACTIVE = 'active' + - CONNECT_MODE_PASSIVE = 'passive' + - CONNECT_MODE_BOTH (default) = 'both' """ bgp_neighbor = { neighbors.IP_ADDRESS: address, @@ -378,6 +486,7 @@ PEER_NEXT_HOP: next_hop, PASSWORD: password, IS_ROUTE_SERVER_CLIENT: is_route_server_client, + IS_ROUTE_REFLECTOR_CLIENT: is_route_reflector_client, IS_NEXT_HOP_SELF: is_next_hop_self, CONNECT_MODE: connect_mode, CAP_ENHANCED_REFRESH: enable_enhanced_refresh, @@ -387,6 +496,11 @@ CAP_MBGP_VPNV4: enable_vpnv4, CAP_MBGP_VPNV6: enable_vpnv6, CAP_MBGP_EVPN: enable_evpn, + CAP_MBGP_IPV4FS: enable_ipv4fs, + CAP_MBGP_IPV6FS: enable_ipv6fs, + CAP_MBGP_VPNV4FS: enable_vpnv4fs, + CAP_MBGP_VPNV6FS: enable_vpnv6fs, + CAP_MBGP_L2VPNFS: enable_l2vpnfs, } if multi_exit_disc: @@ -465,7 +579,10 @@ state of all the peers return. ``format`` specifies the format of the response. - This parameter must be 'json' or 'cli'. + This parameter must be one of the following. + + - 'json' (default) + - 'cli' """ show = { 'params': ['neighbor', 'summary'], @@ -514,12 +631,9 @@ def prefix_del(self, prefix, route_dist=None): """ This method deletes a advertised prefix. - ``prefix`` must be the string representation of an IP network - (e.g., 10.1.1.0/24). + ``prefix`` must be the string representation of an IP network. - ``route_dist`` specifies a route distinguisher value. This - parameter is necessary for only VPNv4 and VPNv6 address - families. + ``route_dist`` specifies a route distinguisher value. """ func_name = 'network.del' networks = { @@ -538,18 +652,30 @@ def evpn_prefix_add(self, route_type, route_dist, esi=0, ethernet_tag_id=None, mac_addr=None, ip_addr=None, ip_prefix=None, gw_ip_addr=None, vni=None, - next_hop=None, tunnel_type=None, - pmsi_tunnel_type=None): + next_hop=None, tunnel_type=None, pmsi_tunnel_type=None, + redundancy_mode=None): """ This method adds a new EVPN route to be advertised. - ``route_type`` specifies one of the EVPN route type name. The - supported route types are EVPN_MAC_IP_ADV_ROUTE, - EVPN_MULTICAST_ETAG_ROUTE and EVPN_IP_PREFIX_ROUTE. + ``route_type`` specifies one of the EVPN route type name. + This parameter must be one of the following. + + - EVPN_ETH_AUTO_DISCOVERY = 'eth_ad' + - EVPN_MAC_IP_ADV_ROUTE = 'mac_ip_adv' + - EVPN_MULTICAST_ETAG_ROUTE = 'multicast_etag' + - EVPN_ETH_SEGMENT = 'eth_seg' + - EVPN_IP_PREFIX_ROUTE = 'ip_prefix' ``route_dist`` specifies a route distinguisher value. - ``esi`` is an integer value to specify the Ethernet Segment - Identifier. 0 is the default and denotes a single-homed site. + ``esi`` is an value to specify the Ethernet Segment Identifier. + 0 is the default and denotes a single-homed site. + If you want to advertise esi other than 0, + it must be set as dictionary type. + If esi is dictionary type, 'type' key must be set + and specifies ESI type. 
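        For example, an LACP based ESI could be passed as the following
        dictionary (the values are illustrative; the field names follow
        :py:mod:`ryu.lib.packet.bgp.EvpnLACPEsi`)::

            >>> esi = {
            ...     'type': ESI_TYPE_LACP,
            ...     'mac_addr': 'aa:bb:cc:dd:ee:ff',
            ...     'port_key': 100,
            ... }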
+ For the supported ESI type, see :py:mod:`ryu.lib.packet.bgp.EvpnEsi`. + The remaining arguments are the same as that for + the corresponding class. ``ethernet_tag_id`` specifies the Ethernet Tag ID. @@ -560,22 +686,39 @@ ``ip_prefix`` specifies an IPv4 or IPv6 prefix to advertise. ``gw_ip_addr`` specifies an IPv4 or IPv6 address of - gateway to advertise. + gateway to advertise. ``vni`` specifies an Virtual Network Identifier for VXLAN or Virtual Subnet Identifier for NVGRE. - If tunnel_type is not 'vxlan' or 'nvgre', this field is ignored. + If tunnel_type is not TUNNEL_TYPE_VXLAN or TUNNEL_TYPE_NVGRE, + this field is ignored. ``next_hop`` specifies the next hop address for this prefix. ``tunnel_type`` specifies the data plane encapsulation type - to advertise. By the default, this encapsulation attribute is - not advertised. + to advertise. By the default, this attribute is not advertised. + The supported encapsulation types are following. + + - TUNNEL_TYPE_VXLAN = 'vxlan' + - TUNNEL_TYPE_NVGRE = 'nvgre - ```pmsi_tunnel_type`` specifies the type of the PMSI tunnel attribute - used to encode the multicast tunnel identifier. - This field is advertised only if route_type is - EVPN_MULTICAST_ETAG_ROUTE. + ``pmsi_tunnel_type`` specifies the type of the PMSI tunnel attribute + used to encode the multicast tunnel identifier. + This attribute is advertised only if route_type is + EVPN_MULTICAST_ETAG_ROUTE and not advertised by the default. + This attribute can also carry vni if tunnel_type is specified. + The supported PMSI tunnel types are following. + + - PMSI_TYPE_NO_TUNNEL_INFO = 0 + - PMSI_TYPE_INGRESS_REP = 6 + + ``redundancy_mode`` specifies a redundancy mode type. + This attribute is advertised only if route_type is + EVPN_ETH_AUTO_DISCOVERY and not advertised by the default. + The supported redundancy mode types are following. 
+ + - REDUNDANCY_MODE_ALL_ACTIVE = 'all_active' + - REDUNDANCY_MODE_SINGLE_ACTIVE = 'single_active' """ func_name = 'evpn_prefix.add_local' @@ -589,11 +732,28 @@ NEXT_HOP: next_hop} # Set optional arguments - if tunnel_type: + if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]: kwargs[TUNNEL_TYPE] = tunnel_type + elif tunnel_type is not None: + raise ValueError('Unsupported tunnel type: %s' % tunnel_type) # Set route type specific arguments - if route_type == EVPN_MAC_IP_ADV_ROUTE: + if route_type == EVPN_ETH_AUTO_DISCOVERY: + kwargs.update({ + EVPN_ESI: esi, + EVPN_ETHERNET_TAG_ID: ethernet_tag_id, + }) + if vni is not None: + kwargs[EVPN_VNI] = vni + # Set Redundancy Mode Attribute arguments + if redundancy_mode in [ + REDUNDANCY_MODE_ALL_ACTIVE, + REDUNDANCY_MODE_SINGLE_ACTIVE]: + kwargs[REDUNDANCY_MODE] = redundancy_mode + elif redundancy_mode is not None: + raise ValueError('Unsupported Redundancy Mode: %s' % + redundancy_mode) + elif route_type == EVPN_MAC_IP_ADV_ROUTE: kwargs.update({ EVPN_ESI: esi, EVPN_ETHERNET_TAG_ID: ethernet_tag_id, @@ -608,7 +768,9 @@ EVPN_ETHERNET_TAG_ID: ethernet_tag_id, IP_ADDR: ip_addr, }) - + # Set tunnel type specific arguments + if tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE]: + kwargs[EVPN_VNI] = vni # Set PMSI Tunnel Attribute arguments if pmsi_tunnel_type in [ PMSI_TYPE_NO_TUNNEL_INFO, @@ -617,6 +779,11 @@ elif pmsi_tunnel_type is not None: raise ValueError('Unsupported PMSI tunnel type: %s' % pmsi_tunnel_type) + elif route_type == EVPN_ETH_SEGMENT: + kwargs.update({ + EVPN_ESI: esi, + IP_ADDR: ip_addr, + }) elif route_type == EVPN_IP_PREFIX_ROUTE: kwargs.update({ EVPN_ESI: esi, @@ -641,8 +808,7 @@ ``route_dist`` specifies a route distinguisher value. - ``esi`` is an integer value to specify the Ethernet Segment - Identifier. 0 is the default and denotes a single-homed site. + ``esi`` is an value to specify the Ethernet Segment Identifier. ``ethernet_tag_id`` specifies the Ethernet Tag ID. @@ -659,10 +825,14 @@ ROUTE_DISTINGUISHER: route_dist} # Set route type specific arguments - if route_type == EVPN_MAC_IP_ADV_ROUTE: + if route_type == EVPN_ETH_AUTO_DISCOVERY: kwargs.update({ EVPN_ESI: esi, EVPN_ETHERNET_TAG_ID: ethernet_tag_id, + }) + elif route_type == EVPN_MAC_IP_ADV_ROUTE: + kwargs.update({ + EVPN_ETHERNET_TAG_ID: ethernet_tag_id, MAC_ADDR: mac_addr, IP_ADDR: ip_addr, }) @@ -671,6 +841,11 @@ EVPN_ETHERNET_TAG_ID: ethernet_tag_id, IP_ADDR: ip_addr, }) + elif route_type == EVPN_ETH_SEGMENT: + kwargs.update({ + EVPN_ESI: esi, + IP_ADDR: ip_addr, + }) elif route_type == EVPN_IP_PREFIX_ROUTE: kwargs.update({ EVPN_ETHERNET_TAG_ID: ethernet_tag_id, @@ -681,6 +856,138 @@ call(func_name, **kwargs) + def flowspec_prefix_add(self, flowspec_family, rules, route_dist=None, + actions=None): + """ This method adds a new Flow Specification prefix to be advertised. + + ``flowspec_family`` specifies one of the flowspec family name. + This parameter must be one of the following. + + - FLOWSPEC_FAMILY_IPV4 = 'ipv4fs' + - FLOWSPEC_FAMILY_IPV6 = 'ipv6fs' + - FLOWSPEC_FAMILY_VPNV4 = 'vpnv4fs' + - FLOWSPEC_FAMILY_VPNV6 = 'vpnv6fs' + - FLOWSPEC_FAMILY_L2VPN = 'l2vpnfs' + + ``rules`` specifies NLRIs of Flow Specification as + a dictionary type value. + For the supported NLRI types and arguments, + see `from_user()` method of the following classes. 
+ + - :py:mod:`ryu.lib.packet.bgp.FlowSpecIPv4NLRI` + - :py:mod:`ryu.lib.packet.bgp.FlowSpecIPv6NLRI` + - :py:mod:`ryu.lib.packet.bgp.FlowSpecVPNv4NLRI` + - :py:mod:`ryu.lib.packet.bgp.FlowSpecVPNv6NLRI` + - :py:mod:`ryu.lib.packet.bgp.FlowSpecL2VPNNLRI` + + ``route_dist`` specifies a route distinguisher value. + This parameter is required only if flowspec_family is one of the + following address family. + + - FLOWSPEC_FAMILY_VPNV4 = 'vpnv4fs' + - FLOWSPEC_FAMILY_VPNV6 = 'vpnv6fs' + - FLOWSPEC_FAMILY_L2VPN = 'l2vpnfs' + + ``actions`` specifies Traffic Filtering Actions of + Flow Specification as a dictionary type value. + The keys are "ACTION_NAME" for each action class and + values are used for the arguments to that class. + For the supported "ACTION_NAME" and arguments, + see the following table. + + =============== =============================================================== + ACTION_NAME Action Class + =============== =============================================================== + traffic_rate :py:mod:`ryu.lib.packet.bgp.BGPFlowSpecTrafficRateCommunity` + traffic_action :py:mod:`ryu.lib.packet.bgp.BGPFlowSpecTrafficActionCommunity` + redirect :py:mod:`ryu.lib.packet.bgp.BGPFlowSpecRedirectCommunity` + traffic_marking :py:mod:`ryu.lib.packet.bgp.BGPFlowSpecTrafficMarkingCommunity` + vlan_action :py:mod:`ryu.lib.packet.bgp.BGPFlowSpecVlanActionCommunity` + tpid_action :py:mod:`ryu.lib.packet.bgp.BGPFlowSpecTPIDActionCommunity` + =============== =============================================================== + + Example(IPv4):: + + >>> speaker = BGPSpeaker(as_number=65001, router_id='172.17.0.1') + >>> speaker.neighbor_add(address='172.17.0.2', + ... remote_as=65002, + ... enable_ipv4fs=True) + >>> speaker.flowspec_prefix_add( + ... flowspec_family=FLOWSPEC_FAMILY_IPV4, + ... rules={ + ... 'dst_prefix': '10.60.1.0/24' + ... }, + ... actions={ + ... 'traffic_marking': { + ... 'dscp': 24 + ... } + ... } + ... ) + + Example(VPNv4):: + + >>> speaker = BGPSpeaker(as_number=65001, router_id='172.17.0.1') + >>> speaker.neighbor_add(address='172.17.0.2', + ... remote_as=65002, + ... enable_vpnv4fs=True) + >>> speaker.vrf_add(route_dist='65001:100', + ... import_rts=['65001:100'], + ... export_rts=['65001:100'], + ... route_family=RF_VPNV4_FLOWSPEC) + >>> speaker.flowspec_prefix_add( + ... flowspec_family=FLOWSPEC_FAMILY_VPNV4, + ... route_dist='65000:100', + ... rules={ + ... 'dst_prefix': '10.60.1.0/24' + ... }, + ... actions={ + ... 'traffic_marking': { + ... 'dscp': 24 + ... } + ... } + ... ) + """ + func_name = 'flowspec.add' + + # Set required arguments + kwargs = { + FLOWSPEC_FAMILY: flowspec_family, + FLOWSPEC_RULES: rules, + FLOWSPEC_ACTIONS: actions or {}, + } + + if flowspec_family in [FLOWSPEC_FAMILY_VPNV4, FLOWSPEC_FAMILY_VPNV6, + FLOWSPEC_FAMILY_L2VPN]: + func_name = 'flowspec.add_local' + kwargs.update({ROUTE_DISTINGUISHER: route_dist}) + + call(func_name, **kwargs) + + def flowspec_prefix_del(self, flowspec_family, rules, route_dist=None): + """ This method deletes an advertised Flow Specification route. + + ``flowspec_family`` specifies one of the flowspec family name. + + ``rules`` specifies NLRIs of Flow Specification as + a dictionary type value. + + ``route_dist`` specifies a route distinguisher value. 
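        Example(IPv4), withdrawing the route advertised in the
        flowspec_prefix_add() example above (values illustrative, ``speaker``
        as in the other examples in this file)::

            >>> speaker.flowspec_prefix_del(
            ...     flowspec_family=FLOWSPEC_FAMILY_IPV4,
            ...     rules={
            ...         'dst_prefix': '10.60.1.0/24'
            ...     }
            ... )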
+ """ + func_name = 'flowspec.del' + + # Set required arguments + kwargs = { + FLOWSPEC_FAMILY: flowspec_family, + FLOWSPEC_RULES: rules, + } + + if flowspec_family in [FLOWSPEC_FAMILY_VPNV4, FLOWSPEC_FAMILY_VPNV6, + FLOWSPEC_FAMILY_L2VPN]: + func_name = 'flowspec.del_local' + kwargs.update({ROUTE_DISTINGUISHER: route_dist}) + + call(func_name, **kwargs) + def vrf_add(self, route_dist, import_rts, export_rts, site_of_origins=None, route_family=RF_VPN_V4, multi_exit_disc=None): """ This method adds a new vrf used for VPN. @@ -695,14 +1002,20 @@ This parameter must be a list of string. ``route_family`` specifies route family of the VRF. - This parameter must be RF_VPN_V4, RF_VPN_V6 or RF_L2_EVPN. + This parameter must be one of the following. + + - RF_VPN_V4 (default) = 'ipv4' + - RF_VPN_V6 = 'ipv6' + - RF_L2_EVPN = 'evpn' + - RF_VPNV4_FLOWSPEC = 'ipv4fs' + - RF_VPNV6_FLOWSPEC = 'ipv6fs' + - RF_L2VPN_FLOWSPEC = 'l2vpnfs' ``multi_exit_disc`` specifies multi exit discriminator (MED) value. It must be an integer. """ - - assert route_family in SUPPORTED_VRF_RF,\ - 'route_family must be RF_VPN_V4, RF_VPN_V6 or RF_L2_EVPN' + if route_family not in SUPPORTED_VRF_RF: + raise ValueError('Unsupported route_family: %s' % route_family) vrf = { vrfs.ROUTE_DISTINGUISHER: route_dist, @@ -729,21 +1042,27 @@ route_family='all', format='json'): """ This method returns the existing vrfs. - ``subcommand`` specifies the subcommand. - - 'routes': shows routes present for vrf + ``subcommand`` specifies one of the following. - 'summary': shows configuration and summary of vrf + - 'routes': shows routes present for vrf + - 'summary': shows configuration and summary of vrf ``route_dist`` specifies a route distinguisher value. If route_family is not 'all', this value must be specified. ``route_family`` specifies route family of the VRF. - This parameter must be RF_VPN_V4, RF_VPN_V6 or RF_L2_EVPN - or 'all'. + This parameter must be one of the following. + + - RF_VPN_V4 = 'ipv4' + - RF_VPN_V6 = 'ipv6' + - RF_L2_EVPN = 'evpn' + - 'all' (default) ``format`` specifies the format of the response. - This parameter must be 'json' or 'cli'. + This parameter must be one of the following. + + - 'json' (default) + - 'cli' """ show = { 'format': format, @@ -763,7 +1082,10 @@ ``family`` specifies the address family of the RIB (e.g. 'ipv4'). ``format`` specifies the format of the response. - This parameter must be 'json' or 'cli'. + This parameter must be one of the following. + + - 'json' (default) + - 'cli' """ show = { 'params': ['rib', family], @@ -779,15 +1101,17 @@ ``route_type`` This parameter is necessary for only received-routes and sent-routes. - received-routes : paths received and not withdrawn by given peer - - sent-routes : paths sent and not withdrawn to given peer + - received-routes : paths received and not withdrawn by given peer + - sent-routes : paths sent and not withdrawn to given peer ``address`` specifies the IP address of the peer. It must be the string representation of an IP address. ``format`` specifies the format of the response. - This parameter must be 'json' or 'cli'. + This parameter must be one of the following. + + - 'json' (default) + - 'cli' """ show = { 'format': format, @@ -803,7 +1127,10 @@ """ This method returns a list of the BGP neighbors. ``format`` specifies the format of the response. - This parameter must be 'json' or 'cli'. + This parameter must be one of the following. 
+ + - 'json' (default) + - 'cli' """ show = { 'params': ['neighbor'], @@ -813,11 +1140,11 @@ return call('operator.show', **show) def _set_filter(self, filter_type, address, filters): - assert filter_type in ('in', 'out'),\ - 'filter type must be \'in\' or \'out\'' + assert filter_type in ('in', 'out'), ( + "filter type must be 'in' or 'out'") - assert all(isinstance(f, Filter) for f in filters),\ - 'all the items in filters must be an instance of Filter sub-class' + assert all(isinstance(f, Filter) for f in filters), ( + 'all the items in filters must be an instance of Filter sub-class') if filters is None: filters = [] @@ -953,7 +1280,10 @@ are added. ``route_family`` specifies route family of the VRF. - This parameter must be RF_VPN_V4 or RF_VPN_V6. + This parameter must be one of the following. + + - RF_VPN_V4 (default) = 'ipv4' + - RF_VPN_V6 = 'ipv6' We can set AttributeMap to a neighbor as follows:: @@ -966,8 +1296,8 @@ speaker.attribute_map_set('192.168.50.102', [attribute_map]) """ - assert route_family in (RF_VPN_V4, RF_VPN_V6),\ - 'route_family must be RF_VPN_V4 or RF_VPN_V6' + if route_family not in SUPPORTED_VRF_RF: + raise ValueError('Unsupported route_family: %s' % route_family) func_name = 'neighbor.attribute_map.set' param = { @@ -989,13 +1319,16 @@ ``route_dist`` specifies route distinguisher that has attribute_maps. ``route_family`` specifies route family of the VRF. - This parameter must be RF_VPN_V4 or RF_VPN_V6. + This parameter must be one of the following. + + - RF_VPN_V4 (default) = 'ipv4' + - RF_VPN_V6 = 'ipv6' Returns a list object containing an instance of AttributeMap """ - assert route_family in (RF_VPN_V4, RF_VPN_V6),\ - 'route_family must be RF_VPN_V4 or RF_VPN_V6' + if route_family not in SUPPORTED_VRF_RF: + raise ValueError('Unsupported route_family: %s' % route_family) func_name = 'neighbor.attribute_map.get' param = { diff -Nru ryu-4.9/ryu/services/protocols/bgp/core_managers/table_manager.py ryu-4.15/ryu/services/protocols/bgp/core_managers/table_manager.py --- ryu-4.9/ryu/services/protocols/bgp/core_managers/table_manager.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/core_managers/table_manager.py 2017-07-02 11:08:32.000000000 +0000 @@ -15,10 +15,27 @@ from ryu.services.protocols.bgp.info_base.vrf6 import Vrf6Table from ryu.services.protocols.bgp.info_base.vrfevpn import VrfEvpnTable from ryu.services.protocols.bgp.info_base.evpn import EvpnTable +from ryu.services.protocols.bgp.info_base.ipv4fs import IPv4FlowSpecPath +from ryu.services.protocols.bgp.info_base.ipv4fs import IPv4FlowSpecTable +from ryu.services.protocols.bgp.info_base.vpnv4fs import VPNv4FlowSpecTable +from ryu.services.protocols.bgp.info_base.vrf4fs import Vrf4FlowSpecTable +from ryu.services.protocols.bgp.info_base.ipv6fs import IPv6FlowSpecPath +from ryu.services.protocols.bgp.info_base.ipv6fs import IPv6FlowSpecTable +from ryu.services.protocols.bgp.info_base.vpnv6fs import VPNv6FlowSpecTable +from ryu.services.protocols.bgp.info_base.vrf6fs import Vrf6FlowSpecTable +from ryu.services.protocols.bgp.info_base.l2vpnfs import L2VPNFlowSpecTable +from ryu.services.protocols.bgp.info_base.vrfl2vpnfs import L2vpnFlowSpecPath +from ryu.services.protocols.bgp.info_base.vrfl2vpnfs import L2vpnFlowSpecTable from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV4 from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV6 from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_L2_EVPN +from ryu.services.protocols.bgp.rtconf.vrfs import 
VRF_RF_IPV4_FLOWSPEC +from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV6_FLOWSPEC +from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_L2VPN_FLOWSPEC from ryu.services.protocols.bgp.rtconf.vrfs import SUPPORTED_VRF_RF +from ryu.services.protocols.bgp.utils.bgp import create_v4flowspec_actions +from ryu.services.protocols.bgp.utils.bgp import create_v6flowspec_actions +from ryu.services.protocols.bgp.utils.bgp import create_l2vpnflowspec_actions from ryu.lib import type_desc from ryu.lib.packet.bgp import RF_IPv4_UC @@ -26,17 +43,29 @@ from ryu.lib.packet.bgp import RF_IPv4_VPN from ryu.lib.packet.bgp import RF_IPv6_VPN from ryu.lib.packet.bgp import RF_L2_EVPN +from ryu.lib.packet.bgp import RF_IPv4_FLOWSPEC +from ryu.lib.packet.bgp import RF_IPv6_FLOWSPEC +from ryu.lib.packet.bgp import RF_VPNv4_FLOWSPEC +from ryu.lib.packet.bgp import RF_VPNv6_FLOWSPEC +from ryu.lib.packet.bgp import RF_L2VPN_FLOWSPEC from ryu.lib.packet.bgp import RF_RTC_UC from ryu.lib.packet.bgp import BGPPathAttributeOrigin from ryu.lib.packet.bgp import BGPPathAttributeAsPath +from ryu.lib.packet.bgp import BGPPathAttributeExtendedCommunities from ryu.lib.packet.bgp import BGP_ATTR_TYPE_ORIGIN from ryu.lib.packet.bgp import BGP_ATTR_TYPE_AS_PATH from ryu.lib.packet.bgp import BGP_ATTR_ORIGIN_IGP +from ryu.lib.packet.bgp import BGP_ATTR_TYPE_EXTENDED_COMMUNITIES +from ryu.lib.packet.bgp import EvpnEsi from ryu.lib.packet.bgp import EvpnArbitraryEsi from ryu.lib.packet.bgp import EvpnNLRI from ryu.lib.packet.bgp import EvpnMacIPAdvertisementNLRI +from ryu.lib.packet.bgp import EvpnInclusiveMulticastEthernetTagNLRI from ryu.lib.packet.bgp import IPAddrPrefix from ryu.lib.packet.bgp import IP6AddrPrefix +from ryu.lib.packet.bgp import FlowSpecIPv4NLRI +from ryu.lib.packet.bgp import FlowSpecIPv6NLRI +from ryu.lib.packet.bgp import FlowSpecL2VPNNLRI from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4 from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4_prefix @@ -117,6 +146,12 @@ vpn_table = self.get_vpn6_table() elif vrf_table.route_family == VrfEvpnTable.ROUTE_FAMILY: vpn_table = self.get_evpn_table() + elif vrf_table.route_family == Vrf4FlowSpecTable.ROUTE_FAMILY: + vpn_table = self.get_vpnv4fs_table() + elif vrf_table.route_family == Vrf6FlowSpecTable.ROUTE_FAMILY: + vpn_table = self.get_vpnv6fs_table() + elif vrf_table.route_family == L2vpnFlowSpecTable.ROUTE_FAMILY: + vpn_table = self.get_l2vpnfs_table() else: raise ValueError('Invalid VRF table route family: %s' % vrf_table.route_family) @@ -183,6 +218,16 @@ global_table = self.get_vpn6_table() elif route_family == RF_L2_EVPN: global_table = self.get_evpn_table() + elif route_family == RF_IPv4_FLOWSPEC: + global_table = self.get_ipv4fs_table() + elif route_family == RF_IPv6_FLOWSPEC: + global_table = self.get_ipv6fs_table() + elif route_family == RF_VPNv4_FLOWSPEC: + global_table = self.get_vpnv4fs_table() + elif route_family == RF_VPNv6_FLOWSPEC: + global_table = self.get_vpnv6fs_table() + elif route_family == RF_L2VPN_FLOWSPEC: + global_table = self.get_l2vpnfs_table() elif route_family == RF_RTC_UC: global_table = self.get_rtc_table() @@ -293,6 +338,81 @@ self._next_vpnv4_label += 1 return lbl + def get_ipv4fs_table(self): + """Returns global IPv4 Flow Specification table. + + Creates the table if it does not exist. + """ + ipv4fs_table = self._global_tables.get(RF_IPv4_FLOWSPEC) + # Lazy initialization of the table. 
+ if not ipv4fs_table: + ipv4fs_table = IPv4FlowSpecTable(self._core_service, + self._signal_bus) + self._global_tables[RF_IPv4_FLOWSPEC] = ipv4fs_table + self._tables[(None, RF_IPv4_FLOWSPEC)] = ipv4fs_table + + return ipv4fs_table + + def get_ipv6fs_table(self): + """Returns global IPv6 Flow Specification table. + + Creates the table if it does not exist. + """ + ipv6fs_table = self._global_tables.get(RF_IPv6_FLOWSPEC) + # Lazy initialization of the table. + if not ipv6fs_table: + ipv6fs_table = IPv6FlowSpecTable(self._core_service, + self._signal_bus) + self._global_tables[RF_IPv6_FLOWSPEC] = ipv6fs_table + self._tables[(None, RF_IPv6_FLOWSPEC)] = ipv6fs_table + + return ipv6fs_table + + def get_vpnv4fs_table(self): + """Returns global VPNv4 Flow Specification table. + + Creates the table if it does not exist. + """ + vpnv4fs_table = self._global_tables.get(RF_VPNv4_FLOWSPEC) + # Lazy initialization of the table. + if not vpnv4fs_table: + vpnv4fs_table = VPNv4FlowSpecTable(self._core_service, + self._signal_bus) + self._global_tables[RF_VPNv4_FLOWSPEC] = vpnv4fs_table + self._tables[(None, RF_VPNv4_FLOWSPEC)] = vpnv4fs_table + + return vpnv4fs_table + + def get_vpnv6fs_table(self): + """Returns global VPNv6 Flow Specification table. + + Creates the table if it does not exist. + """ + vpnv6fs_table = self._global_tables.get(RF_VPNv6_FLOWSPEC) + # Lazy initialization of the table. + if not vpnv6fs_table: + vpnv6fs_table = VPNv6FlowSpecTable(self._core_service, + self._signal_bus) + self._global_tables[RF_VPNv6_FLOWSPEC] = vpnv6fs_table + self._tables[(None, RF_VPNv6_FLOWSPEC)] = vpnv6fs_table + + return vpnv6fs_table + + def get_l2vpnfs_table(self): + """Returns global L2VPN Flow Specification table. + + Creates the table if it does not exist. + """ + l2vpnfs_table = self._global_tables.get(RF_L2VPN_FLOWSPEC) + # Lazy initialization of the table. + if not l2vpnfs_table: + l2vpnfs_table = L2VPNFlowSpecTable(self._core_service, + self._signal_bus) + self._global_tables[RF_L2VPN_FLOWSPEC] = l2vpnfs_table + self._tables[(None, RF_L2VPN_FLOWSPEC)] = l2vpnfs_table + + return l2vpnfs_table + def get_nexthop_label(self, label_key): return self._next_hop_label.get(label_key, None) @@ -372,6 +492,12 @@ vrf_table = Vrf6Table elif route_family == VRF_RF_L2_EVPN: vrf_table = VrfEvpnTable + elif route_family == VRF_RF_IPV4_FLOWSPEC: + vrf_table = Vrf4FlowSpecTable + elif route_family == VRF_RF_IPV6_FLOWSPEC: + vrf_table = Vrf6FlowSpecTable + elif route_family == VRF_RF_L2VPN_FLOWSPEC: + vrf_table = L2vpnFlowSpecTable else: raise ValueError('Unsupported route family for VRF: %s' % route_family) @@ -455,6 +581,12 @@ route_family = RF_IPv6_UC elif vpn_path.route_family == RF_L2_EVPN: route_family = RF_L2_EVPN + elif vpn_path.route_family == RF_VPNv4_FLOWSPEC: + route_family = RF_IPv4_FLOWSPEC + elif vpn_path.route_family == RF_VPNv6_FLOWSPEC: + route_family = RF_IPv6_FLOWSPEC + elif vpn_path.route_family == RF_L2VPN_FLOWSPEC: + route_family = RF_L2VPN_FLOWSPEC else: raise ValueError('Unsupported route family for VRF: %s' % vpn_path.route_family) @@ -483,7 +615,8 @@ def update_vrf_table(self, route_dist, prefix=None, next_hop=None, route_family=None, route_type=None, tunnel_type=None, - is_withdraw=False, pmsi_tunnel_type=None, **kwargs): + is_withdraw=False, redundancy_mode=None, + pmsi_tunnel_type=None, **kwargs): """Update a BGP route in the VRF table identified by `route_dist` with the given `next_hop`. 
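The five get_*fs_table() methods above share one lazy-initialization pattern:
look the table up by route family, create it on first access, and register it
both in the per-family global dictionary and under the (None, route_family)
scope key. A purely illustrative sketch of that shared logic (the helper name
_get_global_fs_table is hypothetical and does not exist in the code):

    def _get_global_fs_table(self, route_family, table_cls):
        # Return the global Flow Specification table for 'route_family',
        # creating and registering it lazily on first access.
        table = self._global_tables.get(route_family)
        if not table:
            table = table_cls(self._core_service, self._signal_bus)
            self._global_tables[route_family] = table
            self._tables[(None, route_family)] = table
        return table

    # get_ipv4fs_table() would then reduce to:
    #     return self._get_global_fs_table(RF_IPv4_FLOWSPEC, IPv4FlowSpecTable)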
@@ -496,6 +629,8 @@ If `route_family` is VRF_RF_L2_EVPN, `route_type` and `kwargs` are required to construct EVPN NLRI and `prefix` is ignored. + ``redundancy_mode`` specifies a redundancy mode type. + ` `pmsi_tunnel_type` specifies the type of the PMSI tunnel attribute used to encode the multicast tunnel identifier. This field is advertised only if route_type is @@ -522,6 +657,8 @@ desc='VRF table does not exist: route_dist=%s, ' 'route_family=%s' % (route_dist, route_family)) + vni = kwargs.get('vni', None) + if route_family == VRF_RF_IPV4: if not is_valid_ipv4_prefix(prefix): raise BgpCoreError(desc='Invalid IPv4 prefix: %s' % prefix) @@ -537,18 +674,28 @@ if route_type == EvpnMacIPAdvertisementNLRI.ROUTE_TYPE_NAME: # MPLS labels will be assigned automatically kwargs['mpls_labels'] = [] + if route_type == EvpnInclusiveMulticastEthernetTagNLRI.ROUTE_TYPE_NAME: + # Inclusive Multicast Ethernet Tag Route does not have "vni", + # omit "vni" from "kwargs" here. + vni = kwargs.pop('vni', None) subclass = EvpnNLRI._lookup_type_name(route_type) kwargs['route_dist'] = route_dist esi = kwargs.get('esi', None) if esi is not None: - # Note: Currently, we support arbitrary 9-octet ESI value only. - kwargs['esi'] = EvpnArbitraryEsi(type_desc.Int9.from_user(esi)) - if 'vni' in kwargs: - # Disable to generate MPLS labels, because encapsulation type - # is not MPLS. + if isinstance(esi, dict): + esi_type = esi.get('type', 0) + esi_class = EvpnEsi._lookup_type(esi_type) + kwargs['esi'] = esi_class.from_jsondict(esi) + else: # isinstance(esi, numbers.Integral) + kwargs['esi'] = EvpnArbitraryEsi( + type_desc.Int9.from_user(esi)) + if vni is not None: + # Disable to generate MPLS labels, + # because encapsulation type is not MPLS. from ryu.services.protocols.bgp.api.prefix import ( TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE) - assert tunnel_type in [TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE] + assert tunnel_type in [ + None, TUNNEL_TYPE_VXLAN, TUNNEL_TYPE_NVGRE] gen_lbl = False prefix = subclass(**kwargs) else: @@ -559,9 +706,71 @@ # withdrawal. Hence multiple withdrawals have not side effect. return vrf_table.insert_vrf_path( nlri=prefix, next_hop=next_hop, gen_lbl=gen_lbl, - is_withdraw=is_withdraw, tunnel_type=tunnel_type, + is_withdraw=is_withdraw, redundancy_mode=redundancy_mode, + vni=vni, tunnel_type=tunnel_type, pmsi_tunnel_type=pmsi_tunnel_type) + def update_flowspec_vrf_table(self, flowspec_family, route_dist, rules, + actions=None, is_withdraw=False): + """Update a BGP route in the VRF table for Flow Specification. + + ``flowspec_family`` specifies one of the flowspec family name. + + ``route_dist`` specifies a route distinguisher value. + + ``rules`` specifies NLRIs of Flow Specification as + a dictionary type value. + + `` actions`` specifies Traffic Filtering Actions of + Flow Specification as a dictionary type value. + + If `is_withdraw` is False, which is the default, add a BGP route + to the VRF table identified by `route_dist`. + If `is_withdraw` is True, remove a BGP route from the VRF table. 
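        A minimal illustration for the VPNv4 case (the variable name
        ``table_manager`` and all values are examples only; in normal use this
        is driven through the BGPSpeaker Flow Specification APIs rather than
        called directly)::

            table_manager.update_flowspec_vrf_table(
                flowspec_family=FLOWSPEC_FAMILY_VPNV4,
                route_dist='65001:100',
                rules={'dst_prefix': '10.70.1.0/24'},
                actions={'traffic_rate': {'as_number': 0,
                                          'rate_info': 0.0}})

        Per RFC 5575, a traffic-rate of 0 discards the matching traffic.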
+ """ + from ryu.services.protocols.bgp.core import BgpCoreError + from ryu.services.protocols.bgp.api.prefix import ( + FLOWSPEC_FAMILY_VPNV4, + FLOWSPEC_FAMILY_VPNV6, + FLOWSPEC_FAMILY_L2VPN, + ) + + if flowspec_family == FLOWSPEC_FAMILY_VPNV4: + vrf_table = self._tables.get((route_dist, VRF_RF_IPV4_FLOWSPEC)) + prefix = FlowSpecIPv4NLRI.from_user(**rules) + try: + communities = create_v4flowspec_actions(actions) + except ValueError as e: + raise BgpCoreError(desc=str(e)) + elif flowspec_family == FLOWSPEC_FAMILY_VPNV6: + vrf_table = self._tables.get((route_dist, VRF_RF_IPV6_FLOWSPEC)) + prefix = FlowSpecIPv6NLRI.from_user(**rules) + try: + communities = create_v6flowspec_actions(actions) + except ValueError as e: + raise BgpCoreError(desc=str(e)) + elif flowspec_family == FLOWSPEC_FAMILY_L2VPN: + vrf_table = self._tables.get((route_dist, VRF_RF_L2VPN_FLOWSPEC)) + prefix = FlowSpecL2VPNNLRI.from_user(route_dist, **rules) + try: + communities = create_l2vpnflowspec_actions(actions) + except ValueError as e: + raise BgpCoreError(desc=str(e)) + else: + raise BgpCoreError( + desc='Unsupported flowspec_family %s' % flowspec_family) + + if vrf_table is None: + raise BgpCoreError( + desc='VRF table does not exist: route_dist=%s, ' + 'flowspec_family=%s' % (route_dist, flowspec_family)) + + # We do not check if we have a path to given prefix, we issue + # withdrawal. Hence multiple withdrawals have not side effect. + vrf_table.insert_vrffs_path( + nlri=prefix, communities=communities, + is_withdraw=is_withdraw) + def update_global_table(self, prefix, next_hop=None, is_withdraw=False): """Update a BGP route in the Global table for the given `prefix` with the given `next_hop`. @@ -598,7 +807,92 @@ pattrs=pathattrs, nexthop=next_hop, is_withdraw=is_withdraw) - # add to global ipv4 table and propagates to neighbors + # add to global table and propagates to neighbors + self.learn_path(new_path) + + def update_flowspec_global_table(self, flowspec_family, rules, + actions=None, is_withdraw=False): + """Update a BGP route in the Global table for Flow Specification. + + ``flowspec_family`` specifies one of the Flow Specification + family name. + + ``rules`` specifies NLRIs of Flow Specification as + a dictionary type value. + + `` actions`` specifies Traffic Filtering Actions of + Flow Specification as a dictionary type value. + + If `is_withdraw` is False, which is the default, add a BGP route + to the Global table. + If `is_withdraw` is True, remove a BGP route from the Global table. 
+ """ + + from ryu.services.protocols.bgp.core import BgpCoreError + from ryu.services.protocols.bgp.api.prefix import ( + FLOWSPEC_FAMILY_IPV4, + FLOWSPEC_FAMILY_IPV6, + FLOWSPEC_FAMILY_L2VPN, + ) + + src_ver_num = 1 + peer = None + + # set mandatory path attributes + origin = BGPPathAttributeOrigin(BGP_ATTR_ORIGIN_IGP) + aspath = BGPPathAttributeAsPath([[]]) + + pathattrs = OrderedDict() + pathattrs[BGP_ATTR_TYPE_ORIGIN] = origin + pathattrs[BGP_ATTR_TYPE_AS_PATH] = aspath + + if flowspec_family == FLOWSPEC_FAMILY_IPV4: + _nlri = FlowSpecIPv4NLRI.from_user(**rules) + p = IPv4FlowSpecPath + + try: + communities = create_v4flowspec_actions(actions) + except ValueError as e: + raise BgpCoreError(desc=str(e)) + + if communities: + pathattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = ( + BGPPathAttributeExtendedCommunities( + communities=communities)) + elif flowspec_family == FLOWSPEC_FAMILY_IPV6: + _nlri = FlowSpecIPv6NLRI.from_user(**rules) + p = IPv6FlowSpecPath + + try: + communities = create_v6flowspec_actions(actions) + except ValueError as e: + raise BgpCoreError(desc=str(e)) + + if communities: + pathattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = ( + BGPPathAttributeExtendedCommunities( + communities=communities)) + elif flowspec_family == FLOWSPEC_FAMILY_L2VPN: + _nlri = FlowSpecL2VPNNLRI.from_user(**rules) + p = L2vpnFlowSpecPath + + try: + communities = create_l2vpnflowspec_actions(actions) + except ValueError as e: + raise BgpCoreError(desc=str(e)) + + if communities: + pathattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = ( + BGPPathAttributeExtendedCommunities( + communities=communities)) + else: + raise BgpCoreError( + desc='Unsupported flowspec family %s' % flowspec_family) + + new_path = p(peer, _nlri, src_ver_num, + pattrs=pathattrs, is_withdraw=is_withdraw) + + # add to global table and propagates to neighbors self.learn_path(new_path) def clean_stale_routes(self, peer, route_family=None): diff -Nru ryu-4.9/ryu/services/protocols/bgp/info_base/base.py ryu-4.15/ryu/services/protocols/bgp/info_base/base.py --- ryu-4.9/ryu/services/protocols/bgp/info_base/base.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/info_base/base.py 2017-07-02 11:08:32.000000000 +0000 @@ -843,8 +843,7 @@ return self._source is None def has_nexthop(self): - return not (not self._nexthop or self._nexthop == '0.0.0.0' or - self._nexthop == '::') + return self._nexthop and self._nexthop not in ('0.0.0.0', '::') def __str__(self): return ( @@ -1061,20 +1060,17 @@ Meaning of each policy is as follows: - * POLICY_TOP - Filter checks if the specified AS number is at the top of - AS_PATH attribute. - - * POLICY_END - Filter checks is the specified AS number - is at the last of AS_PATH attribute. - - * POLICY_INCLUDE - Filter checks if specified AS number - exists in AS_PATH attribute - - * POLICY_NOT_INCLUDE - Opposite to POLICY_INCLUDE + ================== ================================================== + Policy Description + ================== ================================================== + POLICY_TOP Filter checks if the specified AS number + is at the top of AS_PATH attribute. + POLICY_END Filter checks is the specified AS number + is at the last of AS_PATH attribute. + POLICY_INCLUDE Filter checks if specified AS number exists + in AS_PATH attribute. + POLICY_NOT_INCLUDE Opposite to POLICY_INCLUDE. 
+ ================== ================================================== """ POLICY_TOP = 2 diff -Nru ryu-4.9/ryu/services/protocols/bgp/info_base/ipv4fs.py ryu-4.15/ryu/services/protocols/bgp/info_base/ipv4fs.py --- ryu-4.9/ryu/services/protocols/bgp/info_base/ipv4fs.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/info_base/ipv4fs.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,93 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" + Defines data types and models required specifically + for Ipv4 Flow Specification support. +""" + +import logging + +from ryu.lib.packet.bgp import FlowSpecIPv4NLRI +from ryu.lib.packet.bgp import RF_IPv4_FLOWSPEC + +from ryu.services.protocols.bgp.info_base.base import Path +from ryu.services.protocols.bgp.info_base.base import Table +from ryu.services.protocols.bgp.info_base.base import Destination +from ryu.services.protocols.bgp.info_base.base import NonVrfPathProcessingMixin + +LOG = logging.getLogger('bgpspeaker.info_base.ipv4fs') + + +class IPv4FlowSpecDest(Destination, NonVrfPathProcessingMixin): + """IPv4 Flow Specification Destination + + Store Flow Specification Paths. + """ + ROUTE_FAMILY = RF_IPv4_FLOWSPEC + + def _best_path_lost(self): + old_best_path = self._best_path + NonVrfPathProcessingMixin._best_path_lost(self) + self._core_service._signal_bus.best_path_changed(old_best_path, True) + + def _new_best_path(self, best_path): + NonVrfPathProcessingMixin._new_best_path(self, best_path) + self._core_service._signal_bus.best_path_changed(best_path, False) + + +class IPv4FlowSpecTable(Table): + """Global table to store IPv4 Flow Specification routing information. + + Uses `FlowSpecIpv4Dest` to store destination information for each known + Flow Specification paths. + """ + ROUTE_FAMILY = RF_IPv4_FLOWSPEC + VPN_DEST_CLASS = IPv4FlowSpecDest + + def __init__(self, core_service, signal_bus): + super(IPv4FlowSpecTable, self).__init__(None, core_service, signal_bus) + + def _table_key(self, nlri): + """Return a key that will uniquely identify this NLRI inside + this table. + """ + return nlri.prefix + + def _create_dest(self, nlri): + return self.VPN_DEST_CLASS(self, nlri) + + def __str__(self): + return '%s(scope_id: %s, rf: %s)' % ( + self.__class__.__name__, self.scope_id, self.route_family + ) + + +class IPv4FlowSpecPath(Path): + """Represents a way of reaching an IPv4 Flow Specification destination.""" + ROUTE_FAMILY = RF_IPv4_FLOWSPEC + VRF_PATH_CLASS = None # defined in init - anti cyclic import hack + NLRI_CLASS = FlowSpecIPv4NLRI + + def __init__(self, *args, **kwargs): + # Set dummy IP address. + kwargs['nexthop'] = '0.0.0.0' + super(IPv4FlowSpecPath, self).__init__(*args, **kwargs) + from ryu.services.protocols.bgp.info_base.vrf4fs import ( + Vrf4FlowSpecPath) + self.VRF_PATH_CLASS = Vrf4FlowSpecPath + # Because the IPv4 Flow Specification does not require nexthop, + # initialize with None. 
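        # (The dummy '0.0.0.0' set above is only passed through to the parent
        # Path initializer; the path's real next hop is cleared on the next
        # line.)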
+ self._nexthop = None diff -Nru ryu-4.9/ryu/services/protocols/bgp/info_base/ipv6fs.py ryu-4.15/ryu/services/protocols/bgp/info_base/ipv6fs.py --- ryu-4.9/ryu/services/protocols/bgp/info_base/ipv6fs.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/info_base/ipv6fs.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,93 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" + Defines data types and models required specifically + for Ipv6 Flow Specification support. +""" + +import logging + +from ryu.lib.packet.bgp import FlowSpecIPv6NLRI +from ryu.lib.packet.bgp import RF_IPv6_FLOWSPEC + +from ryu.services.protocols.bgp.info_base.base import Path +from ryu.services.protocols.bgp.info_base.base import Table +from ryu.services.protocols.bgp.info_base.base import Destination +from ryu.services.protocols.bgp.info_base.base import NonVrfPathProcessingMixin + +LOG = logging.getLogger('bgpspeaker.info_base.ipv6fs') + + +class IPv6FlowSpecDest(Destination, NonVrfPathProcessingMixin): + """IPv6 Flow Specification Destination + + Store Flow Specification Paths. + """ + ROUTE_FAMILY = RF_IPv6_FLOWSPEC + + def _best_path_lost(self): + old_best_path = self._best_path + NonVrfPathProcessingMixin._best_path_lost(self) + self._core_service._signal_bus.best_path_changed(old_best_path, True) + + def _new_best_path(self, best_path): + NonVrfPathProcessingMixin._new_best_path(self, best_path) + self._core_service._signal_bus.best_path_changed(best_path, False) + + +class IPv6FlowSpecTable(Table): + """Global table to store IPv6 Flow Specification routing information. + + Uses `FlowSpecIpv6Dest` to store destination information for each known + Flow Specification paths. + """ + ROUTE_FAMILY = RF_IPv6_FLOWSPEC + VPN_DEST_CLASS = IPv6FlowSpecDest + + def __init__(self, core_service, signal_bus): + super(IPv6FlowSpecTable, self).__init__(None, core_service, signal_bus) + + def _table_key(self, nlri): + """Return a key that will uniquely identify this NLRI inside + this table. + """ + return nlri.prefix + + def _create_dest(self, nlri): + return self.VPN_DEST_CLASS(self, nlri) + + def __str__(self): + return '%s(scope_id: %s, rf: %s)' % ( + self.__class__.__name__, self.scope_id, self.route_family + ) + + +class IPv6FlowSpecPath(Path): + """Represents a way of reaching an IPv6 Flow Specification destination.""" + ROUTE_FAMILY = RF_IPv6_FLOWSPEC + VRF_PATH_CLASS = None # defined in init - anti cyclic import hack + NLRI_CLASS = FlowSpecIPv6NLRI + + def __init__(self, *args, **kwargs): + # Set dummy IP address. + kwargs['nexthop'] = '::' + super(IPv6FlowSpecPath, self).__init__(*args, **kwargs) + from ryu.services.protocols.bgp.info_base.vrf6fs import ( + Vrf6FlowSpecPath) + self.VRF_PATH_CLASS = Vrf6FlowSpecPath + # Because the IPv6 Flow Specification does not require nexthop, + # initialize with None. 
+ self._nexthop = None diff -Nru ryu-4.9/ryu/services/protocols/bgp/info_base/l2vpnfs.py ryu-4.15/ryu/services/protocols/bgp/info_base/l2vpnfs.py --- ryu-4.9/ryu/services/protocols/bgp/info_base/l2vpnfs.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/info_base/l2vpnfs.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,66 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" + Defines data types and models required specifically for + L2VPN Flow Specification support. +""" + +import logging + +from ryu.lib.packet.bgp import FlowSpecL2VPNNLRI +from ryu.lib.packet.bgp import RF_L2VPN_FLOWSPEC + +from ryu.services.protocols.bgp.info_base.vpn import VpnDest +from ryu.services.protocols.bgp.info_base.vpn import VpnPath +from ryu.services.protocols.bgp.info_base.vpn import VpnTable + +LOG = logging.getLogger('bgpspeaker.info_base.l2vpnfs') + + +class L2VPNFlowSpecDest(VpnDest): + """L2VPN Flow Specification Destination + + Store Flow Specification Paths. + """ + ROUTE_FAMILY = RF_L2VPN_FLOWSPEC + + +class L2VPNFlowSpecTable(VpnTable): + """Global table to store L2VPN Flow Specification routing information. + + Uses `L2VPNFlowSpecDest` to store destination information for each known + Flow Specification paths. + """ + ROUTE_FAMILY = RF_L2VPN_FLOWSPEC + VPN_DEST_CLASS = L2VPNFlowSpecDest + + +class L2VPNFlowSpecPath(VpnPath): + """Represents a way of reaching an L2VPN Flow Specification destination.""" + ROUTE_FAMILY = RF_L2VPN_FLOWSPEC + VRF_PATH_CLASS = None # defined in init - anti cyclic import hack + NLRI_CLASS = FlowSpecL2VPNNLRI + + def __init__(self, *args, **kwargs): + # Set dummy IP address. + kwargs['nexthop'] = '0.0.0.0' + super(L2VPNFlowSpecPath, self).__init__(*args, **kwargs) + from ryu.services.protocols.bgp.info_base.vrfl2vpnfs import( + L2vpnFlowSpecPath) + self.VRF_PATH_CLASS = L2vpnFlowSpecPath + # Because the L2VPN Flow Specification does not require nexthop, + # initialize with None. + self._nexthop = None diff -Nru ryu-4.9/ryu/services/protocols/bgp/info_base/vpnv4fs.py ryu-4.15/ryu/services/protocols/bgp/info_base/vpnv4fs.py --- ryu-4.9/ryu/services/protocols/bgp/info_base/vpnv4fs.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/info_base/vpnv4fs.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,66 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" + Defines data types and models required specifically for + VPNv4 Flow Specification support. +""" + +import logging + +from ryu.lib.packet.bgp import FlowSpecVPNv4NLRI +from ryu.lib.packet.bgp import RF_VPNv4_FLOWSPEC + +from ryu.services.protocols.bgp.info_base.vpn import VpnDest +from ryu.services.protocols.bgp.info_base.vpn import VpnPath +from ryu.services.protocols.bgp.info_base.vpn import VpnTable + +LOG = logging.getLogger('bgpspeaker.info_base.vpnv4fs') + + +class VPNv4FlowSpecDest(VpnDest): + """VPNv4 Flow Specification Destination + + Store Flow Specification Paths. + """ + ROUTE_FAMILY = RF_VPNv4_FLOWSPEC + + +class VPNv4FlowSpecTable(VpnTable): + """Global table to store VPNv4 Flow Specification routing information. + + Uses `VPNv4FlowSpecDest` to store destination information for each known + Flow Specification paths. + """ + ROUTE_FAMILY = RF_VPNv4_FLOWSPEC + VPN_DEST_CLASS = VPNv4FlowSpecDest + + +class VPNv4FlowSpecPath(VpnPath): + """Represents a way of reaching an VPNv4 Flow Specification destination.""" + ROUTE_FAMILY = RF_VPNv4_FLOWSPEC + VRF_PATH_CLASS = None # defined in init - anti cyclic import hack + NLRI_CLASS = FlowSpecVPNv4NLRI + + def __init__(self, *args, **kwargs): + # Set dummy IP address. + kwargs['nexthop'] = '0.0.0.0' + super(VPNv4FlowSpecPath, self).__init__(*args, **kwargs) + from ryu.services.protocols.bgp.info_base.vrf4fs import( + Vrf4FlowSpecPath) + self.VRF_PATH_CLASS = Vrf4FlowSpecPath + # Because the IPv4 Flow Specification does not require nexthop, + # initialize with None. + self._nexthop = None diff -Nru ryu-4.9/ryu/services/protocols/bgp/info_base/vpnv6fs.py ryu-4.15/ryu/services/protocols/bgp/info_base/vpnv6fs.py --- ryu-4.9/ryu/services/protocols/bgp/info_base/vpnv6fs.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/info_base/vpnv6fs.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,66 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" + Defines data types and models required specifically for + VPNv6 Flow Specification support. +""" + +import logging + +from ryu.lib.packet.bgp import FlowSpecVPNv6NLRI +from ryu.lib.packet.bgp import RF_VPNv6_FLOWSPEC + +from ryu.services.protocols.bgp.info_base.vpn import VpnDest +from ryu.services.protocols.bgp.info_base.vpn import VpnPath +from ryu.services.protocols.bgp.info_base.vpn import VpnTable + +LOG = logging.getLogger('bgpspeaker.info_base.vpnv6fs') + + +class VPNv6FlowSpecDest(VpnDest): + """VPNv6 Flow Specification Destination + + Store Flow Specification Paths. + """ + ROUTE_FAMILY = RF_VPNv6_FLOWSPEC + + +class VPNv6FlowSpecTable(VpnTable): + """Global table to store VPNv6 Flow Specification routing information. + + Uses `VPNv6FlowSpecDest` to store destination information for each known + Flow Specification paths. 
+ """ + ROUTE_FAMILY = RF_VPNv6_FLOWSPEC + VPN_DEST_CLASS = VPNv6FlowSpecDest + + +class VPNv6FlowSpecPath(VpnPath): + """Represents a way of reaching an VPNv6 Flow Specification destination.""" + ROUTE_FAMILY = RF_VPNv6_FLOWSPEC + VRF_PATH_CLASS = None # defined in init - anti cyclic import hack + NLRI_CLASS = FlowSpecVPNv6NLRI + + def __init__(self, *args, **kwargs): + # Set dummy IP address. + kwargs['nexthop'] = '::' + super(VPNv6FlowSpecPath, self).__init__(*args, **kwargs) + from ryu.services.protocols.bgp.info_base.vrf6fs import( + Vrf6FlowSpecPath) + self.VRF_PATH_CLASS = Vrf6FlowSpecPath + # Because the IPv6 Flow Specification does not require nexthop, + # initialize with None. + self._nexthop = None diff -Nru ryu-4.9/ryu/services/protocols/bgp/info_base/vrf4fs.py ryu-4.15/ryu/services/protocols/bgp/info_base/vrf4fs.py --- ryu-4.9/ryu/services/protocols/bgp/info_base/vrf4fs.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/info_base/vrf4fs.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,60 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" + Defines data types and models required specifically + for VRF (for IPv4 Flow Specification) support. + Represents data structures for VRF not VPN/global. + (Inside VRF you have IPv4 Flow Specification prefixes + and inside VPN you have VPNv4 Flow Specification prefixes) +""" + +import logging + +from ryu.lib.packet.bgp import RF_IPv4_FLOWSPEC +from ryu.lib.packet.bgp import RF_VPNv4_FLOWSPEC +from ryu.lib.packet.bgp import FlowSpecIPv4NLRI +from ryu.lib.packet.bgp import FlowSpecVPNv4NLRI + +from ryu.services.protocols.bgp.info_base.vpnv4fs import VPNv4FlowSpecPath +from ryu.services.protocols.bgp.info_base.vrffs import VRFFlowSpecDest +from ryu.services.protocols.bgp.info_base.vrffs import VRFFlowSpecPath +from ryu.services.protocols.bgp.info_base.vrffs import VRFFlowSpecTable + +LOG = logging.getLogger('bgpspeaker.info_base.vrf4fs') + + +class Vrf4FlowSpecPath(VRFFlowSpecPath): + """Represents a way of reaching an IP destination with + a VPN Flow Specification. + """ + ROUTE_FAMILY = RF_IPv4_FLOWSPEC + VPN_PATH_CLASS = VPNv4FlowSpecPath + VPN_NLRI_CLASS = FlowSpecVPNv4NLRI + + +class Vrf4FlowSpecDest(VRFFlowSpecDest): + ROUTE_FAMILY = RF_IPv4_FLOWSPEC + + +class Vrf4FlowSpecTable(VRFFlowSpecTable): + """Virtual Routing and Forwarding information base + for IPv4 Flow Specification. 
+ """ + ROUTE_FAMILY = RF_IPv4_FLOWSPEC + VPN_ROUTE_FAMILY = RF_VPNv4_FLOWSPEC + NLRI_CLASS = FlowSpecIPv4NLRI + VRF_PATH_CLASS = Vrf4FlowSpecPath + VRF_DEST_CLASS = Vrf4FlowSpecDest diff -Nru ryu-4.9/ryu/services/protocols/bgp/info_base/vrf6fs.py ryu-4.15/ryu/services/protocols/bgp/info_base/vrf6fs.py --- ryu-4.9/ryu/services/protocols/bgp/info_base/vrf6fs.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/info_base/vrf6fs.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,60 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" + Defines data types and models required specifically + for VRF (for IPv6 Flow Specification) support. + Represents data structures for VRF not VPN/global. + (Inside VRF you have IPv6 Flow Specification prefixes + and inside VPN you have VPNV6 Flow Specification prefixes) +""" + +import logging + +from ryu.lib.packet.bgp import RF_IPv6_FLOWSPEC +from ryu.lib.packet.bgp import RF_VPNv6_FLOWSPEC +from ryu.lib.packet.bgp import FlowSpecIPv6NLRI +from ryu.lib.packet.bgp import FlowSpecVPNv6NLRI + +from ryu.services.protocols.bgp.info_base.vpnv6fs import VPNv6FlowSpecPath +from ryu.services.protocols.bgp.info_base.vrffs import VRFFlowSpecDest +from ryu.services.protocols.bgp.info_base.vrffs import VRFFlowSpecPath +from ryu.services.protocols.bgp.info_base.vrffs import VRFFlowSpecTable + +LOG = logging.getLogger('bgpspeaker.info_base.vrf6fs') + + +class Vrf6FlowSpecPath(VRFFlowSpecPath): + """Represents a way of reaching an IP destination with + a VPN Flow Specification. + """ + ROUTE_FAMILY = RF_IPv6_FLOWSPEC + VPN_PATH_CLASS = VPNv6FlowSpecPath + VPN_NLRI_CLASS = FlowSpecVPNv6NLRI + + +class Vrf6FlowSpecDest(VRFFlowSpecDest): + ROUTE_FAMILY = RF_IPv6_FLOWSPEC + + +class Vrf6FlowSpecTable(VRFFlowSpecTable): + """Virtual Routing and Forwarding information base + for IPv6 Flow Specification. + """ + ROUTE_FAMILY = RF_IPv6_FLOWSPEC + VPN_ROUTE_FAMILY = RF_VPNv6_FLOWSPEC + NLRI_CLASS = FlowSpecIPv6NLRI + VRF_PATH_CLASS = Vrf6FlowSpecPath + VRF_DEST_CLASS = Vrf6FlowSpecDest diff -Nru ryu-4.9/ryu/services/protocols/bgp/info_base/vrffs.py ryu-4.15/ryu/services/protocols/bgp/info_base/vrffs.py --- ryu-4.9/ryu/services/protocols/bgp/info_base/vrffs.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/info_base/vrffs.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,91 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" + Defines base data types and models required specifically + for VRF Flow Specification support. +""" + +import abc +import logging +import six + +from ryu.lib.packet.bgp import BGP_ATTR_TYPE_ORIGIN +from ryu.lib.packet.bgp import BGP_ATTR_TYPE_AS_PATH +from ryu.lib.packet.bgp import BGP_ATTR_TYPE_EXTENDED_COMMUNITIES +from ryu.lib.packet.bgp import BGPPathAttributeOrigin +from ryu.lib.packet.bgp import BGPPathAttributeAsPath +from ryu.lib.packet.bgp import BGPPathAttributeExtendedCommunities + +from ryu.services.protocols.bgp.base import OrderedDict +from ryu.services.protocols.bgp.info_base.vrf import VrfTable +from ryu.services.protocols.bgp.info_base.vrf import VrfDest +from ryu.services.protocols.bgp.info_base.vrf import VrfPath + +from ryu.services.protocols.bgp.utils.bgp import create_rt_extended_community + +LOG = logging.getLogger('bgpspeaker.info_base.vrffs') + + +@six.add_metaclass(abc.ABCMeta) +class VRFFlowSpecTable(VrfTable): + """Virtual Routing and Forwarding information base. + Keeps destination imported to given VRF Flow Specification + in represents. + """ + def insert_vrffs_path(self, nlri, communities, is_withdraw=False): + assert nlri + assert isinstance(communities, list) + vrf_conf = self.vrf_conf + + from ryu.services.protocols.bgp.core import EXPECTED_ORIGIN + pattrs = OrderedDict() + pattrs[BGP_ATTR_TYPE_ORIGIN] = BGPPathAttributeOrigin( + EXPECTED_ORIGIN) + pattrs[BGP_ATTR_TYPE_AS_PATH] = BGPPathAttributeAsPath([]) + + for rt in vrf_conf.export_rts: + communities.append(create_rt_extended_community(rt, 2)) + for soo in vrf_conf.soo_list: + communities.append(create_rt_extended_community(soo, 3)) + + pattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = ( + BGPPathAttributeExtendedCommunities(communities=communities)) + + puid = self.VRF_PATH_CLASS.create_puid( + vrf_conf.route_dist, nlri.prefix) + + path = self.VRF_PATH_CLASS( + puid, None, nlri, 0, + pattrs=pattrs, is_withdraw=is_withdraw + ) + + # Insert the path into VRF table, get affected destination so that we + # can process it further. + eff_dest = self.insert(path) + # Enqueue the eff_dest for further processing. + self._signal_bus.dest_changed(eff_dest) + + +@six.add_metaclass(abc.ABCMeta) +class VRFFlowSpecDest(VrfDest): + """Base class for VRF Flow Specification.""" + + +@six.add_metaclass(abc.ABCMeta) +class VRFFlowSpecPath(VrfPath): + """Represents a way of reaching an IP destination with + a VPN Flow Specification. + """ diff -Nru ryu-4.9/ryu/services/protocols/bgp/info_base/vrfl2vpnfs.py ryu-4.15/ryu/services/protocols/bgp/info_base/vrfl2vpnfs.py --- ryu-4.9/ryu/services/protocols/bgp/info_base/vrfl2vpnfs.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/info_base/vrfl2vpnfs.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,58 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
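
Illustrative sketch (not part of the patch): the extended-community handling
that insert_vrffs_path() above performs, using the create_rt_extended_community()
helper added to ryu/services/protocols/bgp/utils/bgp.py later in this diff.
The RT and SoO strings are made-up example values.

    from ryu.lib.packet.bgp import BGPPathAttributeExtendedCommunities
    from ryu.services.protocols.bgp.utils.bgp import create_rt_extended_community

    export_rts = ['65001:100']    # stands in for vrf_conf.export_rts
    soo_list = ['65001:200']      # stands in for vrf_conf.soo_list

    communities = []
    for rt in export_rts:
        communities.append(create_rt_extended_community(rt, 2))   # Route Target
    for soo in soo_list:
        communities.append(create_rt_extended_community(soo, 3))  # Route Origin

    ext_comm_attr = BGPPathAttributeExtendedCommunities(communities=communities)
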
+ +""" + Defines data types and models required specifically + for L2VPN support. + Represents data structures for VRF not VPN/global. + (Inside VRF you have L2VPN Flow Specification prefixes + and inside VPN you have L2VPN Flow Specification prefixes) +""" + +import logging + +from ryu.lib.packet.bgp import RF_L2VPN_FLOWSPEC +from ryu.lib.packet.bgp import FlowSpecL2VPNNLRI + +from ryu.services.protocols.bgp.info_base.l2vpnfs import L2VPNFlowSpecPath +from ryu.services.protocols.bgp.info_base.vrffs import VRFFlowSpecDest +from ryu.services.protocols.bgp.info_base.vrffs import VRFFlowSpecPath +from ryu.services.protocols.bgp.info_base.vrffs import VRFFlowSpecTable + +LOG = logging.getLogger('bgpspeaker.info_base.vrfl2vpnfs') + + +class L2vpnFlowSpecPath(VRFFlowSpecPath): + """Represents a way of reaching an IP destination with + a L2VPN Flow Specification. + """ + ROUTE_FAMILY = RF_L2VPN_FLOWSPEC + VPN_PATH_CLASS = L2VPNFlowSpecPath + VPN_NLRI_CLASS = FlowSpecL2VPNNLRI + + +class L2vpnFlowSpecDest(VRFFlowSpecDest): + ROUTE_FAMILY = RF_L2VPN_FLOWSPEC + + +class L2vpnFlowSpecTable(VRFFlowSpecTable): + """Virtual Routing and Forwarding information base + for L2VPN Flow Specification. + """ + ROUTE_FAMILY = RF_L2VPN_FLOWSPEC + VPN_ROUTE_FAMILY = RF_L2VPN_FLOWSPEC + NLRI_CLASS = FlowSpecL2VPNNLRI + VRF_PATH_CLASS = L2vpnFlowSpecPath + VRF_DEST_CLASS = L2vpnFlowSpecDest diff -Nru ryu-4.9/ryu/services/protocols/bgp/info_base/vrf.py ryu-4.15/ryu/services/protocols/bgp/info_base/vrf.py --- ryu-4.9/ryu/services/protocols/bgp/info_base/vrf.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/info_base/vrf.py 2017-07-02 11:08:32.000000000 +0000 @@ -28,15 +28,21 @@ from ryu.lib.packet.bgp import BGP_ATTR_TYPE_MULTI_EXIT_DISC from ryu.lib.packet.bgp import BGPPathAttributeOrigin from ryu.lib.packet.bgp import BGPPathAttributeAsPath +from ryu.lib.packet.bgp import EvpnEthernetSegmentNLRI from ryu.lib.packet.bgp import BGPPathAttributeExtendedCommunities -from ryu.lib.packet.bgp import BGPTwoOctetAsSpecificExtendedCommunity from ryu.lib.packet.bgp import BGPPathAttributeMultiExitDisc from ryu.lib.packet.bgp import BGPEncapsulationExtendedCommunity +from ryu.lib.packet.bgp import BGPEvpnEsiLabelExtendedCommunity +from ryu.lib.packet.bgp import BGPEvpnEsImportRTExtendedCommunity from ryu.lib.packet.bgp import BGPPathAttributePmsiTunnel from ryu.lib.packet.bgp import PmsiTunnelIdIngressReplication from ryu.lib.packet.bgp import RF_L2_EVPN from ryu.lib.packet.bgp import EvpnMacIPAdvertisementNLRI from ryu.lib.packet.bgp import EvpnIpPrefixNLRI +from ryu.lib.packet.safi import ( + IP_FLOWSPEC, + VPN_FLOWSPEC, +) from ryu.services.protocols.bgp.base import OrderedDict from ryu.services.protocols.bgp.constants import VPN_TABLE @@ -44,6 +50,7 @@ from ryu.services.protocols.bgp.info_base.base import Destination from ryu.services.protocols.bgp.info_base.base import Path from ryu.services.protocols.bgp.info_base.base import Table +from ryu.services.protocols.bgp.utils.bgp import create_rt_extended_community from ryu.services.protocols.bgp.utils.stats import LOCAL_ROUTES from ryu.services.protocols.bgp.utils.stats import REMOTE_ROUTES from ryu.services.protocols.bgp.utils.stats import RESOURCE_ID @@ -164,6 +171,8 @@ # Because NLRI class is the same if the route family is EVPN, # we re-use the NLRI instance. 
vrf_nlri = vpn_path.nlri + elif self.ROUTE_FAMILY.safi in [IP_FLOWSPEC, VPN_FLOWSPEC]: + vrf_nlri = self.NLRI_CLASS(rules=vpn_path.nlri.rules) else: # self.VPN_ROUTE_FAMILY in [RF_IPv4_VPN, RF_IPv6_VPN] # Copy NLRI instance ip, masklen = vpn_path.nlri.prefix.split('/') @@ -221,6 +230,28 @@ label_list = [] vrf_conf = self.vrf_conf if not is_withdraw: + table_manager = self._core_service.table_manager + if gen_lbl and next_hop: + # Label per next_hop demands we use a different label + # per next_hop. Here connected interfaces are advertised per + # VRF. + label_key = (vrf_conf.route_dist, next_hop) + nh_label = table_manager.get_nexthop_label(label_key) + if not nh_label: + nh_label = table_manager.get_next_vpnv4_label() + table_manager.set_nexthop_label(label_key, nh_label) + label_list.append(nh_label) + + elif gen_lbl: + # If we do not have next_hop, get a new label. + label_list.append(table_manager.get_next_vpnv4_label()) + + # Set MPLS labels with the generated labels + if gen_lbl and isinstance(nlri, EvpnMacIPAdvertisementNLRI): + nlri.mpls_labels = label_list[:2] + elif gen_lbl and isinstance(nlri, EvpnIpPrefixNLRI): + nlri.mpls_label = label_list[0] + # Create a dictionary for path-attrs. pattrs = OrderedDict() @@ -232,20 +263,19 @@ EXPECTED_ORIGIN) pattrs[BGP_ATTR_TYPE_AS_PATH] = BGPPathAttributeAsPath([]) communities = [] - for rt in vrf_conf.export_rts: - as_num, local_admin = rt.split(':') + + # Set ES-Import Route Target + if isinstance(nlri, EvpnEthernetSegmentNLRI): subtype = 2 - communities.append(BGPTwoOctetAsSpecificExtendedCommunity( - as_number=int(as_num), - local_administrator=int(local_admin), - subtype=subtype)) + es_import = nlri.esi.mac_addr + communities.append(BGPEvpnEsImportRTExtendedCommunity( + subtype=subtype, + es_import=es_import)) + + for rt in vrf_conf.export_rts: + communities.append(create_rt_extended_community(rt, 2)) for soo in vrf_conf.soo_list: - as_num, local_admin = soo.split(':') - subtype = 3 - communities.append(BGPTwoOctetAsSpecificExtendedCommunity( - as_number=int(as_num), - local_administrator=int(local_admin), - subtype=subtype)) + communities.append(create_rt_extended_community(soo, 3)) # Set Tunnel Encapsulation Attribute tunnel_type = kwargs.get('tunnel_type', None) @@ -253,28 +283,35 @@ communities.append( BGPEncapsulationExtendedCommunity.from_str(tunnel_type)) + # Set ESI Label Extended Community + redundancy_mode = kwargs.get('redundancy_mode', None) + if redundancy_mode is not None: + subtype = 1 + flags = 0 + + from ryu.services.protocols.bgp.api.prefix import ( + REDUNDANCY_MODE_SINGLE_ACTIVE) + if redundancy_mode == REDUNDANCY_MODE_SINGLE_ACTIVE: + flags |= BGPEvpnEsiLabelExtendedCommunity.SINGLE_ACTIVE_BIT + + vni = kwargs.get('vni', None) + if vni is not None: + communities.append(BGPEvpnEsiLabelExtendedCommunity( + subtype=subtype, + flags=flags, + vni=vni)) + else: + communities.append(BGPEvpnEsiLabelExtendedCommunity( + subtype=subtype, + flags=flags, + mpls_label=label_list[0])) + pattrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = \ BGPPathAttributeExtendedCommunities(communities=communities) if vrf_conf.multi_exit_disc: pattrs[BGP_ATTR_TYPE_MULTI_EXIT_DISC] = \ BGPPathAttributeMultiExitDisc(vrf_conf.multi_exit_disc) - table_manager = self._core_service.table_manager - if gen_lbl and next_hop: - # Label per next_hop demands we use a different label - # per next_hop. Here connected interfaces are advertised per - # VRF. 
- label_key = (vrf_conf.route_dist, next_hop) - nh_label = table_manager.get_nexthop_label(label_key) - if not nh_label: - nh_label = table_manager.get_next_vpnv4_label() - table_manager.set_nexthop_label(label_key, nh_label) - label_list.append(nh_label) - - elif gen_lbl: - # If we do not have next_hop, get a new label. - label_list.append(table_manager.get_next_vpnv4_label()) - # Set PMSI Tunnel Attribute pmsi_tunnel_type = kwargs.get('pmsi_tunnel_type', None) if pmsi_tunnel_type is not None: @@ -288,13 +325,8 @@ pattrs[BGP_ATTR_TYEP_PMSI_TUNNEL_ATTRIBUTE] = \ BGPPathAttributePmsiTunnel(pmsi_flags=0, tunnel_type=pmsi_tunnel_type, - tunnel_id=tunnel_id) - - # Set MPLS labels with the generated labels - if gen_lbl and isinstance(nlri, EvpnMacIPAdvertisementNLRI): - nlri.mpls_labels = label_list[:2] - elif gen_lbl and isinstance(nlri, EvpnIpPrefixNLRI): - nlri.mpls_label = label_list[0] + tunnel_id=tunnel_id, + vni=kwargs.get('vni', None)) puid = self.VRF_PATH_CLASS.create_puid( vrf_conf.route_dist, nlri.prefix) @@ -497,6 +529,9 @@ - `label_list`: (list) List of labels for this path. Note: other parameters are as documented in super class. """ + if self.ROUTE_FAMILY.safi in [IP_FLOWSPEC, VPN_FLOWSPEC]: + nexthop = '0.0.0.0' + Path.__init__(self, source, nlri, src_ver_num, pattrs, nexthop, is_withdraw) if label_list is None: @@ -551,6 +586,11 @@ # Because NLRI class is the same if the route family is EVPN, # we re-use the NLRI instance. vpn_nlri = self._nlri + + elif self.ROUTE_FAMILY.safi in [IP_FLOWSPEC, VPN_FLOWSPEC]: + vpn_nlri = self.VPN_NLRI_CLASS(route_dist=route_dist, + rules=self.nlri.rules) + else: # self.ROUTE_FAMILY in [RF_IPv4_UC, RF_IPv6_UC] ip, masklen = self._nlri.prefix.split('/') vpn_nlri = self.VPN_NLRI_CLASS(length=int(masklen), diff -Nru ryu-4.9/ryu/services/protocols/bgp/model.py ryu-4.15/ryu/services/protocols/bgp/model.py --- ryu-4.9/ryu/services/protocols/bgp/model.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/model.py 2017-07-02 11:08:32.000000000 +0000 @@ -97,9 +97,19 @@ from ryu.services.protocols.bgp.info_base.vrf4 import Vrf4Path from ryu.services.protocols.bgp.info_base.vrf6 import Vrf6Path from ryu.services.protocols.bgp.info_base.vrfevpn import VrfEvpnPath + from ryu.services.protocols.bgp.info_base.vrf4fs import ( + Vrf4FlowSpecPath) + from ryu.services.protocols.bgp.info_base.vrf6fs import ( + Vrf6FlowSpecPath) + from ryu.services.protocols.bgp.info_base.vrfl2vpnfs import ( + L2vpnFlowSpecPath) assert path.route_family in (Vrf4Path.ROUTE_FAMILY, Vrf6Path.ROUTE_FAMILY, - VrfEvpnPath.ROUTE_FAMILY) + VrfEvpnPath.ROUTE_FAMILY, + Vrf4FlowSpecPath.ROUTE_FAMILY, + Vrf6FlowSpecPath.ROUTE_FAMILY, + L2vpnFlowSpecPath.ROUTE_FAMILY, + ) self.sink = None self._path = path diff -Nru ryu-4.9/ryu/services/protocols/bgp/net_ctrl.py ryu-4.15/ryu/services/protocols/bgp/net_ctrl.py --- ryu-4.9/ryu/services/protocols/bgp/net_ctrl.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/net_ctrl.py 2017-07-02 11:08:32.000000000 +0000 @@ -25,6 +25,8 @@ import msgpack +from ryu.lib.packet import safi as subaddr_family + from ryu.services.protocols.bgp import api from ryu.services.protocols.bgp.api.base import ApiException from ryu.services.protocols.bgp.api.base import NEXT_HOP @@ -51,11 +53,11 @@ NC_RPC_BIND_PORT = 'apgw_rpc_bind_port' # Notification symbols -NOTF_ADD_REMOTE_PREFX = 'prefix.add_remote' -NOTF_DELETE_REMOTE_PREFX = 'prefix.delete_remote' -NOTF_ADD_LOCAL_PREFX = 'prefix.add_local' -NOTF_DELETE_LOCAL_PREFX = 
'prefix.delete_local' -NOTF_LOG = 'logging' +NOTIFICATION_ADD_REMOTE_PREFIX = 'prefix.add_remote' +NOTIFICATION_DELETE_REMOTE_PREFIX = 'prefix.delete_remote' +NOTIFICATION_ADD_LOCAL_PREFIX = 'prefix.add_local' +NOTIFICATION_DELETE_LOCAL_PREFIX = 'prefix.delete_local' +NOTIFICATION_LOG = 'logging' # MessagePackRPC message type constants RPC_MSG_REQUEST = 0 @@ -94,20 +96,22 @@ and utilities that use these. It also cares about socket communication w/ RPC peer. """ + NAME_FMT = 'RpcSession%s' - def __init__(self, socket, outgoing_msg_sink_iter): - super(RpcSession, self).__init__("RpcSession(%s)" % socket) + def __init__(self, sock, outgoing_msg_sink_iter): + self.peer_name = str(sock.getpeername()) + super(RpcSession, self).__init__(self.NAME_FMT % self.peer_name) self._packer = msgpack.Packer(encoding='utf-8') self._unpacker = msgpack.Unpacker(encoding='utf-8') self._next_msgid = 0 - self._socket = socket + self._socket = sock self._outgoing_msg_sink_iter = outgoing_msg_sink_iter + self.is_connected = True def stop(self): super(RpcSession, self).stop() - LOG.critical( - 'RPC Session to %s stopped', str(self._socket.getpeername()) - ) + self.is_connected = False + LOG.info('RPC Session to %s stopped', self.peer_name) def _run(self): # Process outgoing messages in new thread. @@ -117,9 +121,7 @@ # Process incoming messages in new thread. green_in = self._spawn('net_ctrl._process_incoming', self._process_incoming_msgs) - LOG.critical( - 'RPC Session to %s started', str(self._socket.getpeername()) - ) + LOG.info('RPC Session to %s started', self.peer_name) green_in.wait() green_out.wait() @@ -158,6 +160,16 @@ for msg in self._unpacker: return msg + def _send_error_response(self, request, err_msg): + rpc_msg = self.create_error_response(request[RPC_IDX_MSG_ID], + str(err_msg)) + return self._sendall(rpc_msg) + + def _send_success_response(self, request, result): + rpc_msg = self.create_success_response(request[RPC_IDX_MSG_ID], + result) + return self._sendall(rpc_msg) + def send_notification(self, method, params): rpc_msg = self.create_notification(method, params) return self._sendall(rpc_msg) @@ -166,21 +178,22 @@ LOG.debug('NetworkController started processing incoming messages') assert self._socket - while True: + while self.is_connected: # Wait for request/response/notification from peer. msg_buff = self._recv() if len(msg_buff) == 0: - LOG.info('Peer %r disconnected.', self._socket) + LOG.info('Peer %s disconnected.', self.peer_name) + self.is_connected = False + self._socket.close() break messages = self.feed_and_get_messages(msg_buff) for msg in messages: if msg[0] == RPC_MSG_REQUEST: try: result = _handle_request(msg) - _send_success_response(self, self._socket, msg, result) + self._send_success_response(msg, result) except BGPSException as e: - _send_error_response(self, self._socket, msg, - e.message) + self._send_error_response(msg, e.message) elif msg[0] == RPC_MSG_RESPONSE: _handle_response(msg) elif msg[0] == RPC_MSG_NOTIFY: @@ -197,22 +210,22 @@ it loops forever. """ LOG.debug('NetworkController processing outgoing request list.') - # TODO(Team): handle un-expected exception breaking the loop in - # graceful manner. Discuss this with other component developers. # TODO(PH): We should try not to sent routes from bgp peer that is not # in established state. 
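
Illustrative sketch (not part of the patch): with the per-session refactoring
above, notifications are fanned out by the controller singleton
(send_rpc_notification, added below) to every connected RPC session rather
than to a single cached session. The payload dict here is only a placeholder;
the real keys are the ROUTE_DISTINGUISHER/PREFIX/NEXT_HOP/VRF_RF constants
used by _create_prefix_notification() below.

    from ryu.services.protocols.bgp.net_ctrl import NET_CONTROLLER

    # 'prefix.add_remote' is NOTIFICATION_ADD_REMOTE_PREFIX; the params list
    # normally carries route distinguisher, prefix, next hop and VRF route
    # family as assembled by _create_prefix_notification().
    NET_CONTROLLER.send_rpc_notification('prefix.add_remote',
                                         [{'placeholder': 'payload'}])
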
- from ryu.services.protocols.bgp.model import \ - FlexinetOutgoingRoute - while True: + from ryu.services.protocols.bgp.model import ( + FlexinetOutgoingRoute) + while self.is_connected: # sink iter is Sink instance and next is blocking so this isn't # active wait. for outgoing_msg in sink_iter: + if not self.is_connected: + self._socket.close() + return if isinstance(outgoing_msg, FlexinetOutgoingRoute): - rpc_msg = _create_prefix_notif(outgoing_msg, self) + rpc_msg = _create_prefix_notification(outgoing_msg, self) else: raise NotImplementedError( - 'Do not handle out going message' - ' of type %s' % + 'Do not handle out going message of type %s' % outgoing_msg.__class__) if rpc_msg: self._sendall(rpc_msg) @@ -241,33 +254,35 @@ self.stop() -def _create_prefix_notif(outgoing_msg, rpc_session): +def _create_prefix_notification(outgoing_msg, rpc_session): """Constructs prefix notification with data from given outgoing message. Given RPC session is used to create RPC notification message. """ - assert(outgoing_msg) + assert outgoing_msg path = outgoing_msg.path - assert(path) + assert path vpn_nlri = path.nlri - rpc_msg = None assert path.source is not None if path.source != VRF_TABLE: # Extract relevant info for update-add/update-delete. params = [{ROUTE_DISTINGUISHER: outgoing_msg.route_dist, PREFIX: vpn_nlri.prefix, NEXT_HOP: path.nexthop, - VPN_LABEL: path.label_list[0], VRF_RF: VrfConf.rf_2_vrf_rf(path.route_family)}] + if path.nlri.ROUTE_FAMILY.safi not in (subaddr_family.IP_FLOWSPEC, + subaddr_family.VPN_FLOWSPEC): + params[VPN_LABEL] = path.label_list[0] + if not path.is_withdraw: # Create notification to NetworkController. - rpc_msg = rpc_session.create_notification(NOTF_ADD_REMOTE_PREFX, - params) + rpc_msg = rpc_session.create_notification( + NOTIFICATION_ADD_REMOTE_PREFIX, params) else: - # Create update-delete request to NetworkController.` - rpc_msg = rpc_session.create_notification(NOTF_DELETE_REMOTE_PREFX, - params) + # Create update-delete request to NetworkController. + rpc_msg = rpc_session.create_notification( + NOTIFICATION_DELETE_REMOTE_PREFIX, params) else: # Extract relevant info for update-add/update-delete. params = [{ROUTE_DISTINGUISHER: outgoing_msg.route_dist, @@ -277,12 +292,12 @@ ORIGIN_RD: path.origin_rd}] if not path.is_withdraw: # Create notification to NetworkController. - rpc_msg = rpc_session.create_notification(NOTF_ADD_LOCAL_PREFX, - params) + rpc_msg = rpc_session.create_notification( + NOTIFICATION_ADD_LOCAL_PREFIX, params) else: - # Create update-delete request to NetworkController.` - rpc_msg = rpc_session.create_notification(NOTF_DELETE_LOCAL_PREFX, - params) + # Create update-delete request to NetworkController. + rpc_msg = rpc_session.create_notification( + NOTIFICATION_DELETE_LOCAL_PREFIX, params) return rpc_msg @@ -322,7 +337,8 @@ # Outstanding requests, i.e. requests for which we are yet to receive # response from peer. We currently do not have any requests going out. self._outstanding_reqs = {} - self._rpc_session = None + # Dictionary for Peer name to RPC session. + self._rpc_sessions = {} def _run(self, *args, **kwargs): """Runs RPC server. 
@@ -336,24 +352,36 @@ sock_addr = (apgw_rpc_bind_ip, apgw_rpc_bind_port) LOG.debug('NetworkController started listening for connections...') - server_thread, socket = self._listen_tcp(sock_addr, - self._start_rpc_session) + server_thread, _ = self._listen_tcp(sock_addr, + self._start_rpc_session) self.pause(0) server_thread.wait() - def _start_rpc_session(self, socket): + def _start_rpc_session(self, sock): """Starts a new RPC session with given connection. """ - if self._rpc_session and self._rpc_session.started: - self._rpc_session.stop() + session_name = RpcSession.NAME_FMT % str(sock.getpeername()) + self._stop_child_activities(session_name) + + rpc_session = RpcSession(sock, self) + self._spawn_activity(rpc_session) - self._rpc_session = RpcSession(socket, self) - self._rpc_session.start() + def _send_rpc_notification_to_session(self, session, method, params): + if not session.is_connected: + # Stops disconnected RPC session. + self._stop_child_activities(session.name) + return + + return session.send_notification(method, params) def send_rpc_notification(self, method, params): - if (self.started and self._rpc_session is not None and - self._rpc_session.started): - return self._rpc_session.send_notification(method, params) + if not self.started: + return + + for session in list(self._child_activity_map.values()): + if not isinstance(session, RpcSession): + continue + self._send_rpc_notification_to_session(session, method, params) def _handle_response(response): @@ -382,17 +410,5 @@ raise ApiException(desc='Invalid type for RPC parameter.') -def _send_success_response(rpc_session, socket, request, result): - response = rpc_session.create_success_response(request[RPC_IDX_MSG_ID], - result) - socket.sendall(response) - - -def _send_error_response(rpc_session, socket, request, emsg): - response = rpc_session.create_error_response(request[RPC_IDX_MSG_ID], - str(emsg)) - socket.sendall(response) - - # Network controller singleton NET_CONTROLLER = _NetworkController() diff -Nru ryu-4.9/ryu/services/protocols/bgp/operator/commands/show/neighbor.py ryu-4.15/ryu/services/protocols/bgp/operator/commands/show/neighbor.py --- ryu-4.9/ryu/services/protocols/bgp/operator/commands/show/neighbor.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/operator/commands/show/neighbor.py 2017-07-02 11:08:32.000000000 +0000 @@ -128,7 +128,7 @@ if v.get('timestamp'): time = strftime("%Y/%m/%d %H:%M:%S", v.get('timestamp')) ret += cls.fmtstr.format(path_status, time, prefix, labels, - next_hop, str(med), str(localpref), + str(next_hop), str(med), str(localpref), ' '.join(map(str, aspath))) return ret diff -Nru ryu-4.9/ryu/services/protocols/bgp/operator/commands/show/rib.py ryu-4.15/ryu/services/protocols/bgp/operator/commands/show/rib.py --- ryu-4.9/ryu/services/protocols/bgp/operator/commands/show/rib.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/operator/commands/show/rib.py 2017-07-02 11:08:32.000000000 +0000 @@ -11,7 +11,19 @@ class RibBase(Command, RouteFormatterMixin): - supported_families = ['ipv4', 'ipv6', 'vpnv4', 'rtfilter', 'vpnv6', 'evpn'] + supported_families = [ + 'ipv4', + 'ipv6', + 'vpnv4', + 'vpnv6', + 'rtfilter', + 'evpn', + 'ipv4fs', + 'ipv6fs', + 'vpnv4fs', + 'vpnv6fs', + 'l2vpnfs', + ] class Rib(RibBase): diff -Nru ryu-4.9/ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py ryu-4.15/ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py --- 
ryu-4.9/ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/operator/commands/show/route_formatter_mixin.py 2017-07-02 11:08:32.000000000 +0000 @@ -42,7 +42,7 @@ # Append path info to String buffer. buff.write(cls.fmtstr.format(path_status, prefix, str(labels), - next_hop, bpr, str(med), + str(next_hop), bpr, str(med), str(localpref), ' '.join(map(str, aspath)))) diff -Nru ryu-4.9/ryu/services/protocols/bgp/operator/internal_api.py ryu-4.15/ryu/services/protocols/bgp/operator/internal_api.py --- ryu-4.9/ryu/services/protocols/bgp/operator/internal_api.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/operator/internal_api.py 2017-07-02 11:08:32.000000000 +0000 @@ -7,6 +7,11 @@ from ryu.lib.packet.bgp import RF_IPv4_VPN from ryu.lib.packet.bgp import RF_IPv6_VPN from ryu.lib.packet.bgp import RF_L2_EVPN +from ryu.lib.packet.bgp import RF_IPv4_FLOWSPEC +from ryu.lib.packet.bgp import RF_IPv6_FLOWSPEC +from ryu.lib.packet.bgp import RF_VPNv4_FLOWSPEC +from ryu.lib.packet.bgp import RF_VPNv6_FLOWSPEC +from ryu.lib.packet.bgp import RF_L2VPN_FLOWSPEC from ryu.lib.packet.bgp import RF_RTC_UC from ryu.lib.packet.bgp import BGP_ATTR_TYPE_ORIGIN from ryu.lib.packet.bgp import BGP_ATTR_TYPE_AS_PATH @@ -84,6 +89,11 @@ 'vpnv4': RF_IPv4_VPN, 'vpnv6': RF_IPv6_VPN, 'evpn': RF_L2_EVPN, + 'ipv4fs': RF_IPv4_FLOWSPEC, + 'ipv6fs': RF_IPv6_FLOWSPEC, + 'vpnv4fs': RF_VPNv4_FLOWSPEC, + 'vpnv6fs': RF_VPNv6_FLOWSPEC, + 'l2vpnfs': RF_L2VPN_FLOWSPEC, 'rtfilter': RF_RTC_UC } if addr_family not in rfs: diff -Nru ryu-4.9/ryu/services/protocols/bgp/peer.py ryu-4.15/ryu/services/protocols/bgp/peer.py --- ryu-4.9/ryu/services/protocols/bgp/peer.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/peer.py 2017-07-02 11:08:32.000000000 +0000 @@ -54,6 +54,8 @@ from ryu.lib.packet.bgp import RF_IPv6_UC from ryu.lib.packet.bgp import RF_IPv4_VPN from ryu.lib.packet.bgp import RF_IPv6_VPN +from ryu.lib.packet.bgp import RF_IPv4_FLOWSPEC +from ryu.lib.packet.bgp import RF_VPNv4_FLOWSPEC from ryu.lib.packet.bgp import RF_RTC_UC from ryu.lib.packet.bgp import get_rf @@ -75,6 +77,8 @@ from ryu.lib.packet.bgp import BGPPathAttributeAs4Path from ryu.lib.packet.bgp import BGPPathAttributeLocalPref from ryu.lib.packet.bgp import BGPPathAttributeExtendedCommunities +from ryu.lib.packet.bgp import BGPPathAttributeOriginatorId +from ryu.lib.packet.bgp import BGPPathAttributeClusterList from ryu.lib.packet.bgp import BGPPathAttributeMpReachNLRI from ryu.lib.packet.bgp import BGPPathAttributeMpUnreachNLRI from ryu.lib.packet.bgp import BGPPathAttributeCommunities @@ -90,12 +94,16 @@ from ryu.lib.packet.bgp import BGP_ATTR_TYPE_MP_UNREACH_NLRI from ryu.lib.packet.bgp import BGP_ATTR_TYPE_MULTI_EXIT_DISC from ryu.lib.packet.bgp import BGP_ATTR_TYPE_COMMUNITIES +from ryu.lib.packet.bgp import BGP_ATTR_TYPE_ORIGINATOR_ID +from ryu.lib.packet.bgp import BGP_ATTR_TYPE_CLUSTER_LIST from ryu.lib.packet.bgp import BGP_ATTR_TYPE_EXTENDED_COMMUNITIES from ryu.lib.packet.bgp import BGP_ATTR_TYEP_PMSI_TUNNEL_ATTRIBUTE from ryu.lib.packet.bgp import BGPTwoOctetAsSpecificExtendedCommunity from ryu.lib.packet.bgp import BGPIPv4AddressSpecificExtendedCommunity +from ryu.lib.packet import safi as subaddr_family + LOG = logging.getLogger('bgpspeaker.peer') @@ -439,6 +447,10 @@ return self._neigh_conf.is_route_server_client @property + def is_route_reflector_client(self): + return 
self._neigh_conf.is_route_reflector_client + + @property def check_first_as(self): return self._neigh_conf.check_first_as @@ -976,8 +988,37 @@ new_pathattr.append(mpunreach_attr) elif self.is_route_server_client: nlri_list = [path.nlri] - for pathattr in path.pathattr_map.values(): - new_pathattr.append(pathattr) + new_pathattr.extend(pathattr_map.values()) + elif self.is_route_reflector_client: + nlri_list = [path.nlri] + + # Append ORIGINATOR_ID attribute if not already exists. + if BGP_ATTR_TYPE_ORIGINATOR_ID not in pathattr_map: + originator_id = path.source + if originator_id is None: + originator_id = self._common_conf.router_id + elif isinstance(path.source, Peer): + originator_id = path.source.ip_address + new_pathattr.append( + BGPPathAttributeOriginatorId(value=originator_id)) + + # Append CLUSTER_LIST attribute if not already exists. + if BGP_ATTR_TYPE_CLUSTER_LIST not in pathattr_map: + new_pathattr.append( + BGPPathAttributeClusterList( + [self._common_conf.cluster_id])) + + for t, path_attr in pathattr_map.items(): + if t == BGP_ATTR_TYPE_CLUSTER_LIST: + # Append own CLUSTER_ID into CLUSTER_LIST attribute + # if already exists. + cluster_list = list(path_attr.value) + if self._common_conf.cluster_id not in cluster_list: + cluster_list.append(self._common_conf.cluster_id) + new_pathattr.append( + BGPPathAttributeClusterList(cluster_list)) + else: + new_pathattr.append(path_attr) else: # Supported and un-supported/unknown attributes. origin_attr = None @@ -993,9 +1034,11 @@ unknown_opttrans_attrs = None nlri_list = [path.nlri] - # By default, we use BGPSpeaker's interface IP with this peer - # as next_hop. - if self.is_ebgp_peer(): + if path.route_family.safi in (subaddr_family.IP_FLOWSPEC, + subaddr_family.VPN_FLOWSPEC): + # Flow Specification does not have next_hop. + next_hop = [] + elif self.is_ebgp_peer(): next_hop = self._session_next_hop(path) if path.is_local() and path.has_nexthop(): next_hop = path.nexthop @@ -1003,7 +1046,10 @@ next_hop = path.nexthop # RFC 4271 allows us to change next_hop # if configured to announce its own ip address. - if self._neigh_conf.is_next_hop_self: + # Also if the BGP route is configured without next_hop, + # we use path._session_next_hop() as next_hop. + if (self._neigh_conf.is_next_hop_self + or (path.is_local() and not path.has_nexthop())): next_hop = self._session_next_hop(path) LOG.debug('using %s as a next_hop address instead' ' of path.nexthop %s', next_hop, path.nexthop) @@ -1116,8 +1162,10 @@ # For iBGP peers we are required to send local-pref attribute # for connected or local prefixes. We check if the path matches # attribute_maps and set local-pref value. - # If the path doesn't match, we set default local-pref 100. - localpref_attr = BGPPathAttributeLocalPref(100) + # If the path doesn't match, we set default local-pref given + # from the user. The default value is 100. + localpref_attr = BGPPathAttributeLocalPref( + self._common_conf.local_pref) key = const.ATTR_MAPS_LABEL_DEFAULT if isinstance(path, (Vpnv4Path, Vpnv6Path)): @@ -1481,9 +1529,14 @@ raise bgp.MissingWellKnown(BGP_ATTR_TYPE_ORIGIN) # Validate Next hop. - # TODO(PH): Currently ignore other cases. - if (not mp_reach_attr.next_hop or - (mp_reach_attr.next_hop == self.host_bind_ip)): + if mp_reach_attr.route_family.safi in ( + subaddr_family.IP_FLOWSPEC, + subaddr_family.VPN_FLOWSPEC): + # Because the Flow Specification does not have nexthop, + # skips check. 
+ pass + elif (not mp_reach_attr.next_hop or + mp_reach_attr.next_hop == self.host_bind_ip): LOG.error('Nexthop of received UPDATE msg. (%s) same as local' ' interface address %s.', mp_reach_attr.next_hop, @@ -1503,36 +1556,42 @@ Assumes Multiprotocol Extensions capability is supported and enabled. """ assert self.state.bgp_state == const.BGP_FSM_ESTABLISHED + + # Increment count of update received. self.state.incr(PeerCounterNames.RECV_UPDATES) + if not self._validate_update_msg(update_msg): # If update message was not valid for some reason, we ignore its # routes. LOG.error('UPDATE message was invalid, hence ignoring its routes.') return - # Increment count of update received. - mp_reach_attr = update_msg.get_path_attr(BGP_ATTR_TYPE_MP_REACH_NLRI) - mp_unreach_attr = update_msg.get_path_attr( - BGP_ATTR_TYPE_MP_UNREACH_NLRI) - # Extract advertised path attributes and reconstruct AS_PATH attribute self._extract_and_reconstruct_as_path(update_msg) - nlri_list = update_msg.nlri - withdraw_list = update_msg.withdrawn_routes + # Check if path attributes have loops. + if self._is_looped_path_attrs(update_msg): + return + umsg_pattrs = update_msg.pathattr_map + mp_reach_attr = umsg_pattrs.get(BGP_ATTR_TYPE_MP_REACH_NLRI, None) if mp_reach_attr: - # Extract advertised paths from given message. + # Extract advertised MP-BGP paths from given message. self._extract_and_handle_mpbgp_new_paths(update_msg) + mp_unreach_attr = umsg_pattrs.get(BGP_ATTR_TYPE_MP_UNREACH_NLRI, None) if mp_unreach_attr: - # Extract withdraws from given message. + # Extract MP-BGP withdraws from given message. self._extract_and_handle_mpbgp_withdraws(mp_unreach_attr) + nlri_list = update_msg.nlri if nlri_list: + # Extract advertised BGP paths from given message. self._extract_and_handle_bgp4_new_paths(update_msg) + withdraw_list = update_msg.withdrawn_routes if withdraw_list: + # Extract BGP withdraws from given message. self._extract_and_handle_bgp4_withdraws(withdraw_list) def _extract_and_reconstruct_as_path(self, update_msg): @@ -1597,6 +1656,48 @@ as_path = self._construct_as_path_attr(as_path, as4_path) update_msg.path_attributes.append(as_path) + def _is_looped_path_attrs(self, update_msg): + """ + Extracts path attributes from the given UPDATE message and checks + if the given attributes have loops or not. + + :param update_msg: UPDATE message instance. + :return: True if attributes have loops. Otherwise False. + """ + umsg_pattrs = update_msg.pathattr_map + recv_open_msg = self.protocol.recv_open_msg + + # Check if AS_PATH has loops. + aspath = umsg_pattrs.get(BGP_ATTR_TYPE_AS_PATH) + if (aspath is not None + and aspath.has_local_as( + self.local_as, + max_count=self._common_conf.allow_local_as_in_count)): + LOG.error( + 'AS_PATH on UPDATE message has loops. ' + 'Ignoring this message: %s', + update_msg) + return + + # Check if ORIGINATOR_ID has loops. [RFC4456] + originator_id = umsg_pattrs.get(BGP_ATTR_TYPE_ORIGINATOR_ID, None) + if (originator_id + and recv_open_msg.bgp_identifier == originator_id): + LOG.error( + 'ORIGINATOR_ID on UPDATE message has loops. ' + 'Ignoring this message: %s', + update_msg) + return + + # Check if CLUSTER_LIST has loops. [RFC4456] + cluster_list = umsg_pattrs.get(BGP_ATTR_TYPE_CLUSTER_LIST, None) + if (cluster_list + and self._common_conf.cluster_id in cluster_list.value): + LOG.error( + 'CLUSTER_LIST on UPDATE message has loops. 
' + 'Ignoring this message: %s', update_msg) + return + def _extract_and_handle_bgp4_new_paths(self, update_msg): """Extracts new paths advertised in the given update message's *MpReachNlri* attribute. @@ -1611,23 +1712,8 @@ processing. """ umsg_pattrs = update_msg.pathattr_map - - msg_rf = RF_IPv4_UC - # Check if this route family is among supported route families. - if msg_rf not in SUPPORTED_GLOBAL_RF: - LOG.info(('Received route for route family %s which is' - ' not supported. Ignoring paths from this UPDATE: %s') % - (msg_rf, update_msg)) - return - - aspath = umsg_pattrs.get(BGP_ATTR_TYPE_AS_PATH) - # Check if AS_PATH has loops. - if aspath.has_local_as(self.local_as): - LOG.error('Update message AS_PATH has loops. Ignoring this' - ' UPDATE. %s', update_msg) - return - next_hop = update_msg.get_path_attr(BGP_ATTR_TYPE_NEXT_HOP).value + # Nothing to do if we do not have any new NLRIs in this message. msg_nlri_list = update_msg.nlri if not msg_nlri_list: @@ -1684,16 +1770,6 @@ processing. """ msg_rf = RF_IPv4_UC - # Check if this route family is among supported route families. - if msg_rf not in SUPPORTED_GLOBAL_RF: - LOG.info( - ( - 'Received route for route family %s which is' - ' not supported. Ignoring withdraws form this UPDATE.' - ) % msg_rf - ) - return - w_nlris = withdraw_list if not w_nlris: # If this is EOR of some kind, handle it @@ -1748,13 +1824,6 @@ (msg_rf, update_msg)) return - aspath = umsg_pattrs.get(BGP_ATTR_TYPE_AS_PATH) - # Check if AS_PATH has loops. - if aspath.has_local_as(self.local_as): - LOG.error('Update message AS_PATH has loops. Ignoring this' - ' UPDATE. %s', update_msg) - return - if msg_rf in (RF_IPv4_VPN, RF_IPv6_VPN): # Check if we have Extended Communities Attribute. # TODO(PH): Check if RT_NLRI afi/safi will ever have this attribute @@ -1784,6 +1853,7 @@ return next_hop = mpreach_nlri_attr.next_hop + # Nothing to do if we do not have any new NLRIs in this message. msg_nlri_list = mpreach_nlri_attr.nlri if not msg_nlri_list: @@ -1846,11 +1916,9 @@ # Check if this route family is among supported route families. if msg_rf not in SUPPORTED_GLOBAL_RF: LOG.info( - ( - 'Received route for route family %s which is' - ' not supported. Ignoring withdraws form this UPDATE.' - ) % msg_rf - ) + 'Received route family %s is not supported. ' + 'Ignoring withdraw routes on this UPDATE message.', + msg_rf) return w_nlris = mp_unreach_attr.withdrawn_routes @@ -2183,8 +2251,14 @@ # routing information contained in that UPDATE message to other # internal peers (unless the speaker acts as a BGP Route # Reflector) [RFC4271]. 
- if (self.remote_as == self._core_service.asn and - self.remote_as == path.source.remote_as): + if (self.remote_as == self._core_service.asn + and self.remote_as == path.source.remote_as + and isinstance(path.source, Peer) + and not path.source.is_route_reflector_client + and not self.is_route_reflector_client): + LOG.debug( + 'Skipping sending iBGP route to iBGP peer %s AS %s', + self.ip_address, self.remote_as) return # If new best path has community attribute, it should be taken into diff -Nru ryu-4.9/ryu/services/protocols/bgp/processor.py ryu-4.15/ryu/services/protocols/bgp/processor.py --- ryu-4.9/ryu/services/protocols/bgp/processor.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/processor.py 2017-07-02 11:08:32.000000000 +0000 @@ -30,6 +30,8 @@ from ryu.lib.packet.bgp import BGP_ATTR_TYPE_LOCAL_PREF from ryu.lib.packet.bgp import BGP_ATTR_TYPE_MULTI_EXIT_DISC from ryu.lib.packet.bgp import BGP_ATTR_TYPE_ORIGIN +from ryu.lib.packet.bgp import BGP_ATTR_TYPE_ORIGINATOR_ID +from ryu.lib.packet.bgp import BGP_ATTR_TYPE_CLUSTER_LIST from ryu.lib.packet.bgp import BGP_ATTR_ORIGIN_IGP from ryu.lib.packet.bgp import BGP_ATTR_ORIGIN_EGP from ryu.lib.packet.bgp import BGP_ATTR_ORIGIN_INCOMPLETE @@ -107,7 +109,7 @@ dest_processed = 0 LOG.debug('Processing destination...') while (dest_processed < self.work_units_per_cycle and - not self._dest_queue.is_empty()): + not self._dest_queue.is_empty()): # We process the first destination in the queue. next_dest = self._dest_queue.pop_first() if next_dest: @@ -169,6 +171,7 @@ BPR_ASN = 'ASN' BPR_IGP_COST = 'IGP Cost' BPR_ROUTER_ID = 'Router ID' +BPR_CLUSTER_LIST = 'Cluster List' def _compare_by_version(path1, path2): @@ -212,6 +215,8 @@ 9. Select the route with the lowest IGP cost to the next hop. 10. Select the route received from the peer with the lowest BGP router ID. + 11. Select the route received from the peer with the shorter + CLUSTER_LIST length. Returns None if best-path among given paths cannot be computed else best path. @@ -252,9 +257,12 @@ best_path = _cmp_by_router_id(local_asn, path1, path2) best_path_reason = BPR_ROUTER_ID if best_path is None: + best_path = _cmp_by_cluster_list(path1, path2) + best_path_reason = BPR_CLUSTER_LIST + if best_path is None: best_path_reason = BPR_UNKNOWN - return (best_path, best_path_reason) + return best_path, best_path_reason def _cmp_by_reachable_nh(path1, path2): @@ -462,10 +470,14 @@ else: return path_source.remote_as - def get_router_id(path_source, local_bgp_id): + def get_router_id(path, local_bgp_id): + path_source = path.source if path_source is None: return local_bgp_id else: + originator_id = path.get_pattr(BGP_ATTR_TYPE_ORIGINATOR_ID) + if originator_id: + return originator_id.value return path_source.protocol.recv_open_msg.bgp_identifier path_source1 = path1.source @@ -482,7 +494,7 @@ is_ebgp2 = asn2 != local_asn # If both paths are from eBGP peers, then according to RFC we need # not tie break using router id. - if (is_ebgp1 and is_ebgp2): + if is_ebgp1 and is_ebgp2: return None if ((is_ebgp1 is True and is_ebgp2 is False) or @@ -497,8 +509,8 @@ local_bgp_id = path_source2.protocol.sent_open_msg.bgp_identifier # Get router ids. - router_id1 = get_router_id(path_source1, local_bgp_id) - router_id2 = get_router_id(path_source2, local_bgp_id) + router_id1 = get_router_id(path1, local_bgp_id) + router_id2 = get_router_id(path2, local_bgp_id) # If both router ids are same/equal we cannot decide. # This case is possible since router ids are arbitrary. 
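
Illustrative sketch (not part of the patch): the configuration keys behind the
route-reflector and policy behaviour above, using the names added by the
rtconf hunks later in this diff. The dict layout and the ip_address/remote_as
neighbor keys are assumptions shown only to give the new keys some context.

    # Speaker-level settings (rtconf/common.py below).
    speaker_settings = {
        'local_as': 65000,
        'router_id': '10.0.0.1',
        'cluster_id': '10.0.0.1',           # CLUSTER_ID, defaults to router_id
        'local_pref': 200,                  # LOCAL_PREF, default 100
        'allow_local_as_in_count': 1,       # ALLOW_LOCAL_AS_IN_COUNT, default 0
    }

    # Per-neighbor settings (rtconf/neighbors.py below).
    rr_client_settings = {
        'ip_address': '10.0.0.2',           # assumed key name
        'remote_as': 65000,                 # assumed key name
        'is_route_reflector_client': True,  # IS_ROUTE_REFLECTOR_CLIENT
        'cap_mbgp_ipv4fs': True,            # CAP_MBGP_IPV4FS (IPv4 FlowSpec)
    }
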
@@ -507,7 +519,31 @@ # Select the path with lowest router Id. from ryu.services.protocols.bgp.utils.bgp import from_inet_ptoi - if (from_inet_ptoi(router_id1) < from_inet_ptoi(router_id2)): + if from_inet_ptoi(router_id1) < from_inet_ptoi(router_id2): return path1 else: return path2 + + +def _cmp_by_cluster_list(path1, path2): + """Selects the route received from the peer with the shorter + CLUSTER_LIST length. [RFC4456] + + The CLUSTER_LIST length is evaluated as zero if a route does not + carry the CLUSTER_LIST attribute. + """ + def _get_cluster_list_len(path): + c_list = path.get_pattr(BGP_ATTR_TYPE_CLUSTER_LIST) + if c_list is None: + return 0 + else: + return len(c_list.value) + + c_list_len1 = _get_cluster_list_len(path1) + c_list_len2 = _get_cluster_list_len(path2) + if c_list_len1 < c_list_len2: + return path1 + elif c_list_len1 > c_list_len2: + return path2 + else: + return None diff -Nru ryu-4.9/ryu/services/protocols/bgp/rtconf/base.py ryu-4.15/ryu/services/protocols/bgp/rtconf/base.py --- ryu-4.9/ryu/services/protocols/bgp/rtconf/base.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/rtconf/base.py 2017-07-02 11:08:32.000000000 +0000 @@ -46,6 +46,11 @@ CAP_MBGP_VPNV4 = 'cap_mbgp_vpnv4' CAP_MBGP_VPNV6 = 'cap_mbgp_vpnv6' CAP_MBGP_EVPN = 'cap_mbgp_evpn' +CAP_MBGP_IPV4FS = 'cap_mbgp_ipv4fs' +CAP_MBGP_IPV6FS = 'cap_mbgp_ipv6fs' +CAP_MBGP_VPNV4FS = 'cap_mbgp_vpnv4fs' +CAP_MBGP_VPNV6FS = 'cap_mbgp_vpnv6fs' +CAP_MBGP_L2VPNFS = 'cap_mbgp_l2vpnfs' CAP_RTC = 'cap_rtc' RTC_AS = 'rtc_as' HOLD_TIME = 'hold_time' @@ -562,7 +567,7 @@ @validate(name=ConfWithStats.STATS_LOG_ENABLED) def validate_stats_log_enabled(stats_log_enabled): - if stats_log_enabled not in (True, False): + if not isinstance(stats_log_enabled, bool): raise ConfigTypeError(desc='Statistics log enabled settings can only' ' be boolean type.') return stats_log_enabled @@ -582,7 +587,7 @@ @validate(name=CAP_REFRESH) def validate_cap_refresh(crefresh): - if crefresh not in (True, False): + if not isinstance(crefresh, bool): raise ConfigTypeError(desc='Invalid Refresh capability settings: %s. ' 'Boolean value expected' % crefresh) return crefresh @@ -590,7 +595,7 @@ @validate(name=CAP_ENHANCED_REFRESH) def validate_cap_enhanced_refresh(cer): - if cer not in (True, False): + if not isinstance(cer, bool): raise ConfigTypeError(desc='Invalid Enhanced Refresh capability ' 'settings: %s. Boolean value expected' % cer) return cer @@ -598,7 +603,7 @@ @validate(name=CAP_FOUR_OCTET_AS_NUMBER) def validate_cap_four_octet_as_number(cfoan): - if cfoan not in (True, False): + if not isinstance(cfoan, bool): raise ConfigTypeError(desc='Invalid Four-Octet AS Number capability ' 'settings: %s boolean value expected' % cfoan) return cfoan @@ -606,7 +611,7 @@ @validate(name=CAP_MBGP_IPV4) def validate_cap_mbgp_ipv4(cmv4): - if cmv4 not in (True, False): + if not isinstance(cmv4, bool): raise ConfigTypeError(desc='Invalid MP-BGP IPv4 capability ' 'settings: %s. Boolean value expected' % cmv4) @@ -615,7 +620,7 @@ @validate(name=CAP_MBGP_IPV6) def validate_cap_mbgp_ipv6(cmv6): - if cmv6 not in (True, False): + if not isinstance(cmv6, bool): raise ConfigTypeError(desc='Invalid MP-BGP IPv6 capability ' 'settings: %s. Boolean value expected' % cmv6) @@ -624,7 +629,7 @@ @validate(name=CAP_MBGP_VPNV4) def validate_cap_mbgp_vpnv4(cmv4): - if cmv4 not in (True, False): + if not isinstance(cmv4, bool): raise ConfigTypeError(desc='Invalid MP-BGP VPNv4 capability ' 'settings: %s. 
Boolean value expected' % cmv4) @@ -633,7 +638,7 @@ @validate(name=CAP_MBGP_VPNV6) def validate_cap_mbgp_vpnv6(cmv6): - if cmv6 not in (True, False): + if not isinstance(cmv6, bool): raise ConfigTypeError(desc='Invalid MP-BGP VPNv6 capability ' 'settings: %s. Boolean value expected' % cmv6) @@ -642,15 +647,60 @@ @validate(name=CAP_MBGP_EVPN) def validate_cap_mbgp_evpn(cmevpn): - if cmevpn not in (True, False): + if not isinstance(cmevpn, bool): raise ConfigTypeError(desc='Invalid Ethernet VPN capability ' 'settings: %s. Boolean value expected' % cmevpn) return cmevpn +@validate(name=CAP_MBGP_IPV4FS) +def validate_cap_mbgp_ipv4fs(cmv4fs): + if not isinstance(cmv4fs, bool): + raise ConfigTypeError(desc='Invalid MP-BGP ' + 'IPv4 Flow Specification capability ' + 'settings: %s. Boolean value expected' % cmv4fs) + return cmv4fs + + +@validate(name=CAP_MBGP_IPV6FS) +def validate_cap_mbgp_ipv6fs(cmv6fs): + if not isinstance(cmv6fs, bool): + raise ConfigTypeError(desc='Invalid MP-BGP ' + 'IPv6 Flow Specification capability ' + 'settings: %s. Boolean value expected' % cmv6fs) + return cmv6fs + + +@validate(name=CAP_MBGP_VPNV4FS) +def validate_cap_mbgp_vpnv4fs(cmv4fs): + if not isinstance(cmv4fs, bool): + raise ConfigTypeError(desc='Invalid MP-BGP ' + 'VPNv4 Flow Specification capability ' + 'settings: %s. Boolean value expected' % cmv4fs) + return cmv4fs + + +@validate(name=CAP_MBGP_VPNV6FS) +def validate_cap_mbgp_vpnv66fs(cmv6fs): + if not isinstance(cmv6fs, bool): + raise ConfigTypeError(desc='Invalid MP-BGP ' + 'VPNv6 Flow Specification capability ' + 'settings: %s. Boolean value expected' % cmv6fs) + return cmv6fs + + +@validate(name=CAP_MBGP_L2VPNFS) +def validate_cap_mbgp_l2vpnfs(cml2fs): + if not isinstance(cml2fs, bool): + raise ConfigTypeError(desc='Invalid MP-BGP ' + 'L2VPN Flow Specification capability ' + 'settings: %s. Boolean value expected' % cml2fs) + return cml2fs + + @validate(name=CAP_RTC) def validate_cap_rtc(cap_rtc): - if cap_rtc not in (True, False): + if not isinstance(cap_rtc, bool): raise ConfigTypeError(desc='Invalid type for specifying RTC ' 'capability. Expected boolean got: %s' % type(cap_rtc)) diff -Nru ryu-4.9/ryu/services/protocols/bgp/rtconf/common.py ryu-4.15/ryu/services/protocols/bgp/rtconf/common.py --- ryu-4.9/ryu/services/protocols/bgp/rtconf/common.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/rtconf/common.py 2017-07-02 11:08:32.000000000 +0000 @@ -37,9 +37,17 @@ # Global configuration settings. LOCAL_AS = 'local_as' ROUTER_ID = 'router_id' +CLUSTER_ID = 'cluster_id' LABEL_RANGE = 'label_range' LABEL_RANGE_MAX = 'max' LABEL_RANGE_MIN = 'min' +LOCAL_PREF = 'local_pref' + +# Similar to Cisco command 'allowas-in'. Allows the local ASN in the path. +# Facilitates auto rd, auto rt import/export +# ("rd auto/route-target both auto") and simplified spine/leaf architectures, +# sharing an ASN between e.g. leafs. 
+ALLOW_LOCAL_AS_IN_COUNT = 'allow_local_as_in_count' # Configuration that can be set at global level as well as per context # (session/vrf) level @@ -78,6 +86,18 @@ DEFAULT_BGP_CONN_RETRY_TIME = 30 DEFAULT_MED = 0 DEFAULT_MAX_PATH_EXT_RTFILTER_ALL = True +DEFAULT_LOCAL_PREF = 100 + + +@validate(name=ALLOW_LOCAL_AS_IN_COUNT) +def validate_allow_local_as_in_count(count): + if not isinstance(count, numbers.Integral): + raise ConfigTypeError(desc=('Configuration value for %s has to be ' + 'integral type' % ALLOW_LOCAL_AS_IN_COUNT)) + if count < 0: + raise ConfigValueError(desc='Invalid local AS count %s' % count) + + return count @validate(name=LOCAL_AS) @@ -104,6 +124,16 @@ return router_id +@validate(name=CLUSTER_ID) +def validate_router_id(cluster_id): + if not isinstance(cluster_id, str): + raise ConfigTypeError(conf_name=CLUSTER_ID) + if not is_valid_ipv4(cluster_id): + raise ConfigValueError(desc='Invalid cluster id %s' % cluster_id) + + return cluster_id + + @validate(name=REFRESH_STALEPATH_TIME) def validate_refresh_stalepath_time(rst): if not isinstance(rst, numbers.Integral): @@ -184,13 +214,22 @@ @validate(name=MAX_PATH_EXT_RTFILTER_ALL) def validate_max_path_ext_rtfilter_all(max_path_ext_rtfilter_all): - if max_path_ext_rtfilter_all not in (True, False): + if not isinstance(max_path_ext_rtfilter_all, bool): raise ConfigTypeError(desc=('Invalid max_path_ext_rtfilter_all' ' configuration value %s' % max_path_ext_rtfilter_all)) return max_path_ext_rtfilter_all +@validate(name=LOCAL_PREF) +def validate_local_pref(local_pref): + if not isinstance(local_pref, numbers.Integral): + raise ConfigTypeError(desc=('Invalid local_pref' + ' configuration value %s' % + local_pref)) + return local_pref + + class CommonConf(BaseConf): """Encapsulates configurations applicable to all peer sessions. @@ -208,13 +247,18 @@ LABEL_RANGE, BGP_SERVER_PORT, TCP_CONN_TIMEOUT, BGP_CONN_RETRY_TIME, - MAX_PATH_EXT_RTFILTER_ALL]) + MAX_PATH_EXT_RTFILTER_ALL, + ALLOW_LOCAL_AS_IN_COUNT, + CLUSTER_ID, + LOCAL_PREF]) def __init__(self, **kwargs): super(CommonConf, self).__init__(**kwargs) def _init_opt_settings(self, **kwargs): super(CommonConf, self)._init_opt_settings(**kwargs) + self._settings[ALLOW_LOCAL_AS_IN_COUNT] = compute_optional_conf( + ALLOW_LOCAL_AS_IN_COUNT, 0, **kwargs) self._settings[LABEL_RANGE] = compute_optional_conf( LABEL_RANGE, DEFAULT_LABEL_RANGE, **kwargs) self._settings[REFRESH_STALEPATH_TIME] = compute_optional_conf( @@ -230,6 +274,10 @@ self._settings[MAX_PATH_EXT_RTFILTER_ALL] = compute_optional_conf( MAX_PATH_EXT_RTFILTER_ALL, DEFAULT_MAX_PATH_EXT_RTFILTER_ALL, **kwargs) + self._settings[CLUSTER_ID] = compute_optional_conf( + CLUSTER_ID, kwargs[ROUTER_ID], **kwargs) + self._settings[LOCAL_PREF] = compute_optional_conf( + LOCAL_PREF, DEFAULT_LOCAL_PREF, **kwargs) # ========================================================================= # Required attributes @@ -246,6 +294,13 @@ # ========================================================================= # Optional attributes with valid defaults. 
# ========================================================================= + @property + def cluster_id(self): + return self._settings[CLUSTER_ID] + + @property + def allow_local_as_in_count(self): + return self._settings[ALLOW_LOCAL_AS_IN_COUNT] @property def bgp_conn_retry_time(self): @@ -275,6 +330,10 @@ def max_path_ext_rtfilter_all(self): return self._settings[MAX_PATH_EXT_RTFILTER_ALL] + @property + def local_pref(self): + return self._settings[LOCAL_PREF] + @classmethod def get_opt_settings(self): self_confs = super(CommonConf, self).get_opt_settings() diff -Nru ryu-4.9/ryu/services/protocols/bgp/rtconf/neighbors.py ryu-4.15/ryu/services/protocols/bgp/rtconf/neighbors.py --- ryu-4.9/ryu/services/protocols/bgp/rtconf/neighbors.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/rtconf/neighbors.py 2017-07-02 11:08:32.000000000 +0000 @@ -27,6 +27,11 @@ from ryu.lib.packet.bgp import RF_IPv4_VPN from ryu.lib.packet.bgp import RF_IPv6_VPN from ryu.lib.packet.bgp import RF_L2_EVPN +from ryu.lib.packet.bgp import RF_IPv4_FLOWSPEC +from ryu.lib.packet.bgp import RF_IPv6_FLOWSPEC +from ryu.lib.packet.bgp import RF_VPNv4_FLOWSPEC +from ryu.lib.packet.bgp import RF_VPNv6_FLOWSPEC +from ryu.lib.packet.bgp import RF_L2VPN_FLOWSPEC from ryu.lib.packet.bgp import RF_RTC_UC from ryu.lib.packet.bgp import BGPOptParamCapabilityFourOctetAsNumber from ryu.lib.packet.bgp import BGPOptParamCapabilityEnhancedRouteRefresh @@ -48,6 +53,11 @@ from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV4 from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV6 from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_EVPN +from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV4FS +from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_IPV6FS +from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV4FS +from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_VPNV6FS +from ryu.services.protocols.bgp.rtconf.base import CAP_MBGP_L2VPNFS from ryu.services.protocols.bgp.rtconf.base import CAP_REFRESH from ryu.services.protocols.bgp.rtconf.base import CAP_RTC from ryu.services.protocols.bgp.rtconf.base import compute_optional_conf @@ -86,6 +96,7 @@ IN_FILTER = 'in_filter' OUT_FILTER = 'out_filter' IS_ROUTE_SERVER_CLIENT = 'is_route_server_client' +IS_ROUTE_REFLECTOR_CLIENT = 'is_route_reflector_client' CHECK_FIRST_AS = 'check_first_as' ATTRIBUTE_MAP = 'attribute_map' IS_NEXT_HOP_SELF = 'is_next_hop_self' @@ -104,12 +115,18 @@ DEFAULT_CAP_MBGP_VPNV4 = False DEFAULT_CAP_MBGP_VPNV6 = False DEFAULT_CAP_MBGP_EVPN = False +DEFAULT_CAP_MBGP_IPV4FS = False +DEFAULT_CAP_MBGP_IPV6FS = False +DEFAULT_CAP_MBGP_VPNV4FS = False +DEFAULT_CAP_MBGP_VPNV6FS = False +DEFAULT_CAP_MBGP_L2VPNFS = False DEFAULT_HOLD_TIME = 40 DEFAULT_ENABLED = True DEFAULT_CAP_RTC = False DEFAULT_IN_FILTER = [] DEFAULT_OUT_FILTER = [] DEFAULT_IS_ROUTE_SERVER_CLIENT = False +DEFAULT_IS_ROUTE_REFLECTOR_CLIENT = False DEFAULT_CHECK_FIRST_AS = False DEFAULT_IS_NEXT_HOP_SELF = False DEFAULT_CONNECT_MODE = CONNECT_MODE_BOTH @@ -257,16 +274,25 @@ @validate(name=IS_ROUTE_SERVER_CLIENT) def validate_is_route_server_client(is_route_server_client): - if is_route_server_client not in (True, False): + if not isinstance(is_route_server_client, bool): raise ConfigValueError(desc='Invalid is_route_server_client(%s)' % is_route_server_client) return is_route_server_client +@validate(name=IS_ROUTE_REFLECTOR_CLIENT) +def validate_is_route_reflector_client(is_route_reflector_client): + if not 
isinstance(is_route_reflector_client, bool): + raise ConfigValueError(desc='Invalid is_route_reflector_client(%s)' % + is_route_reflector_client) + + return is_route_reflector_client + + @validate(name=CHECK_FIRST_AS) def validate_check_first_as(check_first_as): - if check_first_as not in (True, False): + if not isinstance(check_first_as, bool): raise ConfigValueError(desc='Invalid check_first_as(%s)' % check_first_as) @@ -275,7 +301,7 @@ @validate(name=IS_NEXT_HOP_SELF) def validate_is_next_hop_self(is_next_hop_self): - if is_next_hop_self not in (True, False): + if not isinstance(is_next_hop_self, bool): raise ConfigValueError(desc='Invalid is_next_hop_self(%s)' % is_next_hop_self) @@ -306,13 +332,19 @@ CAP_FOUR_OCTET_AS_NUMBER, CAP_MBGP_IPV4, CAP_MBGP_IPV6, CAP_MBGP_VPNV4, CAP_MBGP_VPNV6, - CAP_RTC, CAP_MBGP_EVPN, RTC_AS, HOLD_TIME, + CAP_RTC, CAP_MBGP_EVPN, + CAP_MBGP_IPV4FS, CAP_MBGP_VPNV4FS, + CAP_MBGP_IPV6FS, CAP_MBGP_VPNV6FS, + CAP_MBGP_L2VPNFS, + RTC_AS, HOLD_TIME, ENABLED, MULTI_EXIT_DISC, MAX_PREFIXES, ADVERTISE_PEER_AS, SITE_OF_ORIGINS, LOCAL_ADDRESS, LOCAL_PORT, LOCAL_AS, PEER_NEXT_HOP, PASSWORD, IN_FILTER, OUT_FILTER, - IS_ROUTE_SERVER_CLIENT, CHECK_FIRST_AS, + IS_ROUTE_SERVER_CLIENT, + IS_ROUTE_REFLECTOR_CLIENT, + CHECK_FIRST_AS, IS_NEXT_HOP_SELF, CONNECT_MODE]) def __init__(self, **kwargs): @@ -336,6 +368,16 @@ CAP_MBGP_EVPN, DEFAULT_CAP_MBGP_EVPN, **kwargs) self._settings[CAP_MBGP_VPNV6] = compute_optional_conf( CAP_MBGP_VPNV6, DEFAULT_CAP_MBGP_VPNV6, **kwargs) + self._settings[CAP_MBGP_IPV4FS] = compute_optional_conf( + CAP_MBGP_IPV4FS, DEFAULT_CAP_MBGP_IPV4FS, **kwargs) + self._settings[CAP_MBGP_IPV6FS] = compute_optional_conf( + CAP_MBGP_IPV6FS, DEFAULT_CAP_MBGP_IPV6FS, **kwargs) + self._settings[CAP_MBGP_VPNV4FS] = compute_optional_conf( + CAP_MBGP_VPNV4FS, DEFAULT_CAP_MBGP_VPNV4FS, **kwargs) + self._settings[CAP_MBGP_VPNV6FS] = compute_optional_conf( + CAP_MBGP_VPNV6FS, DEFAULT_CAP_MBGP_VPNV6FS, **kwargs) + self._settings[CAP_MBGP_L2VPNFS] = compute_optional_conf( + CAP_MBGP_L2VPNFS, DEFAULT_CAP_MBGP_L2VPNFS, **kwargs) self._settings[HOLD_TIME] = compute_optional_conf( HOLD_TIME, DEFAULT_HOLD_TIME, **kwargs) self._settings[ENABLED] = compute_optional_conf( @@ -351,6 +393,9 @@ self._settings[IS_ROUTE_SERVER_CLIENT] = compute_optional_conf( IS_ROUTE_SERVER_CLIENT, DEFAULT_IS_ROUTE_SERVER_CLIENT, **kwargs) + self._settings[IS_ROUTE_REFLECTOR_CLIENT] = compute_optional_conf( + IS_ROUTE_REFLECTOR_CLIENT, + DEFAULT_IS_ROUTE_REFLECTOR_CLIENT, **kwargs) self._settings[CHECK_FIRST_AS] = compute_optional_conf( CHECK_FIRST_AS, DEFAULT_CHECK_FIRST_AS, **kwargs) self._settings[IS_NEXT_HOP_SELF] = compute_optional_conf( @@ -503,6 +548,26 @@ return self._settings[CAP_MBGP_EVPN] @property + def cap_mbgp_ipv4fs(self): + return self._settings[CAP_MBGP_IPV4FS] + + @property + def cap_mbgp_ipv6fs(self): + return self._settings[CAP_MBGP_IPV6FS] + + @property + def cap_mbgp_vpnv4fs(self): + return self._settings[CAP_MBGP_VPNV4FS] + + @property + def cap_mbgp_vpnv6fs(self): + return self._settings[CAP_MBGP_VPNV6FS] + + @property + def cap_mbgp_l2vpnfs(self): + return self._settings[CAP_MBGP_L2VPNFS] + + @property def cap_rtc(self): return self._settings[CAP_RTC] @@ -560,6 +625,10 @@ return self._settings[IS_ROUTE_SERVER_CLIENT] @property + def is_route_reflector_client(self): + return self._settings[IS_ROUTE_REFLECTOR_CLIENT] + + @property def check_first_as(self): return self._settings[CHECK_FIRST_AS] @@ -622,6 +691,31 @@ BGPOptParamCapabilityMultiprotocol( RF_L2_EVPN.afi, 
RF_L2_EVPN.safi)) + if self.cap_mbgp_ipv4fs: + mbgp_caps.append( + BGPOptParamCapabilityMultiprotocol( + RF_IPv4_FLOWSPEC.afi, RF_IPv4_FLOWSPEC.safi)) + + if self.cap_mbgp_ipv6fs: + mbgp_caps.append( + BGPOptParamCapabilityMultiprotocol( + RF_IPv6_FLOWSPEC.afi, RF_IPv6_FLOWSPEC.safi)) + + if self.cap_mbgp_vpnv4fs: + mbgp_caps.append( + BGPOptParamCapabilityMultiprotocol( + RF_VPNv4_FLOWSPEC.afi, RF_VPNv4_FLOWSPEC.safi)) + + if self.cap_mbgp_vpnv6fs: + mbgp_caps.append( + BGPOptParamCapabilityMultiprotocol( + RF_VPNv6_FLOWSPEC.afi, RF_VPNv6_FLOWSPEC.safi)) + + if self.cap_mbgp_l2vpnfs: + mbgp_caps.append( + BGPOptParamCapabilityMultiprotocol( + RF_L2VPN_FLOWSPEC.afi, RF_L2VPN_FLOWSPEC.safi)) + if mbgp_caps: capabilities[BGP_CAP_MULTIPROTOCOL] = mbgp_caps diff -Nru ryu-4.9/ryu/services/protocols/bgp/rtconf/vrfs.py ryu-4.15/ryu/services/protocols/bgp/rtconf/vrfs.py --- ryu-4.9/ryu/services/protocols/bgp/rtconf/vrfs.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/rtconf/vrfs.py 2017-07-02 11:08:32.000000000 +0000 @@ -23,6 +23,9 @@ from ryu.lib.packet.bgp import RF_IPv4_UC from ryu.lib.packet.bgp import RF_IPv6_UC from ryu.lib.packet.bgp import RF_L2_EVPN +from ryu.lib.packet.bgp import RF_IPv4_FLOWSPEC +from ryu.lib.packet.bgp import RF_IPv6_FLOWSPEC +from ryu.lib.packet.bgp import RF_L2VPN_FLOWSPEC from ryu.services.protocols.bgp.utils import validation from ryu.services.protocols.bgp.base import get_validator @@ -59,7 +62,17 @@ VRF_RF_IPV4 = 'ipv4' VRF_RF_IPV6 = 'ipv6' VRF_RF_L2_EVPN = 'evpn' -SUPPORTED_VRF_RF = (VRF_RF_IPV4, VRF_RF_IPV6, VRF_RF_L2_EVPN) +VRF_RF_IPV4_FLOWSPEC = 'ipv4fs' +VRF_RF_IPV6_FLOWSPEC = 'ipv6fs' +VRF_RF_L2VPN_FLOWSPEC = 'l2vpnfs' +SUPPORTED_VRF_RF = ( + VRF_RF_IPV4, + VRF_RF_IPV6, + VRF_RF_L2_EVPN, + VRF_RF_IPV4_FLOWSPEC, + VRF_RF_IPV6_FLOWSPEC, + VRF_RF_L2VPN_FLOWSPEC, +) # Default configuration values. @@ -226,6 +239,12 @@ return RF_IPv6_UC elif vrf_rf == VRF_RF_L2_EVPN: return RF_L2_EVPN + elif vrf_rf == VRF_RF_IPV4_FLOWSPEC: + return RF_IPv4_FLOWSPEC + elif vrf_rf == VRF_RF_IPV6_FLOWSPEC: + return RF_IPv6_FLOWSPEC + elif vrf_rf == VRF_RF_L2VPN_FLOWSPEC: + return RF_L2VPN_FLOWSPEC else: raise ValueError('Unsupported VRF route family given %s' % vrf_rf) @@ -237,6 +256,12 @@ return VRF_RF_IPV6 elif route_family == RF_L2_EVPN: return VRF_RF_L2_EVPN + elif route_family == RF_IPv4_FLOWSPEC: + return VRF_RF_IPV4_FLOWSPEC + elif route_family == RF_IPv6_FLOWSPEC: + return VRF_RF_IPV6_FLOWSPEC + elif route_family == RF_L2VPN_FLOWSPEC: + return VRF_RF_L2VPN_FLOWSPEC else: raise ValueError('No supported mapping for route family ' 'to vrf_route_family exists for %s' % diff -Nru ryu-4.9/ryu/services/protocols/bgp/utils/bgp.py ryu-4.15/ryu/services/protocols/bgp/utils/bgp.py --- ryu-4.9/ryu/services/protocols/bgp/utils/bgp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/utils/bgp.py 2017-07-02 11:08:32.000000000 +0000 @@ -17,8 +17,10 @@ Utilities related to bgp data types and models. 
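Taken together, the neighbors.py hunks above add per-peer FlowSpec capability flags (cap_mbgp_ipv4fs and friends) and an is_route_reflector_client option. A rough sketch of how this could surface through the high-level BGPSpeaker API, assuming it forwards matching keyword arguments (the names enable_ipv4fs and is_route_reflector_client are assumptions, not confirmed by these hunks):

.. code-block:: python

    # Hypothetical sketch; the keyword names mirror the new NeighborConf
    # settings and may differ in bgpspeaker.py of this release.
    from ryu.services.protocols.bgp.bgpspeaker import BGPSpeaker

    speaker = BGPSpeaker(as_number=65000, router_id='10.0.0.1')
    speaker.neighbor_add(
        address='10.0.0.2',
        remote_as=65000,
        enable_ipv4fs=True,              # advertise the IPv4 FlowSpec capability
        is_route_reflector_client=True)  # treat this iBGP peer as an RR client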
""" import logging -import socket +import netaddr + +from ryu.lib import ip from ryu.lib.packet.bgp import ( BGPUpdate, RF_IPv4_UC, @@ -26,6 +28,11 @@ RF_IPv4_VPN, RF_IPv6_VPN, RF_L2_EVPN, + RF_IPv4_FLOWSPEC, + RF_IPv6_FLOWSPEC, + RF_VPNv4_FLOWSPEC, + RF_VPNv6_FLOWSPEC, + RF_L2VPN_FLOWSPEC, RF_RTC_UC, RouteTargetMembershipNLRI, BGP_ATTR_TYPE_MULTI_EXIT_DISC, @@ -36,6 +43,15 @@ BGPPathAttributeUnknown, BGP_ATTR_FLAG_OPTIONAL, BGP_ATTR_FLAG_TRANSITIVE, + BGPTwoOctetAsSpecificExtendedCommunity, + BGPIPv4AddressSpecificExtendedCommunity, + BGPFourOctetAsSpecificExtendedCommunity, + BGPFlowSpecTrafficRateCommunity, + BGPFlowSpecTrafficActionCommunity, + BGPFlowSpecRedirectCommunity, + BGPFlowSpecTrafficMarkingCommunity, + BGPFlowSpecVlanActionCommunity, + BGPFlowSpecTPIDActionCommunity, ) from ryu.services.protocols.bgp.info_base.rtc import RtcPath from ryu.services.protocols.bgp.info_base.ipv4 import Ipv4Path @@ -43,6 +59,11 @@ from ryu.services.protocols.bgp.info_base.vpnv4 import Vpnv4Path from ryu.services.protocols.bgp.info_base.vpnv6 import Vpnv6Path from ryu.services.protocols.bgp.info_base.evpn import EvpnPath +from ryu.services.protocols.bgp.info_base.ipv4fs import IPv4FlowSpecPath +from ryu.services.protocols.bgp.info_base.ipv6fs import IPv6FlowSpecPath +from ryu.services.protocols.bgp.info_base.vpnv4fs import VPNv4FlowSpecPath +from ryu.services.protocols.bgp.info_base.vpnv6fs import VPNv6FlowSpecPath +from ryu.services.protocols.bgp.info_base.l2vpnfs import L2VPNFlowSpecPath LOG = logging.getLogger('utils.bgp') @@ -53,6 +74,11 @@ RF_IPv4_VPN: Vpnv4Path, RF_IPv6_VPN: Vpnv6Path, RF_L2_EVPN: EvpnPath, + RF_IPv4_FLOWSPEC: IPv4FlowSpecPath, + RF_IPv6_FLOWSPEC: IPv6FlowSpecPath, + RF_VPNv4_FLOWSPEC: VPNv4FlowSpecPath, + RF_VPNv6_FLOWSPEC: VPNv6FlowSpecPath, + RF_L2VPN_FLOWSPEC: L2VPNFlowSpecPath, RF_RTC_UC: RtcPath} @@ -102,8 +128,7 @@ """ four_byte_id = None try: - packed_byte = socket.inet_pton(socket.AF_INET, bgp_id) - four_byte_id = int(packed_byte.encode('hex'), 16) + four_byte_id = ip.ipv4_to_int(bgp_id) except ValueError: LOG.debug('Invalid bgp id given for conversion to integer value %s', bgp_id) @@ -141,3 +166,132 @@ # Bgp update message instance that can used as End of RIB marker. UPDATE_EOR = create_end_of_rib_update() + + +def create_rt_extended_community(value, subtype=2): + """ + Creates an instance of the BGP Route Target Community (if "subtype=2") + or Route Origin Community ("subtype=3"). + + :param value: String of Route Target or Route Origin value. + :param subtype: Subtype of Extended Community. + :return: An instance of Route Target or Route Origin Community. 
+ """ + global_admin, local_admin = value.split(':') + local_admin = int(local_admin) + if global_admin.isdigit() and 0 <= int(global_admin) <= 0xffff: + ext_com = BGPTwoOctetAsSpecificExtendedCommunity( + subtype=subtype, + as_number=int(global_admin), + local_administrator=local_admin) + elif global_admin.isdigit() and 0xffff < int(global_admin) <= 0xffffffff: + ext_com = BGPFourOctetAsSpecificExtendedCommunity( + subtype=subtype, + as_number=int(global_admin), + local_administrator=local_admin) + elif netaddr.valid_ipv4(global_admin): + ext_com = BGPIPv4AddressSpecificExtendedCommunity( + subtype=subtype, + ipv4_address=global_admin, + local_administrator=local_admin) + else: + raise ValueError( + 'Invalid Route Target or Route Origin value: %s' % value) + + return ext_com + + +def create_v4flowspec_actions(actions=None): + """ + Create list of traffic filtering actions + for Ipv4 Flow Specification and VPNv4 Flow Specification. + + `` actions`` specifies Traffic Filtering Actions of + Flow Specification as a dictionary type value. + + Returns a list of extended community values. + """ + from ryu.services.protocols.bgp.api.prefix import ( + FLOWSPEC_ACTION_TRAFFIC_RATE, + FLOWSPEC_ACTION_TRAFFIC_ACTION, + FLOWSPEC_ACTION_REDIRECT, + FLOWSPEC_ACTION_TRAFFIC_MARKING, + ) + + # Supported action type for IPv4 and VPNv4. + action_types = { + FLOWSPEC_ACTION_TRAFFIC_RATE: BGPFlowSpecTrafficRateCommunity, + FLOWSPEC_ACTION_TRAFFIC_ACTION: BGPFlowSpecTrafficActionCommunity, + FLOWSPEC_ACTION_REDIRECT: BGPFlowSpecRedirectCommunity, + FLOWSPEC_ACTION_TRAFFIC_MARKING: BGPFlowSpecTrafficMarkingCommunity, + } + + return _create_actions(actions, action_types) + + +def create_v6flowspec_actions(actions=None): + """ + Create list of traffic filtering actions + for Ipv6 Flow Specification and VPNv6 Flow Specification. + + "FLOWSPEC_ACTION_REDIRECT_IPV6" is not implemented yet. + """ + from ryu.services.protocols.bgp.api.prefix import ( + FLOWSPEC_ACTION_TRAFFIC_RATE, + FLOWSPEC_ACTION_TRAFFIC_ACTION, + FLOWSPEC_ACTION_REDIRECT, + FLOWSPEC_ACTION_TRAFFIC_MARKING, + ) + + # Supported action type for IPv6 and VPNv6. + action_types = { + FLOWSPEC_ACTION_TRAFFIC_RATE: BGPFlowSpecTrafficRateCommunity, + FLOWSPEC_ACTION_TRAFFIC_ACTION: BGPFlowSpecTrafficActionCommunity, + FLOWSPEC_ACTION_REDIRECT: BGPFlowSpecRedirectCommunity, + FLOWSPEC_ACTION_TRAFFIC_MARKING: BGPFlowSpecTrafficMarkingCommunity, + } + + return _create_actions(actions, action_types) + + +def create_l2vpnflowspec_actions(actions=None): + """ + Create list of traffic filtering actions for L2VPN Flow Specification. + """ + from ryu.services.protocols.bgp.api.prefix import ( + FLOWSPEC_ACTION_TRAFFIC_RATE, + FLOWSPEC_ACTION_TRAFFIC_ACTION, + FLOWSPEC_ACTION_REDIRECT, + FLOWSPEC_ACTION_TRAFFIC_MARKING, + FLOWSPEC_ACTION_VLAN, + FLOWSPEC_ACTION_TPID, + ) + + # Supported action type for L2VPN. 
+ action_types = { + FLOWSPEC_ACTION_TRAFFIC_RATE: BGPFlowSpecTrafficRateCommunity, + FLOWSPEC_ACTION_TRAFFIC_ACTION: BGPFlowSpecTrafficActionCommunity, + FLOWSPEC_ACTION_REDIRECT: BGPFlowSpecRedirectCommunity, + FLOWSPEC_ACTION_TRAFFIC_MARKING: BGPFlowSpecTrafficMarkingCommunity, + FLOWSPEC_ACTION_VLAN: BGPFlowSpecVlanActionCommunity, + FLOWSPEC_ACTION_TPID: BGPFlowSpecTPIDActionCommunity, + } + + return _create_actions(actions, action_types) + + +def _create_actions(actions, action_types): + communities = [] + + if actions is None: + return communities + + for name, action in actions.items(): + cls_ = action_types.get(name, None) + if cls_: + communities.append(cls_(**action)) + else: + raise ValueError( + 'Unsupported flowspec action %s' % name) + + return communities diff -Nru ryu-4.9/ryu/services/protocols/bgp/utils/validation.py ryu-4.15/ryu/services/protocols/bgp/utils/validation.py --- ryu-4.9/ryu/services/protocols/bgp/utils/validation.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/bgp/utils/validation.py 2017-07-02 11:08:32.000000000 +0000 @@ -240,8 +240,9 @@ def is_valid_esi(esi): """Returns True if the given EVPN Ethernet SegmentEthernet ID is valid.""" - # Note: Currently, only integer type value is supported - return isinstance(esi, numbers.Integral) + if isinstance(esi, numbers.Integral): + return 0 <= esi <= 0xffffffffffffffffff + return isinstance(esi, dict) def is_valid_ethernet_tag_id(etag_id): diff -Nru ryu-4.9/ryu/services/protocols/ovsdb/api.py ryu-4.15/ryu/services/protocols/ovsdb/api.py --- ryu-4.9/ryu/services/protocols/ovsdb/api.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/ovsdb/api.py 2017-07-02 11:08:32.000000000 +0000 @@ -12,13 +12,12 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. 
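The validation.py hunk relaxes is_valid_esi(): integer ESIs are range-checked and a dict form (used for structured ESI types) is now accepted as well. For illustration (the dict content is a placeholder; only its type matters to this check):

.. code-block:: python

    from ryu.services.protocols.bgp.utils.validation import is_valid_esi

    assert is_valid_esi(0)                 # integer ESIs still pass
    assert is_valid_esi({'type': 0})       # dict-based (structured) ESIs now pass
    assert not is_valid_esi('not-an-esi')  # other types are rejected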
-from ryu.lib import dpid as dpidlib -from ryu.services.protocols.ovsdb import event as ovsdb_event - -import six import uuid +from ryu.lib import dpid as dpidlib +from ryu.services.protocols.ovsdb import event as ovsdb_event + def _get_table_row(table, attr_name, attr_value, tables): sentinel = object() @@ -364,6 +363,8 @@ def set_controller(manager, system_id, bridge_name, target, controller_info=None): + controller_info = controller_info or {} + def _set_controller(tables, insert): bridge = _get_bridge(tables, bridge_name) @@ -379,7 +380,7 @@ controller.connection_mode = ['out-of-band'] if controller_info: - for key, val in six.iteritems(controller_info): + for key, val in controller_info.items(): setattr(controller, key, val) bridge.controller = [controller] @@ -419,11 +420,11 @@ port_info['name'] = default_port_name iface = insert(tables['Interface'], iface_insert_uuid) - for key, val in six.iteritems(iface_info): + for key, val in iface_info.items(): setattr(iface, key, val) port = insert(tables['Port'], port_insert_uuid) - for key, val in six.iteritems(port_info): + for key, val in port_info.items(): setattr(port, key, val) port.interfaces = [iface] diff -Nru ryu-4.9/ryu/services/protocols/ovsdb/client.py ryu-4.15/ryu/services/protocols/ovsdb/client.py --- ryu-4.9/ryu/services/protocols/ovsdb/client.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/ovsdb/client.py 2017-07-02 11:08:32.000000000 +0000 @@ -155,12 +155,12 @@ """ tables = {} - for tbl_name, tbl_data in schema['tables'].iteritems(): + for tbl_name, tbl_data in schema['tables'].items(): if not schema_tables or tbl_name in schema_tables: columns = {} exclude_columns = exclude_table_columns.get(tbl_name, []) - for col_name, col_data in tbl_data['columns'].iteritems(): + for col_name, col_data in tbl_data['columns'].items(): if col_name in exclude_columns: continue @@ -215,6 +215,8 @@ self._monitor_request_id = None self._last_seqno = None self.change_seqno = 0 + self.uuid = uuid.uuid1() + self.state = self.IDL_S_INITIAL # Database locking. self.lock_name = None # Name of lock we need, None if none. 
@@ -233,6 +235,8 @@ table.need_table = False table.rows = {} table.idl = self + table.condition = [] + table.cond_changed = False @property def events(self): @@ -282,7 +286,8 @@ @classmethod def factory(cls, sock, address, probe_interval=None, min_backoff=None, max_backoff=None, schema_tables=None, - schema_exclude_columns={}, *args, **kwargs): + schema_exclude_columns=None, *args, **kwargs): + schema_exclude_columns = schema_exclude_columns or {} ovs_stream = stream.Stream(sock, None, None) connection = jsonrpc.Connection(ovs_stream) schemas = discover_schemas(connection) diff -Nru ryu-4.9/ryu/services/protocols/ovsdb/event.py ryu-4.15/ryu/services/protocols/ovsdb/event.py --- ryu-4.9/ryu/services/protocols/ovsdb/event.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/ovsdb/event.py 2017-07-02 11:08:32.000000000 +0000 @@ -119,13 +119,17 @@ class EventNewOVSDBConnection(ryu_event.EventBase): - def __init__(self, system_id): + def __init__(self, client): super(EventNewOVSDBConnection, self).__init__() - self.system_id = system_id + self.client = client def __str__(self): return '%s' % (self.__class__.__name__, - self.system_id) + self.client.system_id) + + @property + def system_id(self): + return self.client.system_id class EventReadRequest(ryu_event.EventRequestBase): diff -Nru ryu-4.9/ryu/services/protocols/ovsdb/manager.py ryu-4.15/ryu/services/protocols/ovsdb/manager.py --- ryu-4.9/ryu/services/protocols/ovsdb/manager.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/ovsdb/manager.py 2017-07-02 11:08:32.000000000 +0000 @@ -146,7 +146,7 @@ if app: self._clients[app.name] = app app.start() - ev = event.EventNewOVSDBConnection(app.system_id) + ev = event.EventNewOVSDBConnection(app) self.send_event_to_observers(ev) else: diff -Nru ryu-4.9/ryu/services/protocols/zebra/client/event.py ryu-4.15/ryu/services/protocols/zebra/client/event.py --- ryu-4.9/ryu/services/protocols/zebra/client/event.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/client/event.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,46 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Events generated by Zebra Client service. +""" + +from ryu.controller.event import EventBase + + +class EventZClientBase(EventBase): + """ + The base class for the event generated by ZClient. + """ + + +class EventZServConnected(EventZClientBase): + """ + The event class for notifying the connection to Zebra server. + """ + + def __init__(self, zserv): + super(EventZServConnected, self).__init__() + self.zserv = zserv + + +class EventZServDisconnected(EventZClientBase): + """ + The event class for notifying the disconnection from Zebra server. 
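With the ovsdb event.py and manager.py hunks above, EventNewOVSDBConnection now carries the client application itself while keeping a system_id property for backward compatibility. A minimal observer sketch (the app name is illustrative):

.. code-block:: python

    from ryu.base.app_manager import RyuApp
    from ryu.controller.handler import set_ev_cls
    from ryu.services.protocols.ovsdb import event as ovsdb_event


    class OVSDBWatcher(RyuApp):
        """Illustrative observer of new OVSDB connections."""

        @set_ev_cls(ovsdb_event.EventNewOVSDBConnection)
        def _new_connection_handler(self, ev):
            # ev.system_id still works; ev.client exposes the client app itself.
            self.logger.info('OVSDB switch %s connected (client=%s)',
                             ev.system_id, ev.client)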
+ """ + + def __init__(self, zserv): + super(EventZServDisconnected, self).__init__() + self.zserv = zserv diff -Nru ryu-4.9/ryu/services/protocols/zebra/client/__init__.py ryu-4.15/ryu/services/protocols/zebra/client/__init__.py --- ryu-4.9/ryu/services/protocols/zebra/client/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/client/__init__.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,20 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Client implementation for Zebra protocol service. + +This module provides the client side implementation for Zebra protocol. +""" diff -Nru ryu-4.9/ryu/services/protocols/zebra/client/sample_dumper.py ryu-4.15/ryu/services/protocols/zebra/client/sample_dumper.py --- ryu-4.9/ryu/services/protocols/zebra/client/sample_dumper.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/client/sample_dumper.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,53 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Sample Zebra Client application dumping received events. 
+""" + +from ryu.controller.handler import set_ev_cls +from ryu.lib.packet import zebra +from ryu.services.protocols.zebra import event +from ryu.services.protocols.zebra.client.zclient import ZClient +from ryu.services.protocols.zebra.client import event as zclient_event + + +class ZClientDumper(ZClient): + + @set_ev_cls(zclient_event.EventZServConnected) + def _zserv_connected_handler(self, ev): + self.logger.info( + 'Zebra server connected to %s: %s', + ev.zserv.sock.getpeername(), ev.zserv.sock) + + @set_ev_cls(event.EventZebraRouterIDUpdate) + def _router_id_update_handler(self, ev): + self.logger.info( + 'ZEBRA_ROUTER_ID_UPDATE received: %s', ev.__dict__) + + @set_ev_cls(event.EventZebraInterfaceAdd) + def _interface_add_handler(self, ev): + self.logger.info( + 'ZEBRA_INTERFACE_ADD received: %s', ev.__dict__) + + @set_ev_cls(event.EventZebraInterfaceAddressAdd) + def _interface_address_add_handler(self, ev): + self.logger.info( + 'ZEBRA_INTERFACE_ADDRESS_ADD received: %s', ev.__dict__) + + @set_ev_cls(zclient_event.EventZServDisconnected) + def _zserv_disconnected_handler(self, ev): + self.logger.info( + 'Zebra server disconnected: %s', ev.zserv.sock) diff -Nru ryu-4.9/ryu/services/protocols/zebra/client/zclient.py ryu-4.15/ryu/services/protocols/zebra/client/zclient.py --- ryu-4.9/ryu/services/protocols/zebra/client/zclient.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/client/zclient.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,345 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Zebra Client corresponding to 'zclient' structure. +""" + +import os +import socket +import struct + +import netaddr + +from ryu import cfg +from ryu.base.app_manager import RyuApp +from ryu.lib import hub +from ryu.lib import ip +from ryu.lib.packet import zebra +from ryu.lib.packet import safi as packet_safi +from ryu.services.protocols.zebra import event +from ryu.services.protocols.zebra.client import event as zclient_event + + +CONF = cfg.CONF['zapi'] +GLOBAL_CONF = cfg.CONF + + +def create_connection(address): + """ + Wrapper for socket.create_connection() function. + + If *address* (a 2-tuple ``(host, port)``) contains a valid IPv4/v6 + address, passes *address* to socket.create_connection(). + If *host* is valid path to Unix Domain socket, tries to connect to + the server listening on the given socket. + + :param address: IP address or path to Unix Domain socket. + :return: Socket instance. 
+ """ + host, _port = address + + if (netaddr.valid_ipv4(host) + or netaddr.valid_ipv6(host)): + return socket.create_connection(address) + elif os.path.exists(host): + sock = None + try: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(host) + except socket.error as e: + if sock is not None: + sock.close() + raise e + return sock + else: + raise ValueError('Invalid IP address or Unix Socket: %s' % host) + + +def get_zebra_route_type_by_name(route_type='BGP'): + """ + Returns the constant value for Zebra route type named "ZEBRA_ROUTE_*" + from its name. + + See "ZEBRA_ROUTE_*" constants in "ryu.lib.packet.zebra" module. + + :param route_type: Route type name (e.g., Kernel, BGP). + :return: Constant value for Zebra route type. + """ + return getattr(zebra, "ZEBRA_ROUTE_%s" % route_type.upper()) + + +class ZServer(object): + """ + Zebra server class. + """ + + def __init__(self, client): + self.client = client + self.logger = client.logger + self.is_active = False + self.sock = None # Client socket connecting to Zebra server + self.threads = [] + + def start(self): + self.is_active = True + try: + self.sock = create_connection(self.client.zserv_addr) + except socket.error as e: + self.logger.exception( + 'Cannot connect to Zebra server%s: %s', + self.client.zserv_addr, e) + self.stop() + return None + + self.sock.settimeout(GLOBAL_CONF.socket_timeout) + + self.threads.append(hub.spawn(self._send_loop)) + self.threads.append(hub.spawn(self._recv_loop)) + + # Send the following messages at starting connection. + # - ZEBRA_HELLO to register route_type + # - ZEBRA_ROUTER_ID_ADD to get router_id + # - ZEBRA_INTERFACE_ADD to get info for interfaces + self.client.send_msg( + zebra.ZebraMessage( + version=self.client.zserv_ver, + body=zebra.ZebraHello(self.client.route_type))) + self.client.send_msg( + zebra.ZebraMessage( + version=self.client.zserv_ver, + command=zebra.ZEBRA_ROUTER_ID_ADD)) + self.client.send_msg( + zebra.ZebraMessage( + version=self.client.zserv_ver, + command=zebra.ZEBRA_INTERFACE_ADD)) + + self.client.send_event_to_observers( + zclient_event.EventZServConnected(self)) + + hub.joinall(self.threads) + + self.client.send_event_to_observers( + zclient_event.EventZServDisconnected(self)) + + def stop(self): + self.is_active = False + + def _send_loop(self): + try: + while self.is_active: + buf = self.client.send_q.get() + self.sock.sendall(buf) + except socket.error as e: + self.logger.exception( + 'Error while sending message to Zebra server%s: %s', + self.client.zserv_addr, e) + + self.stop() + + def _recv_loop(self): + buf = b'' + min_len = recv_len = zebra.ZebraMessage.get_header_size( + self.client.zserv_ver) + try: + while self.is_active: + try: + recv_buf = self.sock.recv(recv_len) + except socket.timeout: + continue + + if len(recv_buf) == 0: + break + + buf += recv_buf + while len(buf) >= min_len: + (length,) = struct.unpack_from('!H', buf) + if (length - len(buf)) > 0: + # Need to receive remaining data + recv_len = length - len(buf) + break + + msg, _, buf = zebra._ZebraMessageFromZebra.parser(buf) + + ev = event.message_to_event(self.client, msg) + if ev: + self.client.send_event_to_observers(ev) + + except socket.error as e: + self.logger.exception( + 'Error while sending message to Zebra server%s: %s', + self.client.zserv_addr, e) + + self.stop() + + +class ZClient(RyuApp): + """ + The base class for Zebra client application. 
+ """ + _EVENTS = event.ZEBRA_EVENTS + [ + zclient_event.EventZServConnected, + zclient_event.EventZServDisconnected, + ] + + def __init__(self, *args, **kwargs): + super(ZClient, self).__init__(*args, **kwargs) + self.zserv = None # ZServer instance + self.zserv_addr = (CONF.server_host, CONF.server_port) + self.zserv_ver = CONF.server_version + self.send_q = hub.Queue(16) + self.route_type = get_zebra_route_type_by_name( + CONF.client_route_type) + + def start(self): + super(ZClient, self).start() + + return hub.spawn(self._service_loop) + + def _service_loop(self): + while self.is_active: + self.zserv = ZServer(self) + self.zserv.start() + + hub.sleep(CONF.retry_interval) + + self.close() + + def close(self): + self.is_active = False + self._send_event(self._event_stop, None) + self.zserv.stop() + + def send_msg(self, msg): + """ + Sends Zebra message. + + :param msg: Instance of py:class: `ryu.lib.packet.zebra.ZebraMessage`. + :return: Serialized msg if succeeded, otherwise None. + """ + if not self.is_active: + self.logger.debug( + 'Cannot send message: Already deactivated: msg=%s', msg) + return + elif not self.send_q: + self.logger.debug( + 'Cannot send message: Send queue does not exist: msg=%s', msg) + return + elif self.zserv_ver != msg.version: + self.logger.debug( + 'Zebra protocol version mismatch:' + 'server_version=%d, msg.version=%d', + self.zserv_ver, msg.version) + msg.version = self.zserv_ver # fixup + + self.send_q.put(msg.serialize()) + + def _send_ip_route_impl( + self, prefix, nexthops=None, + safi=packet_safi.UNICAST, flags=zebra.ZEBRA_FLAG_INTERNAL, + distance=None, metric=None, mtu=None, tag=None, + is_withdraw=False): + if ip.valid_ipv4(prefix): + if is_withdraw: + msg_cls = zebra.ZebraIPv4RouteDelete + else: + msg_cls = zebra.ZebraIPv4RouteAdd + elif ip.valid_ipv6(prefix): + if is_withdraw: + msg_cls = zebra.ZebraIPv6RouteDelete + else: + msg_cls = zebra.ZebraIPv6RouteAdd + else: + raise ValueError('Invalid prefix: %s' % prefix) + + nexthop_list = [] + for nexthop in nexthops: + if netaddr.valid_ipv4(nexthop): + nexthop_list.append(zebra.NextHopIPv4(addr=nexthop)) + elif netaddr.valid_ipv6(nexthop): + nexthop_list.append(zebra.NextHopIPv6(addr=nexthop)) + else: + raise ValueError('Invalid nexthop: %s' % nexthop) + + msg = zebra.ZebraMessage( + version=self.zserv_ver, + body=msg_cls( + route_type=self.route_type, + flags=flags, + message=0, + safi=safi, + prefix=prefix, + nexthops=nexthop_list, + distance=distance, + metric=metric, + mtu=mtu, + tag=tag)) + self.send_msg(msg) + + return msg + + def send_ip_route_add( + self, prefix, nexthops=None, + safi=packet_safi.UNICAST, flags=zebra.ZEBRA_FLAG_INTERNAL, + distance=None, metric=None, mtu=None, tag=None): + """ + Sends ZEBRA_IPV4/v6_ROUTE_ADD message to Zebra daemon. + + :param prefix: IPv4/v6 Prefix to advertise. + :param nexthops: List of nexthop addresses. + :param safi: SAFI to advertise. + :param flags: Message flags to advertise. See "ZEBRA_FLAG_*". + :param distance: (Optional) Distance to advertise. + :param metric: (Optional) Metric to advertise. + :param mtu: (Optional) MTU size to advertise. + :param tag: (Optional) TAG information to advertise. + :return: Zebra message instance to be sent. None if failed. 
+ """ + try: + return self._send_ip_route_impl( + prefix=prefix, nexthops=nexthops, safi=safi, flags=flags, + distance=distance, metric=metric, mtu=mtu, tag=tag, + is_withdraw=False) + except ValueError as e: + self.logger.exception( + 'Cannot send IP route add message: %s', e) + return None + + def send_ip_route_delete( + self, prefix, nexthops=None, + safi=packet_safi.UNICAST, flags=zebra.ZEBRA_FLAG_INTERNAL, + distance=None, metric=None, mtu=None, tag=None): + """ + Sends ZEBRA_IPV4/v6_ROUTE_DELETE message to Zebra daemon. + + :param prefix: IPv4/v6 Prefix to advertise. + :param nexthops: List of nexthop addresses. + :param safi: SAFI to advertise. + :param flags: Message flags to advertise. See "ZEBRA_FLAG_*". + :param distance: (Optional) Distance to advertise. + :param metric: (Optional) Metric to advertise. + :param mtu: (Optional) MTU size to advertise. + :param tag: (Optional) TAG information to advertise. + :return: Zebra message instance to be sent. None if failed. + """ + try: + return self._send_ip_route_impl( + prefix=prefix, nexthops=nexthops, safi=safi, flags=flags, + distance=distance, metric=metric, mtu=mtu, tag=tag, + is_withdraw=True) + except ValueError as e: + self.logger.exception( + 'Cannot send IP route delete message: %s', e) + return None diff -Nru ryu-4.9/ryu/services/protocols/zebra/db/base.py ryu-4.15/ryu/services/protocols/zebra/db/base.py --- ryu-4.9/ryu/services/protocols/zebra/db/base.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/db/base.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,70 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +import functools +import logging + +from sqlalchemy.ext.declarative import declarative_base + + +LOG = logging.getLogger(__name__) + +Base = declarative_base() +""" +Base class for Zebra protocol database tables. +""" + + +def _repr(self): + m = ', '.join( + ['%s=%r' % (k, v) + for k, v in self.__dict__.items() if not k.startswith('_')]) + return "%s(%s)" % (self.__class__.__name__, m) + +Base.__repr__ = _repr + + +def sql_function(func): + """ + Decorator for wrapping the given function in order to manipulate (CRUD) + the records safely. + + For the adding/updating/deleting records function, this decorator + invokes "Session.commit()" after the given function. + If any exception while modifying records raised, this decorator invokes + "Session.rollbacks()". + """ + @functools.wraps(func) + def _wrapper(session, *args, **kwargs): + ret = None + try: + ret = func(session, *args, **kwargs) + if session.dirty: + # If the given function has any update to records, + # commits them. + session.commit() + except Exception as e: + # If any exception raised, rollbacks the transaction. 
+ LOG.error('Error in %s: %s', func.__name__, e) + if session.dirty: + LOG.error('Do rolling back %s table', + session.dirty[0].__tablename__) + session.rollback() + + return ret + + return _wrapper diff -Nru ryu-4.9/ryu/services/protocols/zebra/db/__init__.py ryu-4.15/ryu/services/protocols/zebra/db/__init__.py --- ryu-4.9/ryu/services/protocols/zebra/db/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/db/__init__.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,42 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Database implementation for Zebra protocol service. +""" + +from __future__ import absolute_import + +from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker + +from ryu import cfg + +# Configuration parameters for Zebra service +CONF = cfg.CONF['zapi'] + +# Connect to database +ENGINE = create_engine(CONF.db_url) + +Session = sessionmaker(bind=ENGINE) +""" +Session class connecting to database +""" + +# Create all tables +from . import base +from . import interface +from . import route +base.Base.metadata.create_all(ENGINE) diff -Nru ryu-4.9/ryu/services/protocols/zebra/db/interface.py ryu-4.15/ryu/services/protocols/zebra/db/interface.py --- ryu-4.9/ryu/services/protocols/zebra/db/interface.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/db/interface.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,271 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +import logging + +from sqlalchemy import Column +from sqlalchemy import Integer +from sqlalchemy import String + +from ryu.lib import netdevice +from ryu.lib import ip +from ryu.lib.packet import zebra + +from . import base + + +LOG = logging.getLogger(__name__) + +# Default value for ethernet interface +DEFAULT_ETH_FLAGS = ( + netdevice.IFF_UP + | netdevice.IFF_BROADCAST + | netdevice.IFF_RUNNING + | netdevice.IFF_MULTICAST) +DEFAULT_ETH_MTU = 1500 + + +class Interface(base.Base): + """ + Interface table for Zebra protocol service. + + The default value for each fields suppose "Loopback" interface. + + ``ifindex``: Number of index. + + ``ifname``: Name of this interface. + + ``status``: A combination of flags + "ryu.lib.packet.zebra.ZEBRA_INTERFACE_*". + The default value shows "active" and "link-detect". 
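sql_function() above commits a dirty session after the wrapped call and rolls back on any exception, which keeps the CRUD helpers in the following modules short. A custom helper could reuse it the same way (rename_interface is hypothetical and uses the Interface model introduced just below; the [zapi] options are assumed to be registered):

.. code-block:: python

    from ryu.services.protocols.zebra.db import base
    from ryu.services.protocols.zebra.db.interface import Interface


    @base.sql_function
    def rename_interface(session, old_name, new_name):
        # Success with a dirty session -> commit; any exception -> rollback,
        # both handled by the decorator.
        intf = session.query(Interface).filter_by(ifname=old_name).first()
        if intf:
            intf.ifname = new_name
        return intf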
+ + ``flags``: A combination of flags "ryu.lib.netdevice.IFF_*". + The default value show "up", "loopback" and "running". + + ``metric``: Metric of this interface. + + ``ifmtu``: IPv4 MTU of this interface. + + ``ifmtu6``: IPv6 MTU of this interface. + + ``bandwidth``: Bandwidth of this interface. + + ``ll_type``: Link Layer Type. + One of "ryu.lib.packet.zebra.ZEBRA_LLT_*" types. + + ``hw_addr``: Hardware address of this interface (mostly, MAC address). + + ``inet``: List of IPv4 addresses separated by a comma. + (e.g., "192.168.1.100/24,192.168.2.100/24)". + + ``inet6``: List of IPv6 addresses separated by a comma. + """ + __tablename__ = 'interface' + + ifindex = Column(Integer, primary_key=True) + ifname = Column(String, default="lo") + status = Column( + Integer, + default=( + zebra.ZEBRA_INTERFACE_ACTIVE + | zebra.ZEBRA_INTERFACE_LINKDETECTION)) + flags = Column( + Integer, + default=( + netdevice.IFF_UP + | netdevice.IFF_LOOPBACK + | netdevice.IFF_RUNNING)) + metric = Column(Integer, default=1) + ifmtu = Column(Integer, default=0x10000) + ifmtu6 = Column(Integer, default=0x10000) + bandwidth = Column(Integer, default=0) + ll_type = Column(Integer, default=zebra.ZEBRA_LLT_ETHER) + hw_addr = Column(String, default='00:00:00:00:00:00') + # Note: Only the PostgreSQL backend has support sqlalchemy.ARRAY, + # we use the comma separated string as array instead. + inet = Column(String, default='') + inet6 = Column(String, default='') + + +@base.sql_function +def ip_link_show(session, **kwargs): + """ + Returns a first interface record matching the given filtering rules. + + The arguments for "kwargs" is the same with Interface class. + + :param session: Session instance connecting to database. + :param kwargs: Filtering rules to query. + :return: An instance of Interface record. + """ + return session.query(Interface).filter_by(**kwargs).first() + + +@base.sql_function +def ip_link_show_all(session, **kwargs): + """ + Returns all interface records matching the given filtering rules. + + The arguments for "kwargs" is the same with Interface class. + + :param session: Session instance connecting to database. + :param kwargs: Filtering rules to query. + :return: A list of Interface records. + """ + return session.query(Interface).filter_by(**kwargs).all() + + +@base.sql_function +def ip_link_add(session, name, type_='loopback', lladdr='00:00:00:00:00:00'): + """ + Adds an interface record into Zebra protocol service database. + + The arguments are similar to "ip link add" command of iproute2. + + :param session: Session instance connecting to database. + :param name: Name of interface. + :param type_: Type of interface. 'loopback' or 'ethernet'. + :param lladdr: Link layer address. Mostly MAC address. + :return: Instance of added record or already existing record. + """ + intf = ip_link_show(session, ifname=name) + if intf: + LOG.debug('Interface "%s" already exists: %s', intf.ifname, intf) + return intf + + if type_ == 'ethernet': + intf = Interface( + ifname=name, + flags=DEFAULT_ETH_FLAGS, + ifmtu=DEFAULT_ETH_MTU, + ifmtu6=DEFAULT_ETH_MTU, + hw_addr=lladdr) + else: # type_ == 'loopback': + intf = Interface( + ifname=name, + inet='127.0.0.1/8', + inet6='::1/128') + + session.add(intf) + + return intf + + +@base.sql_function +def ip_link_delete(session, name): + """ + Deletes an interface record from Zebra protocol service database. + + The arguments are similar to "ip link delete" command of iproute2. + + :param session: Session instance connecting to database. 
+ :param name: Name of interface. + :return: Name of interface which was deleted. None if failed. + """ + intf = ip_link_show(session, ifname=name) + if not intf: + LOG.debug('Interface "%s" does not exist', name) + return None + + session.delete(intf) + + return name + + +# Currently, functions corresponding to "ip link show" and "ip address show" +# have the same implementation. +ip_address_show = ip_link_show +ip_address_show_all = ip_link_show_all + + +@base.sql_function +def ip_address_add(session, ifname, ifaddr): + """ + Adds an IP address to interface record identified with the given "ifname". + + The arguments are similar to "ip address add" command of iproute2. + + :param session: Session instance connecting to database. + :param ifname: Name of interface. + :param ifaddr: IPv4 or IPv6 address. + :return: Instance of record or "None" if failed. + """ + def _append_inet_addr(intf_inet, addr): + addr_list = intf_inet.split(',') + if addr in addr_list: + LOG.debug( + 'Interface "%s" has already "ifaddr": %s', + intf.ifname, addr) + return intf_inet + else: + addr_list.append(addr) + return ','.join(addr_list) + + intf = ip_link_show(session, ifname=ifname) + if not intf: + LOG.debug('Interface "%s" does not exist', ifname) + return None + + if ip.valid_ipv4(ifaddr): + intf.inet = _append_inet_addr(intf.inet, ifaddr) + elif ip.valid_ipv6(ifaddr): + intf.inet6 = _append_inet_addr(intf.inet6, ifaddr) + else: + LOG.debug('Invalid IP address for "ifaddr": %s', ifaddr) + return None + + return intf + + +@base.sql_function +def ip_address_delete(session, ifname, ifaddr): + """ + Deletes an IP address from interface record identified with the given + "ifname". + + The arguments are similar to "ip address delete" command of iproute2. + + :param session: Session instance connecting to database. + :param ifname: Name of interface. + :param ifaddr: IPv4 or IPv6 address. + :return: Instance of record or "None" if failed. + """ + def _remove_inet_addr(intf_inet, addr): + addr_list = intf_inet.split(',') + if addr not in addr_list: + LOG.debug( + 'Interface "%s" does not have "ifaddr": %s', + intf.ifname, addr) + return intf_inet + else: + addr_list.remove(addr) + return ','.join(addr_list) + + intf = ip_link_show(session, ifname=ifname) + if not intf: + LOG.debug('Interface "%s" does not exist', ifname) + return None + + if ip.valid_ipv4(ifaddr): + intf.inet = _remove_inet_addr(intf.inet, ifaddr) + elif ip.valid_ipv6(ifaddr): + intf.inet6 = _remove_inet_addr(intf.inet6, ifaddr) + else: + LOG.debug('Invalid IP address for "ifaddr": %s', ifaddr) + return None + + return intf diff -Nru ryu-4.9/ryu/services/protocols/zebra/db/route.py ryu-4.15/ryu/services/protocols/zebra/db/route.py --- ryu-4.9/ryu/services/protocols/zebra/db/route.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/db/route.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,201 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
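The interface helpers above mirror iproute2's "ip link" and "ip address" commands against the service database. A short session sketch (run with the [zapi] options registered, e.g. under ryu-manager, so db.Session points at the configured db_url; names and addresses are illustrative):

.. code-block:: python

    from ryu.services.protocols.zebra import db

    session = db.Session()

    db.interface.ip_link_add(session, 'eth0', type_='ethernet',
                             lladdr='02:00:00:00:00:01')
    db.interface.ip_address_add(session, 'eth0', '192.168.1.1/24')

    for intf in db.interface.ip_link_show_all(session):
        print('%s inet=%s inet6=%s' % (intf.ifname, intf.inet, intf.inet6))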
+ +from __future__ import absolute_import + +import logging +import socket + +import netaddr +from sqlalchemy import Column +from sqlalchemy import Boolean +from sqlalchemy import Integer +from sqlalchemy import String + +from ryu.lib.packet import safi as packet_safi +from ryu.lib.packet import zebra + +from . import base +from . import interface + + +LOG = logging.getLogger(__name__) + + +class Route(base.Base): + """ + Route table (like routing table) for Zebra protocol service. + + ``id``: (Primary Key) ID of this route. + + ``family``: Address Family, not AFI (Address Family Identifiers). + Mostly, "socket.AF_INET" or "socket.AF_INET6". + + ``safi``: Subsequent Address Family Identifiers. + + ``destination``: Destination prefix of this route. + + ``gateway``: Next hop address of this route. + The default is "" (empty string). + + ``ifindex``: Index of interface to forward packets. + + ``source``: Source IP address of this route, which should be an + address assigned to the local interface. + + ``route_type``: Route Type of this route. + This type shows which daemon (or kernel) generated this route. + + ``is_selected``: Whether this route is selected for "destination". + """ + __tablename__ = 'route' + + id = Column(Integer, primary_key=True) + family = Column(Integer, default=socket.AF_INET) + safi = Column(Integer, default=packet_safi.UNICAST) + destination = Column(String, default='0.0.0.0/0') + gateway = Column(String, default='') + ifindex = Column(Integer, default=0) + source = Column(String, default='') + route_type = Column(Integer, default=zebra.ZEBRA_ROUTE_KERNEL) + is_selected = Column(Boolean, default=False) + + +@base.sql_function +def ip_route_show(session, destination, device, **kwargs): + """ + Returns a selected route record matching the given filtering rules. + + The arguments are similar to "ip route showdump" command of iproute2. + + :param session: Session instance connecting to database. + :param destination: Destination prefix. + :param device: Source device. + :param kwargs: Filtering rules to query. + :return: Instance of route record or "None" if failed. + """ + intf = interface.ip_link_show(session, ifname=device) + if not intf: + LOG.debug('Interface "%s" does not exist', device) + return None + + return session.query(Route).filter_by( + destination=destination, ifindex=intf.ifindex, **kwargs).first() + + +@base.sql_function +def ip_route_show_all(session, **kwargs): + """ + Returns a selected route record matching the given filtering rules. + + The arguments are similar to "ip route showdump" command of iproute2. + + If "is_selected=True", disables the existing selected route for the + given destination. + + :param session: Session instance connecting to database. + :param kwargs: Filtering rules to query. + :return: A list of route records. + """ + return session.query(Route).filter_by(**kwargs).all() + + +@base.sql_function +def ip_route_add(session, destination, device=None, gateway='', source='', + ifindex=0, route_type=zebra.ZEBRA_ROUTE_KERNEL, + is_selected=True): + """ + Adds a route record into Zebra protocol service database. + + The arguments are similar to "ip route add" command of iproute2. + + If "is_selected=True", disables the existing selected route for the + given destination. + + :param session: Session instance connecting to database. + :param destination: Destination prefix. + :param device: Source device. + :param gateway: Gateway IP address. + :param source: Source IP address. + :param ifindex: Index of source device. 
+ :param route_type: Route type of daemon (or kernel). + :param is_selected: If select the given route as "in use" or not. + :return: Instance of record or "None" if failed. + """ + if device: + intf = interface.ip_link_show(session, ifname=device) + if not intf: + LOG.debug('Interface "%s" does not exist', device) + return None + ifindex = ifindex or intf.ifindex + + route = ip_route_show(session, destination=destination, device=device) + if route: + LOG.debug( + 'Route to "%s" already exists on "%s" device', + destination, device) + return route + + dest_addr, dest_prefix_num = destination.split('/') + dest_prefix_num = int(dest_prefix_num) + if netaddr.valid_ipv4(dest_addr) and 0 <= dest_prefix_num <= 32: + family = socket.AF_INET + elif netaddr.valid_ipv6(dest_addr) and 0 <= dest_prefix_num <= 128: + family = socket.AF_INET6 + else: + LOG.debug('Invalid IP address for "prefix": %s', destination) + return None + safi = packet_safi.UNICAST + + if is_selected: + old_routes = ip_route_show_all( + session, destination=destination, is_selected=True) + for old_route in old_routes: + if old_route: + LOG.debug('Set existing route to unselected: %s', old_route) + old_route.is_selected = False + + new_route = Route( + family=family, + safi=safi, + destination=destination, + gateway=gateway, + ifindex=ifindex, + source=source, + route_type=route_type, + is_selected=is_selected) + + session.add(new_route) + + return new_route + + +@base.sql_function +def ip_route_delete(session, destination, **kwargs): + """ + Deletes route record(s) from Zebra protocol service database. + + The arguments are similar to "ip route delete" command of iproute2. + + :param session: Session instance connecting to database. + :param destination: Destination prefix. + :param kwargs: Filtering rules to query. + :return: Records which are deleted. + """ + routes = ip_route_show_all(session, destination=destination, **kwargs) + for route in routes: + session.delete(route) + + return routes diff -Nru ryu-4.9/ryu/services/protocols/zebra/event.py ryu-4.15/ryu/services/protocols/zebra/event.py --- ryu-4.9/ryu/services/protocols/zebra/event.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/event.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,122 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Events for Zebra protocol service. +""" + +import inspect +import logging + +from ryu import utils +from ryu.controller import event +from ryu.lib.packet import zebra + + +LOG = logging.getLogger(__name__) +MOD = utils.import_module(__name__) + +ZEBRA_EVENTS = [] + + +class EventZebraBase(event.EventBase): + """ + The base class for Zebra protocol service event class. + + The subclasses have at least ``zclient`` and the same attributes with + :py:class: `ryu.lib.packet.zebra.ZebraMessage`. + ``zclient`` is an instance of Zebra client class. 
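Likewise, the route helpers follow "ip route" semantics, including unselecting any previously selected route for the same destination. Continuing the sketch above (the device must already exist in the interface table):

.. code-block:: python

    from ryu.lib.packet import zebra
    from ryu.services.protocols.zebra import db

    session = db.Session()

    db.route.ip_route_add(session, '10.0.0.0/24', device='eth0',
                          gateway='192.168.1.254',
                          route_type=zebra.ZEBRA_ROUTE_STATIC)

    route = db.route.ip_route_show(session, destination='10.0.0.0/24',
                                   device='eth0')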
See + :py:class: `ryu.services.protocols.zebra.client.zclient.ZClient` or + :py:class: `ryu.services.protocols.zebra.server.zserver.ZClient`. + + The subclasses are named as:: + + ``"Event" + `` + + For Example, if the service received ZEBRA_INTERFACE_ADD message, + the body class should be + :py:class: `ryu.lib.packet.zebra.ZebraInterfaceAdd`, then the event + class will be named as:: + + "Event" + "ZebraInterfaceAdd" = "EventZebraInterfaceAdd" + + ``msg`` argument must be an instance of + :py:class: `ryu.lib.packet.zebra.ZebraMessage` and used to extract the + attributes for the event classes. + """ + + def __init__(self, zclient, msg): + super(EventZebraBase, self).__init__() + assert isinstance(msg, zebra.ZebraMessage) + self.__dict__ = msg.__dict__ + self.zclient = zclient + + def __repr__(self): + m = ', '.join( + ['%s=%r' % (k, v) + for k, v in self.__dict__.items() if not k.startswith('_')]) + return "%s(%s)" % (self.__class__.__name__, m) + + __str__ = __repr__ + + +def _event_name(body_cls): + return 'Event%s' % body_cls.__name__ + + +def message_to_event(zclient, msg): + """ + Converts Zebra protocol message instance to Zebra protocol service + event instance. + + If corresponding event class is not defined, returns None. + + :param zclient: Zebra client instance. + :param msg: Zebra protocol message. + :return: Zebra protocol service event. + """ + if not isinstance(msg, zebra.ZebraMessage): + return None + + body_cls = zebra._ZebraMessageBody.lookup_command(msg.command) + ev_cls = getattr(MOD, _event_name(body_cls), None) + if ev_cls is None: + return None + + return ev_cls(zclient, msg) + + +def _define_event_class(body_cls): + name = _event_name(body_cls) + + event_cls = type(name, (EventZebraBase,), {}) + globals()[name] = event_cls + + return event_cls + + +def _generate_event_classes(): + for zebra_cls in zebra.__dict__.values(): + if (not inspect.isclass(zebra_cls) + or not issubclass(zebra_cls, zebra._ZebraMessageBody) + or zebra_cls.__name__.startswith('_')): + continue + + ev = _define_event_class(zebra_cls) + # LOG.debug('Generated Zebra event: %s' % ev) + ZEBRA_EVENTS.append(ev) + + +_generate_event_classes() diff -Nru ryu-4.9/ryu/services/protocols/zebra/__init__.py ryu-4.15/ryu/services/protocols/zebra/__init__.py --- ryu-4.9/ryu/services/protocols/zebra/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/__init__.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,18 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Implementation for Zebra protocol service. +""" diff -Nru ryu-4.9/ryu/services/protocols/zebra/server/event.py ryu-4.15/ryu/services/protocols/zebra/server/event.py --- ryu-4.9/ryu/services/protocols/zebra/server/event.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/server/event.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,46 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. 
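event.py above auto-generates one event class per Zebra message body class and collects them in ZEBRA_EVENTS, so both the client and server services can declare them in _EVENTS without hand-written boilerplate. For example:

.. code-block:: python

    from ryu.services.protocols.zebra import event

    # One generated class per ryu.lib.packet.zebra message body, named
    # "Event<BodyClassName>", e.g. EventZebraHello, EventZebraInterfaceAdd.
    for ev_cls in event.ZEBRA_EVENTS:
        print(ev_cls.__name__)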
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Events generated by Zebra Server service. +""" + +from ryu.controller.event import EventBase + + +class EventZServerBase(EventBase): + """ + The base class for the event generated by ZServer. + """ + + +class EventZClientConnected(EventZServerBase): + """ + The event class for notifying the connection from Zebra client. + """ + + def __init__(self, zclient): + super(EventZClientConnected, self).__init__() + self.zclient = zclient + + +class EventZClientDisconnected(EventZServerBase): + """ + The event class for notifying the disconnection to Zebra client. + """ + + def __init__(self, zclient): + super(EventZClientDisconnected, self).__init__() + self.zclient = zclient diff -Nru ryu-4.9/ryu/services/protocols/zebra/server/__init__.py ryu-4.15/ryu/services/protocols/zebra/server/__init__.py --- ryu-4.9/ryu/services/protocols/zebra/server/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/server/__init__.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,20 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Server implementation for Zebra protocol service. + +This module provides the server side implementation for Zebra protocol. +""" diff -Nru ryu-4.9/ryu/services/protocols/zebra/server/sample_dumper.py ryu-4.15/ryu/services/protocols/zebra/server/sample_dumper.py --- ryu-4.9/ryu/services/protocols/zebra/server/sample_dumper.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/server/sample_dumper.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,54 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Sample Zebra Server application dumping received events. 
+""" + +from ryu.base.app_manager import RyuApp +from ryu.controller.handler import set_ev_cls +from ryu.services.protocols.zebra import event +from ryu.services.protocols.zebra.server.zserver import ZServer +from ryu.services.protocols.zebra.server import event as zserver_event + + +class ZServerDumper(RyuApp): + _CONTEXTS = { + "zserver": ZServer, + } + + def __init__(self, *args, **kwargs): + super(ZServerDumper, self).__init__(*args, **kwargs) + self.zserver = kwargs["zserver"] + + @set_ev_cls(zserver_event.EventZClientConnected) + def _zclient_connected_handler(self, ev): + self.logger.info('Zebra client connected: %s', ev.zclient.addr) + + @set_ev_cls(zserver_event.EventZClientDisconnected) + def _zclient_disconnected_handler(self, ev): + self.logger.info('Zebra client disconnected: %s', ev.zclient.addr) + + @set_ev_cls([event.EventZebraIPv4RouteAdd, + event.EventZebraIPv6RouteAdd]) + def _ip_route_add_handler(self, ev): + self.logger.info( + 'Client %s advertised IP route: %s', ev.zclient.addr, ev.body) + + @set_ev_cls([event.EventZebraIPv4RouteDelete, + event.EventZebraIPv6RouteDelete]) + def _ip_route_delete_handler(self, ev): + self.logger.info( + 'Client %s withdrew IP route: %s', ev.zclient.addr, ev.body) diff -Nru ryu-4.9/ryu/services/protocols/zebra/server/zserver.py ryu-4.15/ryu/services/protocols/zebra/server/zserver.py --- ryu-4.9/ryu/services/protocols/zebra/server/zserver.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/services/protocols/zebra/server/zserver.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,333 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Zebra Server corresponding to 'zserv' structure. +""" + +import contextlib +import logging +import os +import socket +import struct + +import netaddr + +from ryu import cfg +from ryu.base import app_manager +from ryu.base.app_manager import RyuApp +from ryu.controller.handler import set_ev_cls +from ryu.lib import hub +from ryu.lib.packet import zebra + +from ryu.services.protocols.zebra import db +from ryu.services.protocols.zebra import event +from ryu.services.protocols.zebra.server import event as zserver_event + + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF['zapi'] +GLOBAL_CONF = cfg.CONF + +# Session to database of Zebra protocol service +SESSION = db.Session() + + +class ZClient(object): + """ + Zebra client class. 
+ """ + + def __init__(self, server, sock, addr): + self.server = server + self.sock = sock + self.addr = addr + self.logger = server.logger + self.is_active = False + self._threads = [] + self.send_q = hub.Queue(16) + + # Zebra protocol version + self.zserv_ver = CONF.server_version + + # Zebra route type distributed by client (not initialized yet) + self.route_type = None + + def start(self): + self.is_active = True + self.sock.settimeout(GLOBAL_CONF.socket_timeout) + + self._threads.append(hub.spawn(self._send_loop)) + self._threads.append(hub.spawn(self._recv_loop)) + + self.server.send_event_to_observers( + zserver_event.EventZClientConnected(self)) + + hub.joinall(self._threads) + + self.server.send_event_to_observers( + zserver_event.EventZClientDisconnected(self)) + + def stop(self): + self.is_active = False + + def _send_loop(self): + try: + while self.is_active: + buf = self.send_q.get() + self.sock.sendall(buf) + except socket.error as e: + self.logger.exception( + 'Error while sending message to Zebra client%s: %s', + self.addr, e) + + self.stop() + + def _recv_loop(self): + buf = b'' + min_len = recv_len = zebra.ZebraMessage.get_header_size( + self.zserv_ver) + try: + while self.is_active: + try: + recv_buf = self.sock.recv(recv_len) + except socket.timeout: + continue + + if len(recv_buf) == 0: + break + + buf += recv_buf + while len(buf) >= min_len: + (length,) = struct.unpack_from('!H', buf) + if (length - len(buf)) > 0: + # Need to receive remaining data + recv_len = length - len(buf) + break + + msg, _, buf = zebra.ZebraMessage.parser(buf) + + ev = event.message_to_event(self, msg) + if ev: + self.logger.debug('Notify event: %s', ev) + self.server.send_event_to_observers(ev) + + except socket.error as e: + self.logger.exception( + 'Error while sending message to Zebra client%s: %s', + self.addr, e) + + self.stop() + + def send_msg(self, msg): + """ + Sends Zebra message. + + :param msg: Instance of py:class: `ryu.lib.packet.zebra.ZebraMessage`. + :return: Serialized msg if succeeded, otherwise None. + """ + if not self.is_active: + self.logger.debug( + 'Cannot send message: Already deactivated: msg=%s', msg) + return + elif not self.send_q: + self.logger.debug( + 'Cannot send message: Send queue does not exist: msg=%s', msg) + return + elif self.zserv_ver != msg.version: + self.logger.debug( + 'Zebra protocol version mismatch:' + 'server_version=%d, msg.version=%d', + self.zserv_ver, msg.version) + msg.version = self.zserv_ver # fixup + + self.send_q.put(msg.serialize()) + + +def zclient_connection_factory(sock, addr): + LOG.debug('Connected from client: %s: %s', addr, sock) + zserv = app_manager.lookup_service_brick(ZServer.__name__) + with contextlib.closing(ZClient(zserv, sock, addr)) as zclient: + try: + zclient.start() + except Exception as e: + LOG.error('Error in client%s: %s', addr, e) + raise e + + +def detect_address_family(host): + if netaddr.valid_ipv4(host): + return socket.AF_INET + elif netaddr.valid_ipv6(host): + return socket.AF_INET6 + elif os.path.isdir(os.path.dirname(host)): + return socket.AF_UNIX + else: + return None + + +class ZServer(RyuApp): + """ + The base class for Zebra server application. 
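+
+    The listen address is taken from the ``server_host`` and ``server_port``
+    options of the ``zapi`` configuration section, and ``router_id`` seeds
+    the Router ID returned to clients that request it (see the ``CONF``
+    references below).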
+ """ + _EVENTS = event.ZEBRA_EVENTS + [ + zserver_event.EventZClientConnected, + zserver_event.EventZClientDisconnected, + ] + + def __init__(self, *args, **kwargs): + super(ZServer, self).__init__(*args, **kwargs) + self.zserv = None + self.zserv_addr = (CONF.server_host, CONF.server_port) + self.zapi_connection_family = detect_address_family(CONF.server_host) + + # Initial Router ID for Zebra server + self.router_id = CONF.router_id + + def start(self): + super(ZServer, self).start() + + if self.zapi_connection_family == socket.AF_UNIX: + unix_sock_dir = os.path.dirname(CONF.server_host) + # Makes sure the unix socket does not already exist + if os.path.exists(CONF.server_host): + os.remove(CONF.server_host) + if not os.path.isdir(unix_sock_dir): + os.mkdir(unix_sock_dir) + os.chmod(unix_sock_dir, 0o777) + + try: + self.zserv = hub.StreamServer( + self.zserv_addr, zclient_connection_factory) + except OSError as e: + self.logger.error( + 'Cannot start Zebra server%s: %s', self.zserv_addr, e) + raise e + + if self.zapi_connection_family == socket.AF_UNIX: + os.chmod(CONF.server_host, 0o777) + + self._add_lo_interface() + + return hub.spawn(self.zserv.serve_forever) + + def _add_lo_interface(self): + intf = db.interface.ip_link_add(SESSION, 'lo') + if intf: + self.logger.debug('Added interface "%s": %s', intf.ifname, intf) + + route = db.route.ip_route_add( + SESSION, + destination='127.0.0.0/8', + device='lo', + source='127.0.0.1/8', + route_type=zebra.ZEBRA_ROUTE_CONNECT) + if route: + self.logger.debug( + 'Added route to "%s": %s', route.destination, route) + + @set_ev_cls(event.EventZebraHello) + def _hello_handler(self, ev): + if ev.body is None: + self.logger.debug('Client %s says hello.', ev.zclient) + return + + # Set distributed route_type to ZClient + ev.zclient.route_type = ev.body.route_type + self.logger.debug( + 'Client %s says hello and bids fair to announce only %s routes', + ev.zclient, ev.body.route_type) + + @set_ev_cls(event.EventZebraRouterIDAdd) + def _router_id_add_handler(self, ev): + self.logger.debug( + 'Client %s requests router_id, server will response: router_id=%s', + ev.zclient, self.router_id) + + # Send ZEBRA_ROUTER_ID_UPDATE for response + msg = zebra.ZebraMessage( + body=zebra.ZebraRouterIDUpdate( + family=socket.AF_INET, + prefix='%s/32' % self.router_id)) + ev.zclient.send_msg(msg) + + @set_ev_cls(event.EventZebraInterfaceAdd) + def _interface_add_handler(self, ev): + self.logger.debug('Client %s requested all interfaces', ev.zclient) + + interfaces = db.interface.ip_address_show_all(SESSION) + self.logger.debug('Server will response interfaces: %s', interfaces) + for intf in interfaces: + msg = zebra.ZebraMessage( + body=zebra.ZebraInterfaceAdd( + ifname=intf.ifname, + ifindex=intf.ifindex, + status=intf.status, + if_flags=intf.flags, + metric=intf.metric, + ifmtu=intf.ifmtu, + ifmtu6=intf.ifmtu6, + bandwidth=intf.bandwidth, + ll_type=intf.ll_type, + hw_addr=intf.hw_addr)) + ev.zclient.send_msg(msg) + + routes = db.route.ip_route_show_all( + SESSION, ifindex=intf.ifindex, is_selected=True) + self.logger.debug('Server will response routes: %s', routes) + for route in routes: + dest, _ = route.destination.split('/') + msg = zebra.ZebraMessage( + body=zebra.ZebraInterfaceAddressAdd( + ifindex=intf.ifindex, + ifc_flags=0, + family=None, + prefix=route.source, + dest=dest)) + ev.zclient.send_msg(msg) + + @set_ev_cls([event.EventZebraIPv4RouteAdd, + event.EventZebraIPv6RouteAdd]) + def _ip_route_add_handler(self, ev): + self.logger.debug( + 'Client %s 
advertised IP route: %s', ev.zclient, ev.body) + + for nexthop in ev.body.nexthops: + route = db.route.ip_route_add( + SESSION, + destination=ev.body.prefix, + gateway=nexthop.addr, + ifindex=nexthop.ifindex or 0, + route_type=ev.body.route_type) + if route: + self.logger.debug( + 'Added route to "%s": %s', route.destination, route) + + @set_ev_cls([event.EventZebraIPv4RouteDelete, + event.EventZebraIPv6RouteDelete]) + def _ip_route_delete_handler(self, ev): + self.logger.debug( + 'Client %s withdrew IP route: %s', ev.zclient, ev.body) + + for nexthop in ev.body.nexthops: + routes = db.route.ip_route_delete( + SESSION, + destination=ev.body.prefix, + gateway=nexthop.addr, + route_type=ev.body.route_type) + if routes: + self.logger.debug( + 'Deleted routes to "%s": %s', ev.body.prefix, routes) diff -Nru ryu-4.9/ryu/tests/bin/ryu-client ryu-4.15/ryu/tests/bin/ryu-client --- ryu-4.9/ryu/tests/bin/ryu-client 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/bin/ryu-client 1970-01-01 00:00:00.000000000 +0000 @@ -1,107 +0,0 @@ -#!/usr/bin/env python -# -# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation. -# Copyright (C) 2011 Isaku Yamahata -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -from optparse import OptionParser - -from ryu.app.client import OFPClient -from ryu.app.client import QuantumIfaceClient -from ryu.app.client import SwitchConfClient -from ryu.app.client import TunnelClient -from ryu.app.client import TopologyClient - - -def client_test(): - parser = OptionParser(usage="Usage: %prog [OPTIONS] [args]") - parser.add_option("-H", "--host", dest="host", type="string", - default="127.0.0.1", help="ip address rest api service") - parser.add_option("-p", "--port", dest="port", type="int", default="8080") - - options, args = parser.parse_args() - if len(args) == 0: - parser.print_help() - sys.exit(1) - - address = options.host + ':' + str(options.port) - ofp_client = OFPClient(address) - tun_client = TunnelClient(address) - sc_client = SwitchConfClient(address) - qi_client = QuantumIfaceClient(address) - topo_client = TopologyClient(address) - - commands = { - 'list_nets': lambda a: sys.stdout.write(ofp_client.get_networks()), - 'create_net': lambda a: ofp_client.create_network(a[1]), - 'update_net': lambda a: ofp_client.update_network(a[1]), - 'delete_net': lambda a: ofp_client.delete_network(a[1]), - 'list_ports': lambda a: sys.stdout.write(ofp_client.get_ports(a[1])), - 'create_port': lambda a: ofp_client.create_port(a[1], a[2], a[3]), - 'update_port': lambda a: ofp_client.update_port(a[1], a[2], a[3]), - 'delete_port': lambda a: ofp_client.delete_port(a[1], a[2], a[3]), - - 'get_tun_key': lambda a: sys.stdout.write( - tun_client.get_tunnel_key(a[1])), - 'delete_tun_key': lambda a: tun_client.delete_tunnel_key(a[1]), - 'create_tun_key': lambda a: tun_client.create_tunnel_key(a[1], a[2]), - 'update_tun_key': lambda a: tun_client.update_tunnel_key(a[1], a[2]), - 'list_tun_ports': lambda a: sys.stdout.write( - tun_client.list_ports(a[1])), - 
'delete_tun_port': lambda a: tun_client.delete_port(a[1], a[2]), - 'get_remote_dpid': lambda a: sys.stdout.write( - tun_client.get_remote_dpid(a[1], a[2])), - 'create_remote_dpid': lambda a: tun_client.create_remote_dpid( - a[1], a[2], a[3]), - 'update_remote_dpid': lambda a: tun_client.update_remote_dpid( - a[1], a[2], a[3]), - - 'sc_list_sw': lambda a: sys.stdout.write(sc_client.list_switches()), - 'sc_delete_sw': lambda a: sc_client.delete_switch(a[1]), - 'sc_list_keys': lambda a: sys.stdout.write(sc_client.list_keys(a[1])), - 'sc_set_key': lambda a: sc_client.set_key(a[1], a[2], a[3]), - 'sc_get_key': lambda a: sys.stdout.write( - sc_client.get_key(a[1], a[2])), - 'sc_delete_key': lambda a: sc_client.delete_key(a[1], a[2]), - - 'qi_list_iface': lambda a: sys.stdout.write(qi_client.list_ifaces()), - 'qi_delete_iface': lambda a: qi_client.delete_iface(a[1]), - 'qi_list_keys': lambda a: sys.stdout.write( - qi_client.list_keys(a[1])), - 'qi_create_key': lambda a: qi_client.create_network_id( - a[1], a[2], a[3]), - 'qi_update_key': lambda a: qi_client.update_network_id( - a[1], a[2], a[3]), - 'qi_get_net_id': lambda a: sys.stdout.write( - qi_client.get_network_id(a[1])), - 'qi_create_net_id': lambda a: qi_client.create_network_id(a[1], a[2]), - 'qi_update_net_id': lambda a: qi_client.update_network_id(a[1], a[2]), - - 'topo_list_switches': lambda a: topo_client.list_switches(), - 'topo_list_links': lambda a: topo_client.list_links(), - } - - # allow '-', instead of '_' - commands.update(dict([(k.replace('_', '-'), v) - for (k, v) in commands.items()])) - - cmd = args[0] - res = commands[cmd](args) - if res: - print res.read() - -if __name__ == "__main__": - client_test() diff -Nru ryu-4.9/ryu/tests/integrated/bgp/base_ip6.py ryu-4.15/ryu/tests/integrated/bgp/base_ip6.py --- ryu-4.9/ryu/tests/integrated/bgp/base_ip6.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/integrated/bgp/base_ip6.py 2017-07-02 11:08:32.000000000 +0000 @@ -16,6 +16,8 @@ from __future__ import absolute_import +import logging +import sys import unittest from ryu.tests.integrated.common import docker_base as ctn_base @@ -23,22 +25,25 @@ from ryu.tests.integrated.common import quagga -class BgpSpeakerTestBase(unittest.TestCase): +LOG = logging.getLogger(__name__) + +class BgpSpeakerTestBase(unittest.TestCase): + images = [] + containers = [] + bridges = [] checktime = 120 @classmethod def setUpClass(cls): - cls.images = [] - cls.containers = [] - cls.bridges = [] - cls.brdc1 = ctn_base.Bridge(name='brip6dc1', subnet='2001:10::/32') cls.bridges.append(cls.brdc1) cls.dockerimg = ctn_base.DockerImage() - cls.r_img = cls.dockerimg.create_ryu(check_exist=True) + image = 'python:%d.%d' % ( + sys.version_info.major, sys.version_info.minor) + cls.r_img = cls.dockerimg.create_ryu(image=image, check_exist=True) cls.images.append(cls.r_img) cls.q_img = 'osrg/quagga' cls.images.append(cls.q_img) @@ -72,7 +77,7 @@ try: ctn.stop() except ctn_base.CommandError as e: - pass + LOG.exception('Exception when stopping containers: %s', e) ctn.remove() for br in cls.bridges: br.delete() diff -Nru ryu-4.9/ryu/tests/integrated/bgp/base.py ryu-4.15/ryu/tests/integrated/bgp/base.py --- ryu-4.9/ryu/tests/integrated/bgp/base.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/integrated/bgp/base.py 2017-07-02 11:08:32.000000000 +0000 @@ -16,6 +16,8 @@ from __future__ import absolute_import +import logging +import sys import unittest from ryu.tests.integrated.common import docker_base as ctn_base @@ -23,22 +25,25 @@ from 
ryu.tests.integrated.common import quagga -class BgpSpeakerTestBase(unittest.TestCase): +LOG = logging.getLogger(__name__) + +class BgpSpeakerTestBase(unittest.TestCase): + images = [] + containers = [] + bridges = [] checktime = 120 @classmethod def setUpClass(cls): - cls.images = [] - cls.containers = [] - cls.bridges = [] - cls.brdc1 = ctn_base.Bridge(name='brdc1', subnet='192.168.10.0/24') cls.bridges.append(cls.brdc1) cls.dockerimg = ctn_base.DockerImage() - cls.r_img = cls.dockerimg.create_ryu(check_exist=True) + image = 'python:%d.%d' % ( + sys.version_info.major, sys.version_info.minor) + cls.r_img = cls.dockerimg.create_ryu(image=image, check_exist=True) cls.images.append(cls.r_img) cls.q_img = 'osrg/quagga' cls.images.append(cls.q_img) @@ -72,7 +77,7 @@ try: ctn.stop() except ctn_base.CommandError as e: - pass + LOG.exception('Exception when stopping containers: %s', e) ctn.remove() for br in cls.bridges: br.delete() diff -Nru ryu-4.9/ryu/tests/integrated/bgp/test_basic.py ryu-4.15/ryu/tests/integrated/bgp/test_basic.py --- ryu-4.9/ryu/tests/integrated/bgp/test_basic.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/integrated/bgp/test_basic.py 2017-07-02 11:08:32.000000000 +0000 @@ -18,8 +18,8 @@ import time -from . import base from ryu.tests.integrated.common import docker_base as ctn_base +from . import base class BgpSpeakerBasicTest(base.BgpSpeakerTestBase): @@ -29,7 +29,8 @@ self.r1.start_ryubgp(retry=True) def test_check_neighbor_established(self): - for i in range(0, self.checktime): + neighbor_state = ctn_base.BGP_FSM_IDLE + for _ in range(0, self.checktime): neighbor_state = self.q1.get_neighbor_state(self.r1) if neighbor_state == ctn_base.BGP_FSM_ESTABLISHED: break @@ -37,7 +38,8 @@ self.assertEqual(neighbor_state, ctn_base.BGP_FSM_ESTABLISHED) def test_check_rib_nexthop(self): - for i in range(0, self.checktime): + neighbor_state = ctn_base.BGP_FSM_IDLE + for _ in range(0, self.checktime): neighbor_state = self.q1.get_neighbor_state(self.r1) if neighbor_state == ctn_base.BGP_FSM_ESTABLISHED: break diff -Nru ryu-4.9/ryu/tests/integrated/bgp/test_ip6_basic.py ryu-4.15/ryu/tests/integrated/bgp/test_ip6_basic.py --- ryu-4.9/ryu/tests/integrated/bgp/test_ip6_basic.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/integrated/bgp/test_ip6_basic.py 2017-07-02 11:08:32.000000000 +0000 @@ -18,8 +18,8 @@ import time -from . import base_ip6 as base from ryu.tests.integrated.common import docker_base as ctn_base +from . 
import base_ip6 as base class BgpSpeakerBasicTest(base.BgpSpeakerTestBase): @@ -29,7 +29,8 @@ self.r1.start_ryubgp(retry=True) def test_check_neighbor_established(self): - for i in range(0, self.checktime): + neighbor_state = ctn_base.BGP_FSM_IDLE + for _ in range(0, self.checktime): neighbor_state = self.q1.get_neighbor_state(self.r1) if neighbor_state == ctn_base.BGP_FSM_ESTABLISHED: break @@ -37,7 +38,8 @@ self.assertEqual(neighbor_state, ctn_base.BGP_FSM_ESTABLISHED) def test_check_rib_nexthop(self): - for i in range(0, self.checktime): + neighbor_state = ctn_base.BGP_FSM_IDLE + for _ in range(0, self.checktime): neighbor_state = self.q1.get_neighbor_state(self.r1) if neighbor_state == ctn_base.BGP_FSM_ESTABLISHED: break diff -Nru ryu-4.9/ryu/tests/integrated/common/docker_base.py ryu-4.15/ryu/tests/integrated/common/docker_base.py --- ryu-4.9/ryu/tests/integrated/common/docker_base.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/integrated/common/docker_base.py 2017-07-02 11:08:32.000000000 +0000 @@ -20,10 +20,10 @@ import itertools import logging +import os import subprocess import time -from docker import Client import netaddr import six @@ -61,8 +61,8 @@ def try_several_times(f, t=3, s=1): - e = None - for i in range(t): + e = RuntimeError() + for _ in range(t): try: r = f() except RuntimeError as e: @@ -85,7 +85,14 @@ class CommandOut(str): - pass + + def __new__(cls, stdout, stderr, command, returncode, **kwargs): + stdout = stdout or '' + obj = super(CommandOut, cls).__new__(cls, stdout, **kwargs) + obj.stderr = stderr or '' + obj.command = command + obj.returncode = returncode + return obj class Command(object): @@ -108,38 +115,27 @@ stdout=p_stdout, stderr=p_stderr) __stdout, __stderr = pop.communicate() - try: - if six.PY3 and isinstance(__stdout, six.binary_type): - _stdout = __stdout.decode('ascii') - else: - _stdout = __stdout - if six.PY3 and isinstance(__stderr, six.binary_type): - _stderr = __stderr.decode('ascii') - else: - _stderr = __stderr - except UnicodeError: - _stdout = __stdout - _stderr = __stderr - out = CommandOut(_stdout if _stdout else "") - out.stderr = _stderr if _stderr else "" - out.command = cmd - out.returncode = pop.returncode + _stdout = six.text_type(__stdout, 'utf-8') + _stderr = six.text_type(__stderr, 'utf-8') + out = CommandOut(_stdout, _stderr, cmd, pop.returncode) return out def execute(self, cmd, capture=True, try_times=1, interval=1): + out = None for i in range(try_times): out = self._execute(cmd, capture=capture) LOG.info(out.command) if out.returncode == 0: return out - LOG.error(out.stderr) + LOG.error("stdout: %s", out) + LOG.error("stderr: %s", out.stderr) if i + 1 >= try_times: break time.sleep(interval) raise CommandError(out) def sudo(self, cmd, capture=True, try_times=1, interval=1): - cmd = 'sudo ' + cmd + cmd = 'sudo %s' % cmd return self.execute(cmd, capture=capture, try_times=try_times, interval=interval) @@ -157,10 +153,7 @@ return images def exist(self, name): - if name in self.get_images(): - return True - else: - return False + return name in self.get_images() def build(self, tagname, dockerfile_dir): self.cmd.sudo( @@ -170,25 +163,29 @@ def remove(self, tagname, check_exist=False): if check_exist and not self.exist(tagname): return tagname - self.cmd.sudo("docker rmi -f " + tagname, try_times=3) + self.cmd.sudo("docker rmi -f %s" % tagname, try_times=3) def create_quagga(self, tagname='quagga', image=None, check_exist=False): if check_exist and self.exist(tagname): return tagname - workdir = TEST_BASE_DIR + 
'/' + tagname - pkges = 'telnet tcpdump quagga' + workdir = os.path.join(TEST_BASE_DIR, tagname) + pkges = ' '.join([ + 'telnet', + 'tcpdump', + 'quagga', + ]) if image: use_image = image else: use_image = self.baseimage c = CmdBuffer() - c << 'FROM ' + use_image + c << 'FROM %s' % use_image c << 'RUN apt-get update' - c << 'RUN apt-get install -qy --no-install-recommends ' + pkges + c << 'RUN apt-get install -qy --no-install-recommends %s' % pkges c << 'CMD /usr/lib/quagga/bgpd' - self.cmd.sudo('rm -rf ' + workdir) - self.cmd.execute('mkdir -p ' + workdir) + self.cmd.sudo('rm -rf %s' % workdir) + self.cmd.execute('mkdir -p %s' % workdir) self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir)) self.build(tagname, workdir) return tagname @@ -196,21 +193,11 @@ def create_ryu(self, tagname='ryu', image=None, check_exist=False): if check_exist and self.exist(tagname): return tagname - workdir = '%s/%s' % (TEST_BASE_DIR, tagname) + workdir = os.path.join(TEST_BASE_DIR, tagname) workdir_ctn = '/root/osrg/ryu' pkges = ' '.join([ - 'telnet', 'tcpdump', 'iproute2', - 'python-setuptools', - 'python-pip', - 'gcc', - 'python-dev', - 'libffi-dev', - 'libssl-dev', - 'libxml2-dev', - 'libxslt1-dev', - 'zlib1g-dev', ]) if image: use_image = image @@ -223,13 +210,17 @@ 'RUN apt-get update', '&& apt-get install -qy --no-install-recommends %s' % pkges, '&& cd %s' % workdir_ctn, + # Note: Clean previous builds, because "python setup.py install" + # might fail if the current directory contains the symlink to + # Docker host file systems. + '&& rm -rf *.egg-info/ build/ dist/ .tox/ *.log' '&& pip install -r tools/pip-requires -r tools/optional-requires', '&& python setup.py install', ]) c << install self.cmd.sudo('rm -rf %s' % workdir) - self.cmd.execute('mkdir -p ' + workdir) + self.cmd.execute('mkdir -p %s' % workdir) self.cmd.execute("echo '%s' > %s/Dockerfile" % (str(c), workdir)) self.cmd.execute('cp -r ../ryu %s/' % workdir) self.build(tagname, workdir) @@ -257,12 +248,9 @@ self.name = name if br_type not in (BRIDGE_TYPE_DOCKER, BRIDGE_TYPE_BRCTL, BRIDGE_TYPE_OVS): - raise Exception("argument error br_type: %s" % self.br_type) + raise Exception("argument error br_type: %s" % br_type) self.br_type = br_type - if self.br_type == BRIDGE_TYPE_DOCKER: - self.docker_nw = True - else: - self.docker_nw = False + self.docker_nw = bool(self.br_type == BRIDGE_TYPE_DOCKER) if TEST_PREFIX != '': self.name = '{0}_{1}'.format(TEST_PREFIX, name) self.with_ip = with_ip @@ -277,10 +265,10 @@ else: self.end_ip = netaddr.IPAddress(self.subnet.last) - def f(): + def _ip_gen(): for host in netaddr.IPRange(self.start_ip, self.end_ip): yield host - self._ip_generator = f() + self._ip_generator = _ip_gen() # throw away first network address self.next_ip_address() @@ -296,12 +284,14 @@ v6 = '' if self.subnet.version == 6: v6 = '--ipv6' - cmd = "docker network create --driver bridge %s " % v6 - cmd += "%s --subnet %s %s" % (gw, subnet, self.name) + cmd = ("docker network create --driver bridge %s " + "%s --subnet %s %s" % (v6, gw, subnet, self.name)) elif self.br_type == BRIDGE_TYPE_BRCTL: cmd = "ip link add {0} type bridge".format(self.name) elif self.br_type == BRIDGE_TYPE_OVS: cmd = "ovs-vsctl add-br {0}".format(self.name) + else: + raise ValueError('Unsupported br_type: %s' % self.br_type) self.delete() self.execute(cmd, sudo=True, retry=True) try_several_times(f) @@ -348,10 +338,7 @@ return self.get_bridges_ovs() def exist(self): - if self.name in self.get_bridges(): - return True - else: - return False + return 
self.name in self.get_bridges() def execute(self, cmd, capture=True, sudo=False, retry=False): if sudo: @@ -387,17 +374,18 @@ ipv4 = None ipv6 = None ip_address = self.next_ip_address() + ip_address_ip = ip_address.split('/')[0] version = 4 if netaddr.IPNetwork(ip_address).version == 6: version = 6 - opt_ip = "--ip %s" % ip_address + opt_ip = "--ip %s" % ip_address_ip if version == 4: ipv4 = ip_address else: - opt_ip = "--ip6 %s" % ip_address + opt_ip = "--ip6 %s" % ip_address_ip ipv6 = ip_address - cmd = "docker network connect %s " % opt_ip - cmd += "%s %s" % (self.name, ctn.docker_name()) + cmd = "docker network connect %s %s %s" % ( + opt_ip, self.name, ctn.docker_name()) self.execute(cmd, sudo=True) ctn.set_addr_info(bridge=self.name, ipv4=ipv4, ipv6=ipv6, ifname=name) @@ -496,18 +484,11 @@ else: return self.cmd.sudo(cmd, capture=capture) - def exec_on_ctn(self, cmd, capture=True, stream=False, detach=False): + def exec_on_ctn(self, cmd, capture=True, detach=False): name = self.docker_name() - if stream: - # This needs root permission. - dcli = Client(timeout=120, version='auto') - i = dcli.exec_create(container=name, cmd=cmd) - return dcli.exec_start(i['Id'], tty=True, - stream=stream, detach=detach) - else: - flag = '-d' if detach else '' - return self.dcexec('docker exec {0} {1} {2}'.format( - flag, name, cmd), capture=capture) + flag = '-d' if detach else '' + return self.dcexec('docker exec {0} {1} {2}'.format( + flag, name, cmd), capture=capture) def get_containers(self, allctn=False): cmd = 'docker ps --no-trunc=true' @@ -520,10 +501,7 @@ return containers def exist(self, allctn=False): - if self.docker_name() in self.get_containers(allctn=allctn): - return True - else: - return False + return self.docker_name() in self.get_containers(allctn=allctn) def run(self): c = CmdBuffer(' ') @@ -553,7 +531,7 @@ if not self.exist(allctn=False): return ctn_id = self.get_docker_id() - out = self.dcexec('docker stop -t 0 ' + ctn_id, retry=True) + out = self.dcexec('docker stop -t 0 %s' % ctn_id, retry=True) self.is_running = False return out @@ -562,7 +540,7 @@ if not self.exist(allctn=True): return ctn_id = self.get_docker_id() - out = self.dcexec('docker rm -f ' + ctn_id, retry=True) + out = self.dcexec('docker rm -f %s' % ctn_id, retry=True) self.is_running = False return out @@ -591,7 +569,7 @@ def get_pid(self): if self.is_running: - cmd = "docker inspect -f '{{.State.Pid}}' " + self.docker_name() + cmd = "docker inspect -f '{{.State.Pid}}' %s" % self.docker_name() return int(self.dcexec(cmd)) return -1 @@ -641,8 +619,8 @@ def __init__(self, name, asn, router_id, ctn_image_name=None): self.config_dir = TEST_BASE_DIR if TEST_PREFIX: - self.config_dir += '/' + TEST_PREFIX - self.config_dir += '/' + name + self.config_dir = os.path.join(self.config_dir, TEST_PREFIX) + self.config_dir = os.path.join(self.config_dir, name) self.asn = asn self.router_id = router_id self.peers = {} @@ -666,7 +644,8 @@ return w_time def add_peer(self, peer, bridge='', reload_config=True, v6=False, - peer_info={}): + peer_info=None): + peer_info = peer_info or {} self.peers[peer] = self.DEFAULT_PEER_ARG.copy() self.peers[peer].update(peer_info) peer_keys = sorted(self.peers[peer].keys()) @@ -709,15 +688,16 @@ self.reload_config() def disable_peer(self, peer): - raise Exception('implement disable_peer() method') + raise NotImplementedError() def enable_peer(self, peer): - raise Exception('implement enable_peer() method') + raise NotImplementedError() def log(self): return self.execute('cat 
{0}/*.log'.format(self.config_dir)) - def add_route(self, route, reload_config=True, route_info={}): + def add_route(self, route, reload_config=True, route_info=None): + route_info = route_info or {} self.routes[route] = self.DEFAULT_ROUTE_ARG.copy() self.routes[route].update(route_info) route_keys = sorted(self.routes[route].keys()) @@ -758,47 +738,46 @@ self.peers[peer]['policies'][typ] = policy def get_local_rib(self, peer, rf): - raise Exception('implement get_local_rib() method') + raise NotImplementedError() def get_global_rib(self, rf): - raise Exception('implement get_global_rib() method') + raise NotImplementedError() def get_neighbor_state(self, peer_id): - raise Exception('implement get_neighbor() method') + raise NotImplementedError() def get_reachablily(self, prefix, timeout=20): - version = netaddr.IPNetwork(prefix).version - addr = prefix.split('/')[0] - if version == 4: - ping_cmd = 'ping' - elif version == 6: - ping_cmd = 'ping6' - else: - raise Exception( - 'unsupported route family: {0}'.format(version)) - cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format( - ping_cmd, addr) - interval = 1 - count = 0 - while True: - res = self.exec_on_ctn(cmd) - LOG.info(res) - if '1 packets received' in res and '0% packet loss': - break - time.sleep(interval) - count += interval - if count >= timeout: - raise Exception('timeout') - return True + version = netaddr.IPNetwork(prefix).version + addr = prefix.split('/')[0] + if version == 4: + ping_cmd = 'ping' + elif version == 6: + ping_cmd = 'ping6' + else: + raise Exception( + 'unsupported route family: {0}'.format(version)) + cmd = '/bin/bash -c "/bin/{0} -c 1 -w 1 {1} | xargs echo"'.format( + ping_cmd, addr) + interval = 1 + count = 0 + while True: + res = self.exec_on_ctn(cmd) + LOG.info(res) + if '1 packets received' in res and '0% packet loss': + break + time.sleep(interval) + count += interval + if count >= timeout: + raise Exception('timeout') + return True def wait_for(self, expected_state, peer, timeout=120): interval = 1 count = 0 while True: state = self.get_neighbor_state(peer) - LOG.info("{0}'s peer {1} state: {2}".format(self.router_id, - peer.router_id, - state)) + LOG.info("%s's peer %s state: %s", + self.router_id, peer.router_id, state) if state == expected_state: return @@ -816,7 +795,7 @@ self.exec_on_ctn(cmd) def create_config(self): - raise Exception('implement create_config() method') + raise NotImplementedError() def reload_config(self): - raise Exception('implement reload_config() method') + raise NotImplementedError() diff -Nru ryu-4.9/ryu/tests/integrated/common/install_docker_test_pkg_common.sh ryu-4.15/ryu/tests/integrated/common/install_docker_test_pkg_common.sh --- ryu-4.9/ryu/tests/integrated/common/install_docker_test_pkg_common.sh 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/integrated/common/install_docker_test_pkg_common.sh 2017-07-02 11:08:32.000000000 +0000 @@ -4,7 +4,6 @@ function init_variables { APTLINE_DOCKER=0 DIR_BASE=/tmp - SUDO_PIP="" } function process_options { @@ -21,17 +20,20 @@ shift; ((i++)) DIR_BASE=$1 ;; - -s|--sudo-pip) - SUDO_PIP=sudo - ;; esac shift; ((i++)) done } +function install_pipework { + if ! 
which /usr/local/bin/pipework >/dev/null + then + sudo rm -rf $DIR_BASE/pipework + git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework + sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework + fi +} + function install_depends_pkg { - sudo rm -rf $DIR_BASE/pipework - git clone https://github.com/jpetazzo/pipework.git $DIR_BASE/pipework - sudo install -m 0755 $DIR_BASE/pipework/pipework /usr/local/bin/pipework - $SUDO_PIP pip install docker-py pycrypto nsenter + install_pipework } diff -Nru ryu-4.9/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh ryu-4.15/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh --- ryu-4.9/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh 2017-07-02 11:08:32.000000000 +0000 @@ -7,7 +7,6 @@ init_variables process_options "$@" -SUDO_PIP="" sudo apt-get update install_depends_pkg diff -Nru ryu-4.9/ryu/tests/integrated/common/quagga.py ryu-4.15/ryu/tests/integrated/common/quagga.py --- ryu-4.9/ryu/tests/integrated/common/quagga.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/integrated/common/quagga.py 2017-07-02 11:08:32.000000000 +0000 @@ -19,6 +19,7 @@ from __future__ import absolute_import import logging +import os import netaddr @@ -39,7 +40,7 @@ self.zebra = zebra self._create_config_debian() - def run(self, wait=False): + def run(self, wait=False, w_time=WAIT_FOR_BOOT): w_time = super(QuaggaBGPContainer, self).run(wait=wait, w_time=self.WAIT_FOR_BOOT) return w_time @@ -56,9 +57,9 @@ read_next = False for line in out.split('\n'): + ibgp = False if line[:2] == '*>': line = line[2:] - ibgp = False if line[0] == 'i': line = line[1:] ibgp = True @@ -179,7 +180,7 @@ c << 'watchquagga_enable=yes' c << 'watchquagga_options=(--daemon)' with open('{0}/debian.conf'.format(self.config_dir), 'w') as f: - LOG.info('[{0}\'s new config]'.format(self.name)) + LOG.info("[%s's new config]", self.name) LOG.info(str(c)) f.writelines(str(c)) @@ -194,7 +195,7 @@ c << 'isisd=no' c << 'babeld=no' with open('{0}/daemons'.format(self.config_dir), 'w') as f: - LOG.info('[{0}\'s new config]'.format(self.name)) + LOG.info("[%s's new config]", self.name) LOG.info(str(c)) f.writelines(str(c)) @@ -263,7 +264,7 @@ c << 'log file {0}/bgpd.log'.format(self.SHARED_VOLUME) with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f: - LOG.info('[{0}\'s new config]'.format(self.name)) + LOG.info("[%s's new config]", self.name) LOG.info(str(c)) f.writelines(str(c)) @@ -278,12 +279,12 @@ c << '' with open('{0}/zebra.conf'.format(self.config_dir), 'w') as f: - LOG.info('[{0}\'s new config]'.format(self.name)) + LOG.info("[%s's new config]", self.name) LOG.info(str(c)) f.writelines(str(c)) def vtysh(self, cmd, config=True): - if type(cmd) is not list: + if not isinstance(cmd, list): cmd = [cmd] cmd = ' '.join("-c '{0}'".format(c) for c in cmd) if config: @@ -325,7 +326,7 @@ ctn_image_name, zebra) def create_config(self): - with open('{0}/bgpd.conf'.format(self.config_dir), 'w') as f: - LOG.info('[{0}\'s new config]'.format(self.name)) + with open(os.path.join(self.config_dir, 'bgpd.conf'), 'w') as f: + LOG.info("[%s's new config]", self.name) LOG.info(self.config) f.writelines(self.config) diff -Nru ryu-4.9/ryu/tests/integrated/common/ryubgp.py ryu-4.15/ryu/tests/integrated/common/ryubgp.py --- ryu-4.9/ryu/tests/integrated/common/ryubgp.py 2016-12-05 07:37:18.000000000 +0000 +++ 
ryu-4.15/ryu/tests/integrated/common/ryubgp.py 2017-07-02 11:08:32.000000000 +0000 @@ -16,6 +16,7 @@ from __future__ import absolute_import import logging +import os import time from . import docker_base as base @@ -31,9 +32,9 @@ def __init__(self, name, asn, router_id, ctn_image_name): super(RyuBGPContainer, self).__init__(name, asn, router_id, ctn_image_name) - self.RYU_CONF = self.config_dir + '/ryu.conf' - self.SHARED_RYU_CONF = self.SHARED_VOLUME + '/ryu.conf' - self.SHARED_BGP_CONF = self.SHARED_VOLUME + '/bgp_conf.py' + self.RYU_CONF = os.path.join(self.config_dir, 'ryu.conf') + self.SHARED_RYU_CONF = os.path.join(self.SHARED_VOLUME, 'ryu.conf') + self.SHARED_BGP_CONF = os.path.join(self.SHARED_VOLUME, 'bgp_conf.py') self.shared_volumes.append((self.config_dir, self.SHARED_VOLUME)) def _create_config_ryu(self): @@ -145,8 +146,8 @@ }, }""" c << log_conf - with open(self.config_dir + '/bgp_conf.py', 'w') as f: - LOG.info("[%s's new config]" % self.name) + with open(os.path.join(self.config_dir, 'bgp_conf.py'), 'w') as f: + LOG.info("[%s's new config]", self.name) LOG.info(str(c)) f.writelines(str(c)) @@ -172,9 +173,10 @@ else: try_times = 1 cmd = "ryu-manager --verbose " + cmd += "--config-file %s " % self.SHARED_RYU_CONF cmd += "--bgp-app-config-file %s " % self.SHARED_BGP_CONF cmd += "ryu.services.protocols.bgp.application" - for i in range(try_times): + for _ in range(try_times): self.exec_on_ctn(cmd, detach=True) if self.is_running_ryu(): result = True @@ -191,7 +193,7 @@ try_times = 3 else: try_times = 1 - for i in range(try_times): + for _ in range(try_times): cmd = '/usr/bin/pkill ryu-manager -SIGTERM' self.exec_on_ctn(cmd) if not self.is_running_ryu(): Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/bgp4/bgp4-update_ipv6.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/bgp4/bgp4-update_ipv6.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/bgp4/flowspec_action_redirect.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/bgp4/flowspec_action_redirect.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/bgp4/flowspec_action_traffic_action.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/bgp4/flowspec_action_traffic_action.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/bgp4/flowspec_action_traffic_marking.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/bgp4/flowspec_action_traffic_marking.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/bgp4/flowspec_action_traffic_rate.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/bgp4/flowspec_action_traffic_rate.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/bgp4/flowspec_nlri_ipv4.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/bgp4/flowspec_nlri_ipv4.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/bgp4/flowspec_nlri_ipv6.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/bgp4/flowspec_nlri_ipv6.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/bgp4/flowspec_nlri_l2vpn.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/bgp4/flowspec_nlri_l2vpn.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/bgp4/flowspec_nlri_vpn4.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/bgp4/flowspec_nlri_vpn4.pcap differ Binary files 
/tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/bgp4/flowspec_nlri_vpn6.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/bgp4/flowspec_nlri_vpn6.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/mrt/rib.20161101.0000_pick.bz2 and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/mrt/rib.20161101.0000_pick.bz2 differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/mrt/updates.20161101.0000.bz2 and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/mrt/updates.20161101.0000.bz2 differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/pcap/geneve_unknown.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/pcap/geneve_unknown.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/pcap/gre_full_options.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/pcap/gre_full_options.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/pcap/gre_no_option.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/pcap/gre_no_option.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/pcap/gre_nvgre_option.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/pcap/gre_nvgre_option.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/pcap/openflow_flowmod.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/pcap/openflow_flowmod.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/pcap/openflow_flowstats_req.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/pcap/openflow_flowstats_req.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/pcap/openflow_invalid_version.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/pcap/openflow_invalid_version.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/pcap/zebra_v2.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/pcap/zebra_v2.pcap differ Binary files /tmp/tmp18NPUf/Xtt0ckDb0e/ryu-4.9/ryu/tests/packet_data/pcap/zebra_v3.pcap and /tmp/tmp18NPUf/FFOfBqE2dc/ryu-4.15/ryu/tests/packet_data/pcap/zebra_v3.pcap differ diff -Nru ryu-4.9/ryu/tests/unit/app/test_ofctl_rest.py ryu-4.15/ryu/tests/unit/app/test_ofctl_rest.py --- ryu-4.9/ryu/tests/unit/app/test_ofctl_rest.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/app/test_ofctl_rest.py 2017-07-02 11:08:32.000000000 +0000 @@ -18,17 +18,17 @@ import functools import json import logging -from nose.tools import eq_ import os import sys import unittest -from webob.request import Request try: import mock # Python 2 except ImportError: from unittest import mock # Python 3 +from nose.tools import eq_ from ryu.app import ofctl_rest +from ryu.app.wsgi import Request from ryu.app.wsgi import WSGIApplication from ryu.controller.dpset import DPSet from ryu.ofproto import ofproto_protocol @@ -71,7 +71,7 @@ class Test_ofctl_rest(unittest.TestCase): def _test(self, name, dp, method, path, body): - print('processing %s ...' % name) + # print('processing %s ...' 
% name) dpset = DPSet() dpset._register(dp) @@ -104,13 +104,13 @@ this_dir = os.path.dirname(sys.modules[__name__].__file__) ofctl_rest_json_dir = os.path.join(this_dir, 'ofctl_rest_json/') - for ofp_ver in _ofp_vers.keys(): + for ofp_ver in _ofp_vers: # read a json file json_path = os.path.join(ofctl_rest_json_dir, ofp_ver + '.json') if os.path.exists(json_path): _test_cases = json.load(open(json_path)) else: - print("Skip to load test cases for %s" % ofp_ver) + # print("Skip to load test cases for %s" % ofp_ver) continue # add test @@ -120,7 +120,7 @@ body = test.get('body', {}) name = 'test_ofctl_rest_' + method + '_' + ofp_ver + '_' + path - print('adding %s ...' % name) + # print('adding %s ...' % name) f = functools.partial( Test_ofctl_rest._test, name=name, diff -Nru ryu-4.9/ryu/tests/unit/app/test_wsgi.py ryu-4.15/ryu/tests/unit/app/test_wsgi.py --- ryu-4.9/ryu/tests/unit/app/test_wsgi.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/app/test_wsgi.py 2017-07-02 11:08:32.000000000 +0000 @@ -20,10 +20,10 @@ import nose from nose.tools import eq_ -from webob.response import Response from ryu.app.wsgi import ControllerBase from ryu.app.wsgi import WSGIApplication +from ryu.app.wsgi import Response from ryu.app.wsgi import route from ryu.lib import dpid as dpidlib diff -Nru ryu-4.9/ryu/tests/unit/lib/ovs/test_vsctl.py ryu-4.15/ryu/tests/unit/lib/ovs/test_vsctl.py --- ryu-4.9/ryu/tests/unit/lib/ovs/test_vsctl.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/lib/ovs/test_vsctl.py 2017-07-02 11:08:32.000000000 +0000 @@ -83,7 +83,7 @@ def _docker_inspect_ip_addr(cls, container): return _run( 'docker inspect --format="{{.NetworkSettings.IPAddress}}" %s' % - container)[0] + container)[0].strip('"') @classmethod def _set_up_mn_container(cls): diff -Nru ryu-4.9/ryu/tests/unit/lib/test_import_module.py ryu-4.15/ryu/tests/unit/lib/test_import_module.py --- ryu-4.9/ryu/tests/unit/lib/test_import_module.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/lib/test_import_module.py 2017-07-02 11:08:32.000000000 +0000 @@ -44,9 +44,8 @@ eq_("this is ccc", ccc.name) ddd = import_module('./lib/test_mod/ddd/mod.py') # Note: When importing a module by filename, if module file name - # is duplicated, import_module returns a module instance which is - # imported before. - eq_("this is ccc", ddd.name) + # is duplicated, import_module reload (override) a module instance. + eq_("this is ddd", ddd.name) def test_import_same_module1(self): from ryu.tests.unit.lib.test_mod import eee as eee1 diff -Nru ryu-4.9/ryu/tests/unit/lib/test_mrtlib.py ryu-4.15/ryu/tests/unit/lib/test_mrtlib.py --- ryu-4.9/ryu/tests/unit/lib/test_mrtlib.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/lib/test_mrtlib.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,765 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
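+
+# The tests below exercise round-tripping of MRT records with
+# ryu.lib.mrtlib.  As a rough usage sketch (the output file name is
+# hypothetical), reading a bzip2-compressed dump and writing the records
+# back out looks like:
+#
+#     import bz2
+#     from ryu.lib import mrtlib
+#
+#     with bz2.BZ2File('updates.20161101.0000.bz2', 'rb') as f:
+#         records = list(mrtlib.Reader(f))
+#     with open('updates_copy.mrt', 'wb') as f:
+#         writer = mrtlib.Writer(f)
+#         for record in records:
+#             writer.write(record)
+#         writer.close()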
+ +from __future__ import print_function + +import bz2 +import io +import logging +import os +import sys +import unittest + +try: + import mock # Python 2 +except ImportError: + from unittest import mock # Python 3 + +from nose.tools import eq_ +from nose.tools import ok_ + +from ryu.lib import addrconv +from ryu.lib import mrtlib +from ryu.lib.packet import bgp +from ryu.lib.packet import ospf +from ryu.utils import binary_str + + +LOG = logging.getLogger(__name__) + +MRT_DATA_DIR = os.path.join( + os.path.dirname(sys.modules[__name__].__file__), '../../packet_data/mrt/') + + +class TestMrtlib(unittest.TestCase): + """ + Test case for ryu.lib.mrtlib. + """ + + def test_reader(self): + files = [ + 'rib.20161101.0000_pick.bz2', + 'updates.20161101.0000.bz2', + ] + + for f in files: + # print('\n*** testing mrtlib.Reader with %s ...' % f) + counter = 0 + input_file = os.path.join(MRT_DATA_DIR, f) + for record in mrtlib.Reader(bz2.BZ2File(input_file, 'rb')): + # print('* No.%d\n%s' % (counter, record)) + ok_(not isinstance(record, mrtlib.UnknownMrtRecord)) + counter += 1 + + def test_writer(self): + files = [ + 'rib.20161101.0000_pick.bz2', + 'updates.20161101.0000.bz2', + ] + + for f in files: + # print('\n*** testing mrtlib.Writer with %s ...' % f) + input_file = os.path.join(MRT_DATA_DIR, f) + input_buf = bz2.BZ2File(input_file, 'rb').read() + input_records = list(mrtlib.Reader(bz2.BZ2File(input_file, 'rb'))) + + counter = 0 + f = io.BytesIO() + mrt_writer = mrtlib.Writer(f) + for record in input_records: + # print('* No.%d\n%s' % (counter, record)) + mrt_writer.write(record) + counter += 1 + + output_buf = f.getvalue() + + eq_(binary_str(input_buf), binary_str(output_buf)) + + mrt_writer.close() + + eq_(True, mrt_writer._f.closed) + + +class TestMrtlibMrtRecord(unittest.TestCase): + """ + Test case for ryu.lib.mrtlib.MrtRecord. + """ + + def test_init_without_type_subtype(self): + type_ = mrtlib.MrtRecord.TYPE_TABLE_DUMP + subtype = mrtlib.TableDumpMrtRecord.SUBTYPE_AFI_IPv4 + + message = mrtlib.TableDumpAfiIPv4MrtMessage( + view_num=1, + seq_num=2, + prefix='192.168.1.0', + prefix_len=24, + status=1, + originated_time=0, + peer_ip='10.0.0.1', + peer_as=65000, + bgp_attributes=[], + ) + record = mrtlib.TableDumpMrtRecord(message) + + eq_(type_, record.type) + eq_(subtype, record.subtype) + + def test_parse_pre_with_type_et(self): + buf = ( + b'\x00\x00\x00\x00' # timestamp + b'\x00\x11\x00\x00' # type=TYPE_BGP4MP_ET(17), subtype + b'\x00\x00\x00\xaa' # length + ) + + required_len = mrtlib.MrtRecord.parse_pre(buf) + + eq_(0xaa + mrtlib.ExtendedTimestampMrtRecord.HEADER_SIZE, + required_len) + + +# Note: MrtCommonRecord is tested in TestMrtlibMrtRecord. +# class TestMrtlibMrtCommonRecord(unittest.TestCase): + + +class TestMrtlibExtendedTimestampMrtRecord(unittest.TestCase): + """ + Test case for ryu.lib.mrtlib.ExtendedTimestampMrtRecord. 
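+
+    This record type carries an extra 32-bit ``ms_timestamp`` field right
+    after the common MRT header; the tests below check that the field is
+    parsed from and serialized into that position.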
+ """ + + def test_parse_extended_header(self): + body = b'test' + buf = ( + b'\x11\x11\x11\x11' # ms_timestamp + + body + ) + + (headers, + rest) = mrtlib.ExtendedTimestampMrtRecord.parse_extended_header(buf) + + ok_(isinstance(headers, list)) + eq_(1, len(headers)) + eq_(0x11111111, headers[0]) + eq_(body, rest) + + def test_serialize(self): + body = b'test' # 4 bytes + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x22\x22\x33\x33' # type, subtype + b'\x00\x00\x00\x04' # length=len(body) + b'\x44\x44\x44\x44' # ms_timestamp + + body + ) + + message_mock = mock.MagicMock(spec=mrtlib.MrtMessage) + message_mock.serialize.return_value = body + + record = mrtlib.ExtendedTimestampMrtRecord( + message=message_mock, + timestamp=0x11111111, + type_=0x2222, subtype=0x3333, + ms_timestamp=0x44444444, + length=0x00000004, + ) + + output = record.serialize() + + eq_(buf, output) + + +class TestMrtlibUnknownMrtRecord(unittest.TestCase): + """ + Test case for ryu.lib.mrtlib.UnknownMrtRecord. + """ + + def test_parse(self): + body = b'test' # 4 bytes + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x22\x22\x33\x33' # type, subtype + b'\x00\x00\x00\x04' # length=len(body) + + body + ) + + (record, rest) = mrtlib.MrtRecord.parse(buf) + + eq_(0x11111111, record.timestamp) + eq_(0x2222, record.type) + eq_(0x3333, record.subtype) + eq_(0x00000004, record.length) + eq_(body, record.message.buf) + eq_(b'', rest) + + def test_serialize(self): + body = b'test' # 4 bytes + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x22\x22\x33\x33' # type, subtype + b'\x00\x00\x00\x04' # length=len(body) + + body + ) + + message = mrtlib.UnknownMrtMessage(buf=body) + record = mrtlib.UnknownMrtRecord( + message=message, + timestamp=0x11111111, + type_=0x2222, subtype=0x3333, + length=0x00000004, + ) + + output = record.serialize() + + eq_(buf, output) + + +class TestMrtlibOspf2MrtRecord(unittest.TestCase): + """ + Test case for ryu.lib.mrtlib.Ospf2MrtRecord. 
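+
+    OSPFv2 records prepend the remote and local interface addresses to the
+    raw OSPF payload; the OSPF message itself is mocked out in these tests.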
+ """ + + @mock.patch('ryu.lib.packet.ospf.ospf.parser') + def test_parse(self, mock_ospf_parser): + remote_ip = '10.0.0.1' + local_ip = '10.0.0.2' + body = b'test' # 4 bytes + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x00\x0b\x00\x00' # type=TYPE_OSPFv2(11), subtype + b'\x00\x00\x00\x0c' # length=len(remote_ip + local_ip + body) + + addrconv.ipv4.text_to_bin(remote_ip) # remote_ip + + addrconv.ipv4.text_to_bin(local_ip) # local_ip + + body # ospf_message + ) + + mock_ospf_message = mock.MagicMock(spec=ospf.OSPFMessage) + mock_ospf_parser.return_value = (mock_ospf_message, None, '') + + (record, rest) = mrtlib.MrtRecord.parse(buf) + + eq_(0x11111111, record.timestamp) + eq_(mrtlib.MrtRecord.TYPE_OSPFv2, record.type) + eq_(0x0000, record.subtype) + eq_(0x0000000c, record.length) + eq_(remote_ip, record.message.remote_ip) + eq_(local_ip, record.message.local_ip) + eq_(mock_ospf_message, record.message.ospf_message) + eq_(b'', rest) + + def test_serialize(self): + remote_ip = '10.0.0.1' + local_ip = '10.0.0.2' + body = b'test' # 4 bytes + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x00\x0b\x00\x00' # type=TYPE_OSPFv2(11), subtype + b'\x00\x00\x00\x0c' # length=len(remote_ip + local_ip + body) + + addrconv.ipv4.text_to_bin(remote_ip) # remote_ip + + addrconv.ipv4.text_to_bin(local_ip) # local_ip + + body # ospf_message + ) + + mock_ospf_message = mock.MagicMock(spec=ospf.OSPFMessage) + mock_ospf_message.serialize.return_value = body + + message = mrtlib.Ospf2MrtMessage( + remote_ip=remote_ip, + local_ip=local_ip, + ospf_message=mock_ospf_message, + ) + record = mrtlib.Ospf2MrtRecord( + message=message, + timestamp=0x11111111, + # type_=None, + # subtype=None, + # length=None, + ) + + output = record.serialize() + + eq_(buf, output) + + +class TestMrtlibTableDumpMrtRecord(unittest.TestCase): + """ + Test case for ryu.lib.mrtlib.TableDumpMrtRecord. 
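+
+    The AFI_IPv4 and AFI_IPv6 subtypes share one layout (view and sequence
+    numbers, prefix and prefix length, status, originated time, peer
+    address and AS, then the encoded BGP path attributes); only the
+    address width differs, as the buffers below illustrate.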
+ """ + + @mock.patch('ryu.lib.packet.bgp._PathAttribute.parser') + def test_parse_afi_ipv4(self, mock_bgp_attr_parser): + prefix = '10.0.0.0' + peer_ip = '172.16.0.1' + body = b'test' # 4 bytes + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x00\x0c\x00\x01' # type=TYPE_TABLE_DUMP(12), + # subtype=SUBTYPE_AFI_IPv4(1) + b'\x00\x00\x00\x1a' # length=26 + b'\x22\x22\x33\x33' # view_num, seq_num + + addrconv.ipv4.text_to_bin(prefix) + # prefix + b'\x18\x01' # prefix_len=24, status=1 + b'\x44\x44\x44\x44' # originated_time + + addrconv.ipv4.text_to_bin(peer_ip) + # peer_ip + b'\xfd\xe8\x00\x04' # peer_as=65000, attr_len=len(body) + + body # bgp_attributes + ) + + mock_bgp_attr = mock.MagicMock(spec=bgp._PathAttribute) + mock_bgp_attr_parser.return_value = (mock_bgp_attr, b'') + + (record, rest) = mrtlib.MrtRecord.parse(buf) + + eq_(0x11111111, record.timestamp) + eq_(mrtlib.MrtRecord.TYPE_TABLE_DUMP, record.type) + eq_(mrtlib.TableDumpMrtRecord.SUBTYPE_AFI_IPv4, record.subtype) + eq_(0x0000001a, record.length) + eq_(0x2222, record.message.view_num) + eq_(0x3333, record.message.seq_num) + eq_(prefix, record.message.prefix) + eq_(24, record.message.prefix_len) + eq_(1, record.message.status) + eq_(0x44444444, record.message.originated_time) + eq_(peer_ip, record.message.peer_ip) + eq_(65000, record.message.peer_as) + eq_(0x0004, record.message.attr_len) + eq_([mock_bgp_attr], record.message.bgp_attributes) + eq_(b'', rest) + + def test_serialize_afi_ipv4(self): + prefix = '10.0.0.0' + peer_ip = '172.16.0.1' + body = b'test' # 4 bytes + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x00\x0c\x00\x01' # type=TYPE_TABLE_DUMP(12), + # subtype=SUBTYPE_AFI_IPv4(1) + b'\x00\x00\x00\x1a' # length=26 + b'\x22\x22\x33\x33' # view_num, seq_num + + addrconv.ipv4.text_to_bin(prefix) + # prefix + b'\x18\x01' # prefix_len=24, status=1 + b'\x44\x44\x44\x44' # originated_time + + addrconv.ipv4.text_to_bin(peer_ip) + # peer_ip + b'\xfd\xe8\x00\x04' # peer_as=65000, attr_len=len(body) + + body # bgp_attributes + ) + + mock_bgp_attr = mock.MagicMock(spec=bgp._PathAttribute) + mock_bgp_attr.serialize.return_value = body + + message = mrtlib.TableDumpAfiIPv4MrtMessage( + view_num=0x2222, + seq_num=0x3333, + prefix=prefix, + prefix_len=24, + status=1, + originated_time=0x44444444, + peer_ip=peer_ip, + peer_as=65000, + bgp_attributes=[mock_bgp_attr], + # attr_len=4 + ) + record = mrtlib.TableDumpMrtRecord( + message=message, + timestamp=0x11111111, + # type_=None, + # subtype=None, + # length=None, + ) + + output = record.serialize() + + eq_(buf, output) + + @mock.patch('ryu.lib.packet.bgp._PathAttribute.parser') + def test_parse_afi_ipv6(self, mock_bgp_attr_parser): + prefix = '2001:db8::1' + peer_ip = 'fe80::1' + body = b'test' # 4 bytes + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x00\x0c\x00\x02' # type=TYPE_TABLE_DUMP(12), + # subtype=SUBTYPE_AFI_IPv6(2) + b'\x00\x00\x00\x32' # length=50 + b'\x22\x22\x33\x33' # view_num, seq_num + + addrconv.ipv6.text_to_bin(prefix) + # prefix + b'\x40\x01' # prefix_len=64, status=1 + b'\x44\x44\x44\x44' # originated_time + + addrconv.ipv6.text_to_bin(peer_ip) + # peer_ip + b'\xfd\xe8\x00\x04' # peer_as=65000, attr_len=len(body) + + body # bgp_attributes + ) + + mock_bgp_attr = mock.MagicMock(spec=bgp._PathAttribute) + mock_bgp_attr_parser.return_value = (mock_bgp_attr, b'') + + (record, rest) = mrtlib.MrtRecord.parse(buf) + + eq_(0x11111111, record.timestamp) + eq_(mrtlib.MrtRecord.TYPE_TABLE_DUMP, record.type) + eq_(mrtlib.TableDumpMrtRecord.SUBTYPE_AFI_IPv6, record.subtype) 
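+        # 0x32 (50 octets) = view/seq (4) + prefix (16)
+        #                    + prefix_len/status (2) + originated_time (4)
+        #                    + peer_ip (16) + peer_as/attr_len (4)
+        #                    + attributes (4)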
+ eq_(0x00000032, record.length) + eq_(0x2222, record.message.view_num) + eq_(0x3333, record.message.seq_num) + eq_(prefix, record.message.prefix) + eq_(64, record.message.prefix_len) + eq_(1, record.message.status) + eq_(0x44444444, record.message.originated_time) + eq_(peer_ip, record.message.peer_ip) + eq_(65000, record.message.peer_as) + eq_(0x0004, record.message.attr_len) + eq_([mock_bgp_attr], record.message.bgp_attributes) + eq_(b'', rest) + + def test_serialize_afi_ipv6(self): + prefix = '2001:db8::1' + peer_ip = 'fe80::1' + body = b'test' # 4 bytes + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x00\x0c\x00\x02' # type=TYPE_TABLE_DUMP(12), + # subtype=SUBTYPE_AFI_IPv6(2) + b'\x00\x00\x00\x32' # length=50 + b'\x22\x22\x33\x33' # view_num, seq_num + + addrconv.ipv6.text_to_bin(prefix) + # prefix + b'\x40\x01' # prefix_len=64, status=1 + b'\x44\x44\x44\x44' # originated_time + + addrconv.ipv6.text_to_bin(peer_ip) + # peer_ip + b'\xfd\xe8\x00\x04' # peer_as=65000, attr_len=len(body) + + body # bgp_attributes + ) + + mock_bgp_attr = mock.MagicMock(spec=bgp._PathAttribute) + mock_bgp_attr.serialize.return_value = body + + message = mrtlib.TableDumpAfiIPv6MrtMessage( + view_num=0x2222, + seq_num=0x3333, + prefix=prefix, + prefix_len=64, + status=1, + originated_time=0x44444444, + peer_ip=peer_ip, + peer_as=65000, + bgp_attributes=[mock_bgp_attr], + # attr_len=4 + ) + record = mrtlib.TableDumpMrtRecord( + message=message, + timestamp=0x11111111, + # type_=None, + # subtype=None, + # length=None, + ) + + output = record.serialize() + + eq_(buf, output) + + +class TestMrtlibTableDump2MrtRecord(unittest.TestCase): + """ + Test case for ryu.lib.mrtlib.TableDump2MrtRecord. + """ + + # Note: The classes corresponding to the following subtypes are + # tested in TestMrtlibMrtRecord. 
+ # - SUBTYPE_PEER_INDEX_TABLE = 1 + # - SUBTYPE_RIB_IPV4_UNICAST = 2 + # - SUBTYPE_RIB_IPV4_MULTICAST = 3 + # - SUBTYPE_RIB_IPV6_UNICAST = 4 + # - SUBTYPE_RIB_IPV6_MULTICAST = 5 + + @mock.patch('ryu.lib.mrtlib.MrtRibEntry.parse') + @mock.patch('ryu.lib.packet.bgp.BGPNLRI.parser') + def test_parse_rib_generic(self, mock_nlri_parser, mock_rib_entry_parser): + nlri_bin = b'nlri' # 4 bytes + rib_entries_bin = b'ribs' # 4 bytes + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x00\x0d\x00\x06' # type=TYPE_TABLE_DUMP_V2(13), + # subtype=SUBTYPE_RIB_GENERIC(6) + b'\x00\x00\x00\x11' # length=17 + b'\x22\x22\x22\x22' # seq_num + b'\x33\x33\x44' # afi, safi + + nlri_bin + # nlri + b'\x00\x01' # entry_count + + rib_entries_bin # rib_entries + ) + buf_entries = ( + b'\x00\x01' # entry_count + + rib_entries_bin # rib_entries + ) + + mock_bgp_nlri = mock.MagicMock(spec=bgp._AddrPrefix) + mock_nlri_parser.return_value = (mock_bgp_nlri, buf_entries) + + mock_rib_entry = mock.MagicMock(spec=mrtlib.MrtRibEntry) + mock_rib_entry_parser.return_value = (mock_rib_entry, b'') + + (record, rest) = mrtlib.MrtRecord.parse(buf) + + eq_(0x11111111, record.timestamp) + eq_(mrtlib.MrtRecord.TYPE_TABLE_DUMP_V2, record.type) + eq_(mrtlib.TableDump2MrtRecord.SUBTYPE_RIB_GENERIC, record.subtype) + eq_(0x00000011, record.length) + eq_(0x22222222, record.message.seq_num) + eq_(0x3333, record.message.afi) + eq_(0x44, record.message.safi) + eq_(mock_bgp_nlri, record.message.nlri) + eq_(0x0001, record.message.entry_count) + eq_([mock_rib_entry], record.message.rib_entries) + eq_(b'', rest) + + def test_serialize_rib_generic(self): + nlri_bin = b'nlri' # 4 bytes + rib_entries_bin = b'ribs' # 4 bytes + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x00\x0d\x00\x06' # type=TYPE_TABLE_DUMP_V2(13), + # subtype=SUBTYPE_RIB_GENERIC(6) + b'\x00\x00\x00\x11' # length=17 + b'\x22\x22\x22\x22' # seq_num + b'\x33\x33\x44' # afi, safi + + nlri_bin + # nlri + b'\x00\x01' # entry_count + + rib_entries_bin # rib_entries + ) + + mock_bgp_nlri = mock.MagicMock(spec=bgp._AddrPrefix) + mock_bgp_nlri.serialize.return_value = nlri_bin + + mock_rib_entry = mock.MagicMock(spec=mrtlib.MrtRibEntry) + mock_rib_entry.serialize.return_value = rib_entries_bin + + message = mrtlib.TableDump2RibGenericMrtMessage( + seq_num=0x22222222, + afi=0x3333, + safi=0x44, + nlri=mock_bgp_nlri, + rib_entries=[mock_rib_entry], + # entry_count=1, + ) + record = mrtlib.TableDump2MrtRecord( + message=message, + timestamp=0x11111111, + # type_=None, + # subtype=None, + # length=None, + ) + + output = record.serialize() + + eq_(buf, output) + + +class TestMrtlibMrtPeer(unittest.TestCase): + """ + Test case for ryu.lib.mrtlib.MrtPeer. 
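+
+    Peer entries hold a type octet, the peer's BGP identifier, its address,
+    and its AS number; the two-octet AS encoding is exercised here.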
+ """ + + def test_parse_two_octet_as(self): + bgp_id = '1.1.1.1' + ip_addr = '10.0.0.1' + buf = ( + b'\x00' # type + + addrconv.ipv4.text_to_bin(bgp_id) # bgp_id + + addrconv.ipv4.text_to_bin(ip_addr) + # ip_addr + b'\xfd\xe8' # as_num + ) + + peer, rest = mrtlib.MrtPeer.parse(buf) + + eq_(0, peer.type) + eq_(bgp_id, peer.bgp_id) + eq_(ip_addr, peer.ip_addr) + eq_(65000, peer.as_num) + eq_(b'', rest) + + def test_serialize_two_octet_as(self): + bgp_id = '1.1.1.1' + ip_addr = '10.0.0.1' + buf = ( + b'\x00' # type + + addrconv.ipv4.text_to_bin(bgp_id) # bgp_id + + addrconv.ipv4.text_to_bin(ip_addr) + # ip_addr + b'\xfd\xe8' # as_num + ) + + peer = mrtlib.MrtPeer( + bgp_id=bgp_id, + ip_addr=ip_addr, + as_num=65000, + # type_=0, + ) + + output = peer.serialize() + + eq_(buf, output) + + +class TestMrtlibBgp4MpMrtRecord(unittest.TestCase): + """ + Test case for ryu.lib.mrtlib.Bgp4MpMrtRecord. + """ + + # Note: The classes corresponding to the following subtypes are + # tested in TestMrtlibMrtRecord. + # - SUBTYPE_BGP4MP_MESSAGE = 1 + # - SUBTYPE_BGP4MP_MESSAGE_AS4 = 4 + # - SUBTYPE_BGP4MP_STATE_CHANGE_AS4 = 5 + # - SUBTYPE_BGP4MP_MESSAGE_LOCAL = 6 + # - SUBTYPE_BGP4MP_MESSAGE_AS4_LOCAL = 7 + + def test_parse_state_change_afi_ipv4(self): + peer_ip = '10.0.0.1' + local_ip = '10.0.0.2' + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x00\x10\x00\x00' # type=TYPE_BGP4MP(16), + # subtype=SUBTYPE_BGP4MP_STATE_CHANGE(0) + b'\x00\x00\x00\x14' # length=20 + b'\xfd\xe9\xfd\xea' # peer_as=65001, local_as=65002 + b'\x22\x22\x00\x01' # if_index, addr_family=AFI_IPv4(1) + + addrconv.ipv4.text_to_bin(peer_ip) # peer_ip + + addrconv.ipv4.text_to_bin(local_ip) + # local_ip + b'\x00\x01\x00\x02' # old_state=STATE_IDLE(1), + # new_state=STATE_CONNECT(2) + ) + + (record, rest) = mrtlib.MrtRecord.parse(buf) + + eq_(0x11111111, record.timestamp) + eq_(mrtlib.MrtRecord.TYPE_BGP4MP, record.type) + eq_(mrtlib.Bgp4MpMrtRecord.SUBTYPE_BGP4MP_STATE_CHANGE, record.subtype) + eq_(0x00000014, record.length) + eq_(65001, record.message.peer_as) + eq_(65002, record.message.local_as) + eq_(0x2222, record.message.if_index) + eq_(mrtlib.Bgp4MpStateChangeMrtMessage.AFI_IPv4, + record.message.afi) + eq_(mrtlib.Bgp4MpStateChangeMrtMessage.STATE_IDLE, + record.message.old_state) + eq_(mrtlib.Bgp4MpStateChangeMrtMessage.STATE_CONNECT, + record.message.new_state) + eq_(b'', rest) + + def test_serialize_state_change_afi_ipv4(self): + peer_ip = '10.0.0.1' + local_ip = '10.0.0.2' + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x00\x10\x00\x00' # type=TYPE_BGP4MP(16), + # subtype=SUBTYPE_BGP4MP_STATE_CHANGE(0) + b'\x00\x00\x00\x14' # length=20 + b'\xfd\xe9\xfd\xea' # peer_as=65001, local_as=65002 + b'\x22\x22\x00\x01' # if_index, addr_family=AFI_IPv4(1) + + addrconv.ipv4.text_to_bin(peer_ip) # peer_ip + + addrconv.ipv4.text_to_bin(local_ip) + # local_ip + b'\x00\x01\x00\x02' # old_state=STATE_IDLE(1), + # new_state=STATE_CONNECT(2) + ) + + message = mrtlib.Bgp4MpStateChangeMrtMessage( + peer_as=65001, + local_as=65002, + if_index=0x2222, + peer_ip=peer_ip, + local_ip=local_ip, + old_state=mrtlib.Bgp4MpStateChangeMrtMessage.STATE_IDLE, + new_state=mrtlib.Bgp4MpStateChangeMrtMessage.STATE_CONNECT, + # afi=mrtlib.Bgp4MpStateChangeMrtMessage.AFI_IPv4, + ) + record = mrtlib.Bgp4MpMrtRecord( + message=message, + timestamp=0x11111111, + # type_=None, + # subtype=None, + # length=None, + ) + + output = record.serialize() + + eq_(buf, output) + + def test_parse_state_change_afi_ipv6(self): + peer_ip = 'fe80::1' + local_ip = 'fe80::2' + buf = ( + 
b'\x11\x11\x11\x11' # timestamp + b'\x00\x10\x00\x00' # type=TYPE_BGP4MP(16), + # subtype=SUBTYPE_BGP4MP_STATE_CHANGE(0) + b'\x00\x00\x00\x2c' # length=44 + b'\xfd\xe9\xfd\xea' # peer_as=65001, local_as=65002 + b'\x22\x22\x00\x02' # if_index, addr_family=AFI_IPv6(2) + + addrconv.ipv6.text_to_bin(peer_ip) # peer_ip + + addrconv.ipv6.text_to_bin(local_ip) + # local_ip + b'\x00\x01\x00\x02' # old_state=STATE_IDLE(1), + # new_state=STATE_CONNECT(2) + ) + + (record, rest) = mrtlib.MrtRecord.parse(buf) + + eq_(0x11111111, record.timestamp) + eq_(mrtlib.MrtRecord.TYPE_BGP4MP, record.type) + eq_(mrtlib.Bgp4MpMrtRecord.SUBTYPE_BGP4MP_STATE_CHANGE, record.subtype) + eq_(0x0000002c, record.length) + eq_(65001, record.message.peer_as) + eq_(65002, record.message.local_as) + eq_(0x2222, record.message.if_index) + eq_(mrtlib.Bgp4MpStateChangeMrtMessage.AFI_IPv6, + record.message.afi) + eq_(mrtlib.Bgp4MpStateChangeMrtMessage.STATE_IDLE, + record.message.old_state) + eq_(mrtlib.Bgp4MpStateChangeMrtMessage.STATE_CONNECT, + record.message.new_state) + eq_(b'', rest) + + def test_serialize_state_change_afi_ipv6(self): + peer_ip = 'fe80::1' + local_ip = 'fe80::2' + buf = ( + b'\x11\x11\x11\x11' # timestamp + b'\x00\x10\x00\x00' # type=TYPE_BGP4MP(16), + # subtype=SUBTYPE_BGP4MP_STATE_CHANGE(0) + b'\x00\x00\x00\x2c' # length=44 + b'\xfd\xe9\xfd\xea' # peer_as=65001, local_as=65002 + b'\x22\x22\x00\x02' # if_index, addr_family=AFI_IPv6(2) + + addrconv.ipv6.text_to_bin(peer_ip) # peer_ip + + addrconv.ipv6.text_to_bin(local_ip) + # local_ip + b'\x00\x01\x00\x02' # old_state=STATE_IDLE(1), + # new_state=STATE_CONNECT(2) + ) + + message = mrtlib.Bgp4MpStateChangeMrtMessage( + peer_as=65001, + local_as=65002, + if_index=0x2222, + peer_ip=peer_ip, + local_ip=local_ip, + old_state=mrtlib.Bgp4MpStateChangeMrtMessage.STATE_IDLE, + new_state=mrtlib.Bgp4MpStateChangeMrtMessage.STATE_CONNECT, + # afi=mrtlib.Bgp4MpStateChangeMrtMessage.AFI_IPv4, + ) + record = mrtlib.Bgp4MpMrtRecord( + message=message, + timestamp=0x11111111, + # type_=None, + # subtype=None, + # length=None, + ) + + output = record.serialize() + + eq_(buf, output) diff -Nru ryu-4.9/ryu/tests/unit/lib/test_ofctl_string.py ryu-4.15/ryu/tests/unit/lib/test_ofctl_string.py --- ryu-4.9/ryu/tests/unit/lib/test_ofctl_string.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/lib/test_ofctl_string.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,151 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from ryu.lib import ofctl_string +from ryu.ofproto import ofproto_parser +from ryu.ofproto.ofproto_protocol import ProtocolDesc +from ryu.ofproto import ofproto_v1_5 + + +class Test_OfctlString(unittest.TestCase): + """Test cases for ryu.ofproto.ofp_instruction_from_str. 
+ """ + + def __init__(self, methodName): + print('init %s' % methodName) + self.fake_dp_of15 = ProtocolDesc(ofproto_v1_5.OFP_VERSION) + self.maxDiff = None + super(Test_OfctlString, self).__init__(methodName) + + def _test_str(self, dp, ofctl_str, *jsondict): + json = ofctl_string.ofp_instruction_from_str( + ofproto_v1_5, ofctl_str) + inst = ofproto_parser.ofp_instruction_from_jsondict(dp, json) + self.assertEqual(len(inst), len(jsondict)) + for i in range(len(inst)): + self.assertEqual(jsondict[i], inst[i].to_jsondict()) + + def test_drop(self): + inst = ofctl_string.ofp_instruction_from_str( + ofproto_v1_5, 'drop') + self.assertEqual(inst, []) + + def test_conjunction(self): + self._test_str(self.fake_dp_of15, + 'conjunction(0x234, 1/3),conjunction(0xdea, 2/2)', + {'OFPInstructionActions': { + 'actions': [ + {'NXActionConjunction': {'clause': 0, + 'experimenter': 8992, + 'id': 0x234, + 'len': None, + 'n_clauses': 3, + 'subtype': 34, + 'type': 65535}}, + {'NXActionConjunction': {'clause': 1, + 'experimenter': 8992, + 'id': 0xdea, + 'len': None, + 'n_clauses': 2, + 'subtype': 34, + 'type': 65535}}], + 'type': 4}}) + + def test_ct(self): + self._test_str(self.fake_dp_of15, + 'ct(commit)', + {'OFPInstructionActions': { + 'actions': [{'NXActionCT': {'actions': [], + 'alg': 0, + 'experimenter': 8992, + 'flags': 1, + 'len': None, + 'recirc_table': 255, + 'subtype': 35, + 'type': 65535, + 'zone_ofs_nbits': 0, + 'zone_src': u''}}], + 'type': 4}}) + + def test_ct_2(self): + self._test_str(self.fake_dp_of15, + 'ct(commit,zone=NXM_NX_REG8[0..15],' + 'exec(set_field:1->ct_mark))', + {'OFPInstructionActions': { + 'actions': [{'NXActionCT': { + 'actions': [ + {'OFPActionSetField': { + 'field': {'OXMTlv': {'field': 'ct_mark', + 'mask': None, + 'value': 1}}, + 'len': 8, + 'type': 25}}], + 'alg': 0, + 'experimenter': 8992, + 'flags': 1, + 'len': None, + 'recirc_table': 255, + 'subtype': 35, + 'type': 65535, + 'zone_ofs_nbits': 15, + 'zone_src': u'reg8'}}], + 'type': 4}}) + + def test_resubmit(self): + self._test_str(self.fake_dp_of15, + 'resubmit(,10)', + {'OFPInstructionActions': + {'actions': [{'NXActionResubmitTable': { + 'experimenter': 8992, + 'in_port': 65528, + 'len': None, + 'subtype': 14, + 'table_id': 10, + 'type': 65535}}], + 'type': 4}}) + + def test_set_field(self): + self._test_str(self.fake_dp_of15, + 'set_field:10/0xff->tun_id', + {'OFPInstructionActions': + {'actions': [{'OFPActionSetField': { + 'field': {'OXMTlv': {'field': 'tunnel_id', + 'mask': 255, + 'value': 10}}, + 'len': 8, + 'type': 25}}], + 'type': 4}}) + + def test_pop_vlan(self): + self._test_str(self.fake_dp_of15, + 'pop_vlan', + {'OFPInstructionActions': + {'actions': [{'OFPActionPopVlan': {'len': 8, + 'type': 18}}], + 'type': 4}}) + + def test_multi(self): + self._test_str(self.fake_dp_of15, + 'pop_vlan,goto_table:33', + {'OFPInstructionActions': + {'actions': [{'OFPActionPopVlan': {'len': 8, + 'type': 18}}], + 'type': 4}}, + {'OFPInstructionGotoTable': + {'len': 8, + 'table_id': 33, + 'type': 1}}) diff -Nru ryu-4.9/ryu/tests/unit/packet/test_bgp.py ryu-4.15/ryu/tests/unit/packet/test_bgp.py --- ryu-4.9/ryu/tests/unit/packet/test_bgp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/packet/test_bgp.py 2017-07-02 11:08:32.000000000 +0000 @@ -44,6 +44,99 @@ bgp.BGPPathAttributePmsiTunnel.TYPE_INGRESS_REPLICATION ) +RULES_BASE = [ + # port='>=8000' + bgp.FlowSpecPort( + operator=(bgp.FlowSpecPort.GT | bgp.FlowSpecPort.EQ), + value=8000), + # port='&<=9000' + bgp.FlowSpecPort( + operator=(bgp.FlowSpecPort.AND | 
bgp.FlowSpecPort.LT | + bgp.FlowSpecPort.EQ), + value=9000), + # port='==80' + bgp.FlowSpecPort(operator=bgp.FlowSpecPort.EQ, value=80), + # dst_port=8080 + bgp.FlowSpecDestPort(operator=bgp.FlowSpecDestPort.EQ, value=8080), + # dst_port='>9000' + bgp.FlowSpecDestPort(operator=bgp.FlowSpecDestPort.GT, value=9000), + # dst_port='&<9050' + bgp.FlowSpecDestPort( + operator=(bgp.FlowSpecDestPort.AND | bgp.FlowSpecDestPort.LT), + value=9050), + # dst_port='<=1000' + bgp.FlowSpecDestPort( + operator=(bgp.FlowSpecDestPort.LT | bgp.FlowSpecDestPort.EQ), + value=1000), + # src_port='<=9090' + bgp.FlowSpecSrcPort( + operator=(bgp.FlowSpecSrcPort.LT | bgp.FlowSpecSrcPort.EQ), + value=9090), + # src_port='& >=9080' + bgp.FlowSpecSrcPort( + operator=(bgp.FlowSpecSrcPort.AND | bgp.FlowSpecSrcPort.GT | + bgp.FlowSpecSrcPort.EQ), + value=9080), + # src_port='<10100' + bgp.FlowSpecSrcPort( + operator=bgp.FlowSpecSrcPort.LT, value=10100), + # src_port='>10000' + bgp.FlowSpecSrcPort( + operator=(bgp.FlowSpecSrcPort.AND | bgp.FlowSpecSrcPort.GT), + value=10000), + # icmp_type=0 + bgp.FlowSpecIcmpType(operator=bgp.FlowSpecIcmpType.EQ, value=0), + # icmp_code=6 + bgp.FlowSpecIcmpCode(operator=bgp.FlowSpecIcmpCode.EQ, value=6), + # tcp_flags='ACK+FIN' + bgp.FlowSpecTCPFlags( + operator=0, # Partial match + value=(bgp.FlowSpecTCPFlags.SYN | bgp.FlowSpecTCPFlags.ACK)), + # tcp_flags='&!=URGENT' + bgp.FlowSpecTCPFlags( + operator=(bgp.FlowSpecTCPFlags.AND | bgp.FlowSpecTCPFlags.NOT), + value=bgp.FlowSpecTCPFlags.URGENT), + # packet_len=1000 + bgp.FlowSpecPacketLen( + operator=bgp.FlowSpecPacketLen.EQ, value=1000), + # packet_len=1100 + bgp.FlowSpecPacketLen( + operator=(bgp.FlowSpecTCPFlags.AND | bgp.FlowSpecPacketLen.EQ), + value=1100), + # dscp=22 + bgp.FlowSpecDSCP(operator=bgp.FlowSpecDSCP.EQ, value=22), + # dscp=24 + bgp.FlowSpecDSCP(operator=bgp.FlowSpecDSCP.EQ, value=24), +] + +RULES_L2VPN_BASE = [ + # ether_type=0x0800 + bgp.FlowSpecEtherType(operator=bgp.FlowSpecEtherType.EQ, value=0x0800), + # source_mac='12:34:56:78:90:AB' + bgp.FlowSpecSourceMac(addr='12:34:56:78:90:AB', length=6), + # dest_mac='DE:EF:C0:FF:EE:DD' + bgp.FlowSpecDestinationMac(addr='BE:EF:C0:FF:EE:DD', length=6), + # llc_dsap=0x42 + bgp.FlowSpecLLCDSAP(operator=bgp.FlowSpecLLCDSAP.EQ, value=0x42), + # llc_ssap=0x42 + bgp.FlowSpecLLCSSAP(operator=bgp.FlowSpecLLCSSAP.EQ, value=0x42), + # llc_control=100 + bgp.FlowSpecLLCControl(operator=bgp.FlowSpecLLCControl.EQ, value=100), + # snap=0x12345 + bgp.FlowSpecSNAP(operator=bgp.FlowSpecSNAP.EQ, value=0x12345), + # vlan_id='>4000' + bgp.FlowSpecVLANID(operator=bgp.FlowSpecVLANID.GT, value=4000), + # vlan_cos='>=3' + bgp.FlowSpecVLANCoS( + operator=(bgp.FlowSpecVLANCoS.GT | bgp.FlowSpecVLANCoS.EQ), value=3), + # inner_vlan_id='<3000' + bgp.FlowSpecInnerVLANID(operator=bgp.FlowSpecInnerVLANID.LT, value=3000), + # inner_vlan_cos='<=5' + bgp.FlowSpecInnerVLANCoS( + operator=(bgp.FlowSpecInnerVLANCoS.LT | bgp.FlowSpecInnerVLANCoS.EQ), + value=5), +] + class Test_bgp(unittest.TestCase): """ Test case for ryu.lib.packet.bgp @@ -115,6 +208,17 @@ mp_nlri2 = [ bgp.LabelledIPAddrPrefix(24, '192.168.0.0', labels=[1, 2, 3]) ] + mp_nlri_v6 = [ + bgp.LabelledVPNIP6AddrPrefix(64, '2001:db8:1111::', + route_dist='200:200', + labels=[1, 2, 3]), + bgp.LabelledVPNIP6AddrPrefix(64, '2001:db8:2222::', + route_dist='10.0.0.1:10000', + labels=[5, 6, 7, 8]), + ] + mp_nlri2_v6 = [ + bgp.LabelledIP6AddrPrefix(64, '2001:db8:3333::', labels=[1, 2, 3]) + ] communities = [ bgp.BGP_COMMUNITY_NO_EXPORT, 
bgp.BGP_COMMUNITY_NO_ADVERTISE, @@ -133,7 +237,11 @@ bgp.BGPEvpnMacMobilityExtendedCommunity( subtype=0, flags=0xff, sequence_number=0x11223344), bgp.BGPEvpnEsiLabelExtendedCommunity( - subtype=1, flags=0xff, esi_label=0x112233), + subtype=1, flags=0xff, label=b'\xFF\xFF\xFF'), + bgp.BGPEvpnEsiLabelExtendedCommunity( + subtype=1, flags=0xff, mpls_label=0xfffff), + bgp.BGPEvpnEsiLabelExtendedCommunity( + subtype=1, flags=0xff, vni=0xffffff), bgp.BGPEvpnEsImportRTExtendedCommunity( subtype=2, es_import="aa:bb:cc:dd:ee:ff"), bgp.BGPUnknownExtendedCommunity(type_=99, value=b'abcdefg'), @@ -187,6 +295,13 @@ bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP, safi=safi.MPLS_LABEL, next_hop='1.1.1.1', nlri=mp_nlri2), + bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP6, safi=safi.MPLS_VPN, + next_hop=['2001:db8::1'], + nlri=mp_nlri_v6), + bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP6, safi=safi.MPLS_LABEL, + next_hop=['2001:db8::1', + 'fe80::1'], + nlri=mp_nlri2_v6), bgp.BGPPathAttributeMpUnreachNLRI(afi=afi.IP, safi=safi.MPLS_VPN, withdrawn_routes=mp_nlri), bgp.BGPPathAttributeUnknown(flags=0, type_=100, value=300 * b'bar') @@ -261,6 +376,15 @@ 'evpn_nlri_inc_multi_eth_tag', 'evpn_nlri_eth_seg', 'evpn_nlri_ip_prefix', + 'flowspec_nlri_ipv4', + 'flowspec_nlri_vpn4', + 'flowspec_nlri_ipv6', + 'flowspec_nlri_vpn6', + 'flowspec_nlri_l2vpn', + 'flowspec_action_traffic_rate', + 'flowspec_action_traffic_action', + 'flowspec_action_redirect', + 'flowspec_action_traffic_marking', ] for f in files: @@ -277,6 +401,33 @@ eq_(buf, pkt.data, "b'%s' != b'%s'" % (binary_str(buf), binary_str(pkt.data))) + def test_vlan_action_parser(self): + action = bgp.BGPFlowSpecVlanActionCommunity( + actions_1=(bgp.BGPFlowSpecVlanActionCommunity.POP | + bgp.BGPFlowSpecVlanActionCommunity.SWAP), + vlan_1=3000, + cos_1=3, + actions_2=bgp.BGPFlowSpecVlanActionCommunity.PUSH, + vlan_2=4000, + cos_2=2, + ) + binmsg = action.serialize() + msg, rest = bgp.BGPFlowSpecVlanActionCommunity.parse(binmsg) + eq_(str(action), str(msg)) + eq_(rest, b'') + + def test_tpid_action_parser(self): + action = bgp.BGPFlowSpecTPIDActionCommunity( + actions=(bgp.BGPFlowSpecTPIDActionCommunity.TI | + bgp.BGPFlowSpecTPIDActionCommunity.TO), + tpid_1=5, + tpid_2=6, + ) + binmsg = action.serialize() + msg, rest = bgp.BGPFlowSpecTPIDActionCommunity.parse(binmsg) + eq_(str(action), str(msg)) + eq_(rest, b'') + def test_json1(self): opt_param = [bgp.BGPOptParamCapabilityUnknown(cap_code=200, cap_value=b'hoge'), @@ -311,6 +462,20 @@ route_dist='10.0.0.1:10000', labels=[5, 6, 7, 8]), ] + mp_nlri2 = [ + bgp.LabelledIPAddrPrefix(24, '192.168.0.0', labels=[1, 2, 3]) + ] + mp_nlri_v6 = [ + bgp.LabelledVPNIP6AddrPrefix(64, '2001:db8:1111::', + route_dist='200:200', + labels=[1, 2, 3]), + bgp.LabelledVPNIP6AddrPrefix(64, '2001:db8:2222::', + route_dist='10.0.0.1:10000', + labels=[5, 6, 7, 8]), + ] + mp_nlri2_v6 = [ + bgp.LabelledIP6AddrPrefix(64, '2001:db8:3333::', labels=[1, 2, 3]) + ] communities = [ bgp.BGP_COMMUNITY_NO_EXPORT, bgp.BGP_COMMUNITY_NO_ADVERTISE, @@ -329,7 +494,11 @@ bgp.BGPEvpnMacMobilityExtendedCommunity( subtype=0, flags=0xff, sequence_number=0x11223344), bgp.BGPEvpnEsiLabelExtendedCommunity( - subtype=1, flags=0xff, esi_label=0x112233), + subtype=1, flags=0xff, label=b'\xFF\xFF\xFF'), + bgp.BGPEvpnEsiLabelExtendedCommunity( + subtype=1, flags=0xff, mpls_label=0xfffff), + bgp.BGPEvpnEsiLabelExtendedCommunity( + subtype=1, flags=0xff, vni=0xffffff), bgp.BGPEvpnEsImportRTExtendedCommunity( subtype=2, es_import="aa:bb:cc:dd:ee:ff"), 
bgp.BGPUnknownExtendedCommunity(type_=99, value=b'abcdefg'), @@ -378,6 +547,16 @@ bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP, safi=safi.MPLS_VPN, next_hop='1.1.1.1', nlri=mp_nlri), + bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP, safi=safi.MPLS_LABEL, + next_hop='1.1.1.1', + nlri=mp_nlri2), + bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP6, safi=safi.MPLS_VPN, + next_hop=['2001:db8::1'], + nlri=mp_nlri_v6), + bgp.BGPPathAttributeMpReachNLRI(afi=afi.IP6, safi=safi.MPLS_LABEL, + next_hop=['2001:db8::1', + 'fe80::1'], + nlri=mp_nlri2_v6), bgp.BGPPathAttributeMpUnreachNLRI(afi=afi.IP, safi=safi.MPLS_VPN, withdrawn_routes=mp_nlri), bgp.BGPPathAttributeUnknown(flags=0, type_=100, value=300 * b'bar') @@ -392,3 +571,243 @@ jsondict = msg1.to_jsondict() msg2 = bgp.BGPUpdate.from_jsondict(jsondict['BGPUpdate']) eq_(str(msg1), str(msg2)) + + def test_flowspec_user_interface_ipv4(self): + rules = RULES_BASE + [ + # dst_prefix='10.0.0.0/24 + bgp.FlowSpecDestPrefix(addr='10.0.0.0', length=24), + # src_prefix='20.0.0.1/24' + bgp.FlowSpecSrcPrefix(addr='20.0.0.0', length=24), + # ip_proto='6' + bgp.FlowSpecIPProtocol( + operator=bgp.FlowSpecIPProtocol.EQ, value=6), + # fragment='LF' + bgp.FlowSpecFragment( + operator=0, # Partial match + value=bgp.FlowSpecFragment.LF), + # fragment='==FF' + bgp.FlowSpecFragment( + operator=bgp.FlowSpecFragment.MATCH, + value=bgp.FlowSpecFragment.FF), + # fragment='&==ISF' + bgp.FlowSpecFragment( + operator=(bgp.FlowSpecFragment.AND | + bgp.FlowSpecFragment.MATCH), + value=bgp.FlowSpecFragment.ISF), + # fragment='!=DF' + bgp.FlowSpecFragment( + operator=bgp.FlowSpecFragment.NOT, + value=bgp.FlowSpecFragment.DF) + ] + + msg = bgp.FlowSpecIPv4NLRI.from_user( + dst_prefix='10.0.0.0/24', + src_prefix='20.0.0.0/24', + ip_proto='6', + port='>=8000 & <=9000 | ==80', + dst_port='8080 >9000&<9050 | <=1000', + src_port='<=9090 & >=9080 <10100 & >10000', + icmp_type=0, + icmp_code=6, + tcp_flags='SYN+ACK & !=URGENT', + packet_len='1000 & 1100', + dscp='22 24', + fragment='LF ==FF&==ISF | !=DF') + msg2 = bgp.FlowSpecIPv4NLRI(rules=rules) + binmsg = msg.serialize() + binmsg2 = msg2.serialize() + eq_(str(msg), str(msg2)) + eq_(binary_str(binmsg), binary_str(binmsg2)) + msg3, rest = bgp.FlowSpecIPv4NLRI.parser(binmsg) + eq_(str(msg), str(msg3)) + eq_(rest, b'') + + def test_flowspec_user_interface_vpv4(self): + rules = RULES_BASE + [ + # dst_prefix='10.0.0.0/24 + bgp.FlowSpecDestPrefix(addr='10.0.0.0', length=24), + # src_prefix='20.0.0.1/24' + bgp.FlowSpecSrcPrefix(addr='20.0.0.0', length=24), + # ip_proto='6' + bgp.FlowSpecIPProtocol( + operator=bgp.FlowSpecIPProtocol.EQ, value=6), + # fragment='LF' + bgp.FlowSpecFragment( + operator=0, # Partial match + value=bgp.FlowSpecFragment.LF), + # fragment='==FF' + bgp.FlowSpecFragment( + operator=bgp.FlowSpecFragment.MATCH, + value=bgp.FlowSpecFragment.FF), + # fragment='&==ISF' + bgp.FlowSpecFragment( + operator=(bgp.FlowSpecFragment.AND | + bgp.FlowSpecFragment.MATCH), + value=bgp.FlowSpecFragment.ISF), + # fragment='!=DF' + bgp.FlowSpecFragment( + operator=bgp.FlowSpecFragment.NOT, + value=bgp.FlowSpecFragment.DF) + ] + msg = bgp.FlowSpecVPNv4NLRI.from_user( + route_dist='65001:250', + dst_prefix='10.0.0.0/24', + src_prefix='20.0.0.0/24', + ip_proto='6', + port='>=8000 & <=9000 | ==80', + dst_port='8080 >9000&<9050 | <=1000', + src_port='<=9090 & >=9080 <10100 & >10000', + icmp_type=0, + icmp_code=6, + tcp_flags='SYN+ACK & !=URGENT', + packet_len='1000 & 1100', + dscp='22 24', + fragment='LF ==FF&==ISF | !=DF') + msg2 = 
bgp.FlowSpecVPNv4NLRI(route_dist='65001:250', rules=rules) + binmsg = msg.serialize() + binmsg2 = msg2.serialize() + eq_(str(msg), str(msg2)) + eq_(binary_str(binmsg), binary_str(binmsg2)) + msg3, rest = bgp.FlowSpecVPNv4NLRI.parser(binmsg) + eq_(str(msg), str(msg3)) + eq_(rest, b'') + + def test_flowspec_user_interface_ipv6(self): + rules = RULES_BASE + [ + # dst_prefix='2001:2/128/32' + bgp.FlowSpecIPv6DestPrefix( + addr='2001::2', offset=32, length=128), + # src_prefix='3002::3/128' + bgp.FlowSpecIPv6SrcPrefix( + addr='3002::3', length=128), + # ip_proto='6' + bgp.FlowSpecNextHeader( + operator=bgp.FlowSpecNextHeader.EQ, value=6), + # fragment='LF' + bgp.FlowSpecIPv6Fragment( + operator=0, # Partial match + value=bgp.FlowSpecFragment.LF), + # fragment='==FF' + bgp.FlowSpecIPv6Fragment( + operator=bgp.FlowSpecFragment.MATCH, + value=bgp.FlowSpecFragment.FF), + # fragment='&==ISF' + bgp.FlowSpecIPv6Fragment( + operator=(bgp.FlowSpecFragment.AND | + bgp.FlowSpecFragment.MATCH), + value=bgp.FlowSpecFragment.ISF), + # fragment='!=LF' + bgp.FlowSpecIPv6Fragment( + operator=bgp.FlowSpecFragment.NOT, + value=bgp.FlowSpecFragment.LF), + # flowlabel='100' + bgp.FlowSpecIPv6FlowLabel( + operator=bgp.FlowSpecIPv6FlowLabel.EQ, + value=100), + ] + msg = bgp.FlowSpecIPv6NLRI.from_user( + dst_prefix='2001::2/128/32', + src_prefix='3002::3/128', + next_header='6', + port='>=8000 & <=9000 | ==80', + dst_port='8080 >9000&<9050 | <=1000', + src_port='<=9090 & >=9080 <10100 & >10000', + icmp_type=0, + icmp_code=6, + tcp_flags='SYN+ACK & !=URGENT', + packet_len='1000 & 1100', + dscp='22 24', + fragment='LF ==FF&==ISF | !=LF', + flow_label=100, + ) + msg2 = bgp.FlowSpecIPv6NLRI(rules=rules) + binmsg = msg.serialize() + binmsg2 = msg2.serialize() + eq_(str(msg), str(msg2)) + eq_(binary_str(binmsg), binary_str(binmsg2)) + msg3, rest = bgp.FlowSpecIPv6NLRI.parser(binmsg) + eq_(str(msg), str(msg3)) + eq_(rest, b'') + + def test_flowspec_user_interface_vpnv6(self): + rules = RULES_BASE + [ + # dst_prefix='2001:2/128/32' + bgp.FlowSpecIPv6DestPrefix( + addr='2001::2', offset=32, length=128), + # src_prefix='3002::3/128' + bgp.FlowSpecIPv6SrcPrefix( + addr='3002::3', length=128), + # ip_proto='6' + bgp.FlowSpecNextHeader( + operator=bgp.FlowSpecNextHeader.EQ, value=6), + # fragment='LF' + bgp.FlowSpecIPv6Fragment( + operator=0, # Partial match + value=bgp.FlowSpecFragment.LF), + # fragment='==FF' + bgp.FlowSpecIPv6Fragment( + operator=bgp.FlowSpecFragment.MATCH, + value=bgp.FlowSpecFragment.FF), + # fragment='&==ISF' + bgp.FlowSpecIPv6Fragment( + operator=(bgp.FlowSpecFragment.AND | + bgp.FlowSpecFragment.MATCH), + value=bgp.FlowSpecFragment.ISF), + # fragment='!=LF' + bgp.FlowSpecIPv6Fragment( + operator=bgp.FlowSpecFragment.NOT, + value=bgp.FlowSpecFragment.LF), + # flowlabel='100' + bgp.FlowSpecIPv6FlowLabel( + operator=bgp.FlowSpecIPv6FlowLabel.EQ, + value=100), + ] + msg = bgp.FlowSpecVPNv6NLRI.from_user( + route_dist='65001:250', + dst_prefix='2001::2/128/32', + src_prefix='3002::3/128', + next_header='6', + port='>=8000 & <=9000 | ==80', + dst_port='8080 >9000&<9050 | <=1000', + src_port='<=9090 & >=9080 <10100 & >10000', + icmp_type=0, + icmp_code=6, + tcp_flags='SYN+ACK & !=URGENT', + packet_len='1000 & 1100', + dscp='22 24', + fragment='LF ==FF&==ISF | !=LF', + flow_label=100, + ) + msg2 = bgp.FlowSpecVPNv6NLRI(route_dist='65001:250', rules=rules) + binmsg = msg.serialize() + binmsg2 = msg2.serialize() + eq_(str(msg), str(msg2)) + eq_(binary_str(binmsg), binary_str(binmsg2)) + msg3, rest = 
bgp.FlowSpecVPNv6NLRI.parser(binmsg) + eq_(str(msg), str(msg3)) + eq_(rest, b'') + + def test_flowspec_user_interface_l2vpn(self): + rules = RULES_L2VPN_BASE + msg = bgp.FlowSpecL2VPNNLRI.from_user( + route_dist='65001:250', + ether_type=0x0800, + src_mac='12:34:56:78:90:AB', + dst_mac='BE:EF:C0:FF:EE:DD', + llc_dsap=0x42, + llc_ssap=0x42, + llc_control=100, + snap=0x12345, + vlan_id='>4000', + vlan_cos='>=3', + inner_vlan_id='<3000', + inner_vlan_cos='<=5', + ) + msg2 = bgp.FlowSpecL2VPNNLRI(route_dist='65001:250', rules=rules) + binmsg = msg.serialize() + binmsg2 = msg2.serialize() + eq_(str(msg), str(msg2)) + eq_(binary_str(binmsg), binary_str(binmsg2)) + msg3, rest = bgp.FlowSpecL2VPNNLRI.parser(binmsg) + eq_(str(msg), str(msg3)) + eq_(rest, b'') diff -Nru ryu-4.9/ryu/tests/unit/packet/test_dhcp.py ryu-4.15/ryu/tests/unit/packet/test_dhcp.py --- ryu-4.9/ryu/tests/unit/packet/test_dhcp.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/packet/test_dhcp.py 2017-07-02 11:08:32.000000000 +0000 @@ -16,10 +16,13 @@ import inspect import logging -import six import struct import unittest + +import six from nose.tools import eq_ +from nose.tools import ok_ + from ryu.lib import addrconv from ryu.lib.packet import dhcp @@ -42,7 +45,7 @@ siaddr = '192.168.30.30' giaddr = '192.168.40.40' sname = 'abc' - boot_file = b'' + boot_file = '' option_list = [ dhcp.option(dhcp.DHCP_MESSAGE_TYPE_OPT, b'\x02', 1), @@ -62,24 +65,25 @@ ciaddr=ciaddr, yiaddr=yiaddr, siaddr=siaddr, giaddr=giaddr, sname=sname, boot_file=boot_file) - buf = b"\x02\x01\x06\x00\x00\x00\x00\x01\x00\x00\x00\x01\xc0\xa8\x0a\x0a"\ - + b"\xc0\xa8\x14\x14\xc0\xa8\x1e\x1e\xc0\xa8\x28\x28\xaa\xaa\xaa\xaa"\ - + b"\xaa\xaa\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x61\x62\x63\x00"\ - + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ - + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ - + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ - + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ - + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ - + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ - + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ - + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ - + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ - + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ - + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"\ - + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63"\ - + b"\x35\x01\x02\x01\x04\xff\xff\xff\x00\x03\x04\xc0\xa8\x0a\x09\x06"\ - + b"\x04\xc0\xa8\x0a\x09\x33\x04\x00\x03\xf4\x80\x3a\x04\x00\x01\xfa"\ - + b"\x40\x3b\x04\x00\x03\x75\xf0\x36\x04\xc0\xa8\x0a\x09\xff" + buf = ( + b"\x02\x01\x06\x00\x00\x00\x00\x01\x00\x00\x00\x01\xc0\xa8\x0a\x0a" + b"\xc0\xa8\x14\x14\xc0\xa8\x1e\x1e\xc0\xa8\x28\x28\xaa\xaa\xaa\xaa" + b"\xaa\xaa\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x61\x62\x63\x00" + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + 
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x63\x82\x53\x63" + b"\x35\x01\x02\x01\x04\xff\xff\xff\x00\x03\x04\xc0\xa8\x0a\x09\x06" + b"\x04\xc0\xa8\x0a\x09\x33\x04\x00\x03\xf4\x80\x3a\x04\x00\x01\xfa" + b"\x40\x3b\x04\x00\x03\x75\xf0\x36\x04\xc0\xa8\x0a\x09\xff") def setUp(self): pass @@ -105,11 +109,7 @@ eq_(str(self.options), str(self.dh.options)) def test_parser(self): - _res = self.dh.parser(self.buf) - if type(_res) is tuple: - res = _res[0] - else: - res = _res + res, _, rest = dhcp.dhcp.parser(self.buf) eq_(self.op, res.op) eq_(self.htype, res.htype) @@ -126,19 +126,29 @@ # sname is 64 byte length. rest of data is filled by '\x00'. eq_(self.sname.ljust(64, '\x00'), res.sname) # boof_file is 128 byte length. rest of data is filled by '\x00'. - eq_(self.boot_file.ljust(128, b'\x00'), res.boot_file) + eq_(self.boot_file.ljust(128, '\x00'), res.boot_file) eq_(str(self.options), str(res.options)) + eq_(b'', rest) def test_parser_corrupted(self): - buf = self.buf[:128 - (14 + 20 + 8)] - _res = self.dh.parser(buf) + corrupt_buf = self.buf[:-4] + pkt, _, rest = dhcp.dhcp.parser(corrupt_buf) + + ok_(isinstance(pkt, dhcp.dhcp)) + ok_(isinstance(pkt.options, dhcp.options)) + for opt in pkt.options.option_list[:-1]: + ok_(isinstance(opt, dhcp.option)) + ok_(isinstance(pkt.options.option_list[-1], six.binary_type)) + + buf = pkt.serialize() + eq_(str(buf), str(corrupt_buf)) + eq_(b'', rest) def test_serialize(self): - data = bytearray() - prev = None - buf = self.dh.serialize(data, prev) + buf = self.dh.serialize() - res = struct.unpack_from(dhcp.dhcp._DHCP_PACK_STR, six.binary_type(buf)) + res = struct.unpack_from(dhcp.dhcp._DHCP_PACK_STR, + six.binary_type(buf)) eq_(self.op, res[0]) eq_(self.htype, res[1]) @@ -155,7 +165,7 @@ # sname is 64 byte length. rest of data is filled by '\x00'. eq_(self.sname.ljust(64, '\x00'), res[12].decode('ascii')) # boof_file is 128 byte length. rest of data is filled by '\x00'. 
- eq_(self.boot_file.ljust(128, b'\x00'), res[13]) + eq_(self.boot_file.ljust(128, '\x00'), res[13].decode('ascii')) options = dhcp.options.parser( buf[struct.calcsize(dhcp.dhcp._DHCP_PACK_STR):]) eq_(str(self.options), str(options)) @@ -206,56 +216,3 @@ jsondict = self.dh.to_jsondict() dh = dhcp.dhcp.from_jsondict(jsondict['dhcp']) eq_(str(self.dh), str(dh)) - - -class Test_dhcp_offer_with_hlen_zero(unittest.TestCase): - - op = dhcp.DHCP_BOOT_REPLY - chaddr = 'aa:aa:aa:aa:aa:aa' - htype = 1 - hlen = 6 - hops = 0 - xid = 1 - secs = 0 - flags = 1 - ciaddr = '192.168.10.10' - yiaddr = '192.168.20.20' - siaddr = '192.168.30.30' - giaddr = '192.168.40.40' - sname = 'abc' - boot_file = '' - - option_list = [ - dhcp.option(dhcp.DHCP_MESSAGE_TYPE_OPT, b'\x02', 1), - dhcp.option(dhcp.DHCP_SUBNET_MASK_OPT, b'\xff\xff\xff\x00', 4), - dhcp.option(dhcp.DHCP_GATEWAY_ADDR_OPT, b'\xc0\xa8\x0a\x09', 4), - dhcp.option(dhcp.DHCP_DNS_SERVER_ADDR_OPT, b'\xc0\xa8\x0a\x09', 4), - dhcp.option(dhcp.DHCP_IP_ADDR_LEASE_TIME_OPT, b'\x00\x03\xf4\x80', 4), - dhcp.option(dhcp.DHCP_RENEWAL_TIME_OPT, b'\x00\x01\xfa\x40', 4), - dhcp.option(dhcp.DHCP_REBINDING_TIME_OPT, b'\x00\x03\x75\xf0', 4), - dhcp.option(dhcp.DHCP_SERVER_IDENTIFIER_OPT, b'\xc0\xa8\x0a\x09', 4)] - magic_cookie = '99.130.83.99' - options = dhcp.options(option_list=option_list, options_len=50, - magic_cookie=magic_cookie) - - dh = dhcp.dhcp(op, chaddr, options, htype=htype, hlen=0, - hops=hops, xid=xid, secs=secs, flags=flags, - ciaddr=ciaddr, yiaddr=yiaddr, siaddr=siaddr, - giaddr=giaddr, sname=sname, boot_file=boot_file) - - def test_init(self): - eq_(self.op, self.dh.op) - eq_(self.htype, self.dh.htype) - eq_(self.hlen, self.dh.hlen) - eq_(self.hops, self.dh.hops) - eq_(self.xid, self.dh.xid) - eq_(self.secs, self.dh.secs) - eq_(self.flags, self.dh.flags) - eq_(self.ciaddr, self.dh.ciaddr) - eq_(self.yiaddr, self.dh.yiaddr) - eq_(self.siaddr, self.dh.siaddr) - eq_(self.giaddr, self.dh.giaddr) - eq_(self.chaddr, self.dh.chaddr) - eq_(self.sname, self.dh.sname) - eq_(self.boot_file, self.dh.boot_file) - eq_(str(self.options), str(self.dh.options)) diff -Nru ryu-4.9/ryu/tests/unit/packet/test_geneve.py ryu-4.15/ryu/tests/unit/packet/test_geneve.py --- ryu-4.9/ryu/tests/unit/packet/test_geneve.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/packet/test_geneve.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,62 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
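
The pcap-driven packet tests introduced below (geneve, and likewise the reworked gre, openflow and zebra cases further on) all follow the same round-trip pattern: read each captured frame with pcaplib.Reader, parse it with packet.Packet, assert that the expected protocol instance is present, then reserialize and compare byte-for-byte against the original capture. A minimal sketch of that pattern follows; 'sample.pcap' is only a placeholder capture file, not something shipped in the tree.

.. code-block:: python

    from ryu.lib import pcaplib
    from ryu.lib.packet import geneve, packet

    # Sketch of the round-trip check used by these pcap-based tests.
    # 'sample.pcap' is a placeholder capture, not a file shipped with Ryu.
    with open('sample.pcap', 'rb') as f:
        for _ts, buf in pcaplib.Reader(f):
            pkt = packet.Packet(buf)          # parse the captured frame
            assert isinstance(pkt.get_protocol(geneve.geneve), geneve.geneve)
            pkt.serialize()                   # re-encode the parsed packet
            assert buf == pkt.data            # byte-for-byte round trip
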
+ +from __future__ import print_function + +import logging +import os +import sys + +import unittest +from nose.tools import eq_ +from nose.tools import ok_ + +from ryu.lib import pcaplib +from ryu.lib.packet import geneve +from ryu.lib.packet import packet +from ryu.utils import binary_str + + +LOG = logging.getLogger(__name__) + +GENEVE_DATA_DIR = os.path.join( + os.path.dirname(sys.modules[__name__].__file__), + '../../packet_data/pcap/') + + +class Test_geneve(unittest.TestCase): + """ + Test case for ryu.lib.packet.geneve. + """ + + def test_parser(self): + files = [ + 'geneve_unknown', + ] + + for f in files: + # print('*** testing %s ...' % f) + for _, buf in pcaplib.Reader( + open(GENEVE_DATA_DIR + f + '.pcap', 'rb')): + # Checks if message can be parsed as expected. + pkt = packet.Packet(buf) + geneve_pkt = pkt.get_protocol(geneve.geneve) + ok_(isinstance(geneve_pkt, geneve.geneve), + 'Failed to parse Geneve message: %s' % pkt) + + # Checks if message can be serialized as expected. + pkt.serialize() + eq_(buf, pkt.data, + "b'%s' != b'%s'" % (binary_str(buf), binary_str(pkt.data))) diff -Nru ryu-4.9/ryu/tests/unit/packet/test_gre.py ryu-4.15/ryu/tests/unit/packet/test_gre.py --- ryu-4.9/ryu/tests/unit/packet/test_gre.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/packet/test_gre.py 2017-07-02 11:08:32.000000000 +0000 @@ -13,69 +13,103 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function -import unittest import logging -import struct +import os +import sys -import six -from nose.tools import eq_, raises +import unittest +from nose.tools import eq_ +from nose.tools import ok_ -from ryu.lib.packet.gre import gre -from ryu.lib.packet.ether_types import ETH_TYPE_IP +from ryu.lib import pcaplib +from ryu.lib.packet import gre +from ryu.lib.packet import packet +from ryu.utils import binary_str +from ryu.lib.packet.ether_types import ETH_TYPE_IP, ETH_TYPE_TEB LOG = logging.getLogger(__name__) +GENEVE_DATA_DIR = os.path.join( + os.path.dirname(sys.modules[__name__].__file__), + '../../packet_data/pcap/') + class Test_gre(unittest.TestCase): - """ Test case for gre + """ + Test case gre for ryu.lib.packet.gre. 
""" - protocol = ETH_TYPE_IP + version = 0 + gre_proto = ETH_TYPE_IP + nvgre_proto = ETH_TYPE_TEB checksum = 0x440d - key = 1000 seq_number = 10 - - buf = struct.pack("!BBHH2xII", 0xb0, 0, protocol, checksum, key, seq_number) - gre = gre(protocol, checksum, key, seq_number) - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_init(self): - eq_(self.protocol, self.gre.protocol) - eq_(self.checksum, self.gre.checksum) - eq_(self.key, self.gre.key) - eq_(self.seq_number, self.gre.seq_number) + key = 256100 + vsid = 1000 + flow_id = 100 + + gre = gre.gre(version=version, protocol=gre_proto, checksum=checksum, + key=key, seq_number=seq_number) + + def test_key_setter(self): + self.gre.key = self.key + eq_(self.gre._key, self.key) + eq_(self.gre._vsid, self.vsid) + eq_(self.gre._flow_id, self.flow_id) + + def test_key_setter_none(self): + self.gre.key = None + eq_(self.gre._key, None) + eq_(self.gre._vsid, None) + eq_(self.gre._flow_id, None) + + self.gre.key = self.key + + def test_vsid_setter(self): + self.gre.vsid = self.vsid + eq_(self.gre._key, self.key) + eq_(self.gre._vsid, self.vsid) + eq_(self.gre._flow_id, self.flow_id) + + def test_flowid_setter(self): + self.gre.flow_id = self.flow_id + eq_(self.gre._key, self.key) + eq_(self.gre._vsid, self.vsid) + eq_(self.gre._flow_id, self.flow_id) + + def test_nvgre_init(self): + nvgre = gre.nvgre(version=self.version, vsid=self.vsid, + flow_id=self.flow_id) + + eq_(nvgre.version, self.version) + eq_(nvgre.protocol, self.nvgre_proto) + eq_(nvgre.checksum, None) + eq_(nvgre.seq_number, None) + eq_(nvgre._key, self.key) + eq_(nvgre._vsid, self.vsid) + eq_(nvgre._flow_id, self.flow_id) def test_parser(self): - res, _, _ = self.gre.parser(self.buf) + files = [ + 'gre_full_options', + 'gre_no_option', + 'gre_nvgre_option', + ] + + for f in files: + # print('*** testing %s ...' % f) + for _, buf in pcaplib.Reader( + open(GENEVE_DATA_DIR + f + '.pcap', 'rb')): + # Checks if message can be parsed as expected. + pkt = packet.Packet(buf) + gre_pkt = pkt.get_protocol(gre.gre) + ok_(isinstance(gre_pkt, gre.gre), + 'Failed to parse Gre message: %s' % pkt) + + # Checks if message can be serialized as expected. + pkt.serialize() - eq_(res.protocol, self.protocol) - eq_(res.checksum, self.checksum) - eq_(res.key, self.key) - eq_(res.seq_number, self.seq_number) - - def test_serialize(self): - buf = self.gre.serialize() - res = struct.unpack_from("!BBHH2xII", six.binary_type(buf)) - - eq_(res[0], 0xb0) - eq_(res[1], 0) - eq_(res[2], self.protocol) - eq_(res[3], self.checksum) - eq_(res[4], self.key) - eq_(res[5], self.seq_number) - - @raises(Exception) - def test_malformed_gre(self): - m_short_buf = self.buf[1:gre._MIN_LEN] - gre.parser(m_short_buf) - - def test_json(self): - jsondict = self.gre.to_jsondict() - g = gre.from_jsondict(jsondict['gre']) - eq_(str(self.gre), str(g)) + eq_(buf, pkt.data, + "b'%s' != b'%s'" % (binary_str(buf), binary_str(pkt.data))) diff -Nru ryu-4.9/ryu/tests/unit/packet/test_openflow.py ryu-4.15/ryu/tests/unit/packet/test_openflow.py --- ryu-4.9/ryu/tests/unit/packet/test_openflow.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/packet/test_openflow.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,64 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import logging +import os +import sys + +import unittest +from nose.tools import eq_ +from nose.tools import ok_ + +from ryu.lib import pcaplib +from ryu.lib.packet import openflow +from ryu.lib.packet import packet +from ryu.utils import binary_str + + +LOG = logging.getLogger(__name__) + +OPENFLOW_DATA_DIR = os.path.join( + os.path.dirname(sys.modules[__name__].__file__), + '../../packet_data/pcap/') + + +class Test_openflow(unittest.TestCase): + """ + Test case for ryu.lib.packet.openflow. + """ + + def test_pcap(self): + files = [ + 'openflow_flowmod', + 'openflow_flowstats_req', + 'openflow_invalid_version', + ] + + for f in files: + # print('*** testing %s ...' % f) + for _, buf in pcaplib.Reader( + open(OPENFLOW_DATA_DIR + f + '.pcap', 'rb')): + # Checks if message can be parsed as expected. + pkt = packet.Packet(buf) + openflow_pkt = pkt.get_protocol(openflow.openflow) + ok_(isinstance(openflow_pkt, openflow.openflow), + 'Failed to parse OpenFlow message: %s' % pkt) + + # Checks if message can be serialized as expected. + pkt.serialize() + eq_(buf, pkt.data, + "b'%s' != b'%s'" % (binary_str(buf), binary_str(pkt.data))) diff -Nru ryu-4.9/ryu/tests/unit/packet/test_zebra.py ryu-4.15/ryu/tests/unit/packet/test_zebra.py --- ryu-4.9/ryu/tests/unit/packet/test_zebra.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/packet/test_zebra.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,66 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +import sys +import unittest + +from nose.tools import eq_ +from nose.tools import ok_ +import six + +from ryu.lib import pcaplib +from ryu.lib.packet import packet +from ryu.lib.packet import zebra +from ryu.utils import binary_str + + +PCAP_DATA_DIR = os.path.join( + os.path.dirname(sys.modules[__name__].__file__), + '../../packet_data/pcap/') + + +class Test_zebra(unittest.TestCase): + """ + Test case for ryu.lib.packet.zebra. + """ + + def test_pcap(self): + files = [ + 'zebra_v2', + 'zebra_v3', + ] + + for f in files: + zebra_pcap_file = os.path.join(PCAP_DATA_DIR, f + '.pcap') + # print('*** testing %s' % zebra_pcap_file) + + for _, buf in pcaplib.Reader(open(zebra_pcap_file, 'rb')): + # Checks if Zebra message can be parsed as expected. 
+ pkt = packet.Packet(buf) + zebra_pkts = pkt.get_protocols(zebra.ZebraMessage) + for zebra_pkt in zebra_pkts: + ok_(isinstance(zebra_pkt, zebra.ZebraMessage), + 'Failed to parse Zebra message: %s' % pkt) + ok_(not isinstance(pkt.protocols[-1], + (six.binary_type, bytearray)), + 'Some messages could not be parsed: %s' % pkt) + + # Checks if Zebra message can be serialized as expected. + pkt.serialize() + eq_(buf, pkt.data, + "b'%s' != b'%s'" % (binary_str(buf), binary_str(pkt.data))) diff -Nru ryu-4.9/ryu/tests/unit/services/protocols/bgp/core_managers/test_table_manager.py ryu-4.15/ryu/tests/unit/services/protocols/bgp/core_managers/test_table_manager.py --- ryu-4.9/ryu/tests/unit/services/protocols/bgp/core_managers/test_table_manager.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/services/protocols/bgp/core_managers/test_table_manager.py 2017-07-02 11:08:32.000000000 +0000 @@ -28,16 +28,29 @@ from ryu.lib.packet.bgp import BGP_ATTR_ORIGIN_IGP from ryu.lib.packet.bgp import BGP_ATTR_TYPE_ORIGIN from ryu.lib.packet.bgp import BGP_ATTR_TYPE_AS_PATH +from ryu.lib.packet.bgp import BGP_ATTR_TYPE_EXTENDED_COMMUNITIES from ryu.lib.packet.bgp import IPAddrPrefix from ryu.lib.packet.bgp import IP6AddrPrefix from ryu.lib.packet.bgp import EvpnArbitraryEsi +from ryu.lib.packet.bgp import EvpnLACPEsi +from ryu.lib.packet.bgp import EvpnEthernetAutoDiscoveryNLRI from ryu.lib.packet.bgp import EvpnMacIPAdvertisementNLRI from ryu.lib.packet.bgp import EvpnInclusiveMulticastEthernetTagNLRI +from ryu.lib.packet.bgp import FlowSpecIPv4NLRI +from ryu.lib.packet.bgp import BGPPathAttributeExtendedCommunities +from ryu.services.protocols.bgp.bgpspeaker import EVPN_MAX_ET +from ryu.services.protocols.bgp.bgpspeaker import ESI_TYPE_LACP +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_FAMILY_IPV4 +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_FAMILY_VPNV4 +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_TA_SAMPLE +from ryu.services.protocols.bgp.bgpspeaker import FLOWSPEC_TA_TERMINAL from ryu.services.protocols.bgp.core import BgpCoreError from ryu.services.protocols.bgp.core_managers import table_manager from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV4 from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV6 from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_L2_EVPN +from ryu.services.protocols.bgp.rtconf.vrfs import VRF_RF_IPV4_FLOWSPEC +from ryu.services.protocols.bgp.utils.bgp import create_v4flowspec_actions LOG = logging.getLogger(__name__) @@ -115,7 +128,7 @@ next_hop, route_family, route_type, **kwargs) - def test_update_vrf_table_l2_evpn_with_esi(self): + def test_update_vrf_table_l2_evpn_with_esi_int(self): # Prepare test data route_dist = '65000:100' prefix_str = None # should be ignored @@ -139,6 +152,31 @@ next_hop, route_family, route_type, **kwargs) + def test_update_vrf_table_l2_evpn_with_esi_dict(self): + # Prepare test data + route_dist = '65000:100' + prefix_str = None # should be ignored + kwargs = { + 'ethernet_tag_id': EVPN_MAX_ET, + } + esi = EvpnLACPEsi(mac_addr='aa:bb:cc:dd:ee:ff', port_key=100) + prefix_inst = EvpnEthernetAutoDiscoveryNLRI( + route_dist=route_dist, + esi=esi, + **kwargs) + next_hop = '0.0.0.0' + route_family = VRF_RF_L2_EVPN + route_type = EvpnEthernetAutoDiscoveryNLRI.ROUTE_TYPE_NAME + kwargs['esi'] = { + 'type': ESI_TYPE_LACP, + 'mac_addr': 'aa:bb:cc:dd:ee:ff', + 'port_key': 100, + } + + self._test_update_vrf_table(prefix_inst, route_dist, prefix_str, + next_hop, route_family, 
route_type, + **kwargs) + def test_update_vrf_table_l2_evpn_without_esi(self): # Prepare test data route_dist = '65000:100' @@ -402,3 +440,498 @@ is_withdraw=True, expected_next_hop='::', ) + + @mock.patch( + 'ryu.services.protocols.bgp.core_managers.TableCoreManager.__init__', + mock.MagicMock(return_value=None)) + def _test_update_flowspec_vrf_table(self, flowspec_family, route_family, + route_dist, rules, prefix, + is_withdraw, actions=None): + # Instantiate TableCoreManager + tbl_mng = table_manager.TableCoreManager(None, None) + vrf_table_mock = mock.MagicMock() + tbl_mng._tables = {(route_dist, route_family): vrf_table_mock} + + # Test + tbl_mng.update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_dist=route_dist, + rules=rules, + actions=actions, + is_withdraw=is_withdraw, + ) + + # Check + call_args_list = vrf_table_mock.insert_vrffs_path.call_args_list + ok_(len( + call_args_list) == 1) # insert_vrffs_path should be called once + args, kwargs = call_args_list[0] + ok_(len(args) == 0) # no positional argument + eq_(prefix, kwargs['nlri'].prefix) + eq_(is_withdraw, kwargs['is_withdraw']) + + def test_update_flowspec_vrf_table_vpnv4(self): + flowspec_family = 'vpnv4fs' + route_family = 'ipv4fs' + route_dist = '65001:100' + rules = { + 'dst_prefix': '10.70.1.0/24', + } + actions = { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + } + prefix = 'ipv4fs(dst_prefix:10.70.1.0/24)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + actions=actions, + ) + + def test_update_flowspec_vrf_table_vpnv4_without_actions(self): + flowspec_family = 'vpnv4fs' + route_family = 'ipv4fs' + route_dist = '65001:100' + rules = { + 'dst_prefix': '10.70.1.0/24', + } + prefix = 'ipv4fs(dst_prefix:10.70.1.0/24)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + ) + + @raises(BgpCoreError) + def test_update_flowspec_vrf_table_vpnv4_invalid_actions(self): + flowspec_family = 'vpnv4fs' + route_family = 'ipv4fs' + route_dist = '65001:100' + rules = { + 'dst_prefix': '10.70.1.0/24', + } + actions = { + 'invalid_actions': { + 'invalid_param': 10, + }, + } + prefix = 'ipv4fs(dst_prefix:10.70.1.0/24)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + actions=actions, + ) + + @raises(BgpCoreError) + def test_update_flowspec_vrf_table_vpnv4_invalid_flowspec_family(self): + flowspec_family = 'invalid' + route_family = 'ipv4fs' + route_dist = '65001:100' + rules = { + 'dst_prefix': '10.70.1.0/24', + } + prefix = 'ipv4fs(dst_prefix:10.70.1.0/24)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + ) + + @raises(BgpCoreError) + def test_update_flowspec_vrf_table_vpnv4_invalid_route_family(self): + flowspec_family = 'vpnv4fs' + route_family = 'invalid' + route_dist = '65001:100' + rules = { + 'dst_prefix': '10.70.1.0/24', + } + prefix = 'ipv4fs(dst_prefix:10.70.1.0/24)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + ) + + 
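
These flowspec VRF cases all route through the _test_update_flowspec_vrf_table helper above: TableCoreManager.__init__ is stubbed out, a mocked VRF table is registered under the (route_dist, route_family) key, and the assertion is simply that insert_vrffs_path is called once with an NLRI whose prefix string matches the expected 'ipv4fs(...)' form. A minimal sketch of the call shape being exercised, reusing the vpnv4 arguments from the test above; the mock setup is for illustration only, mirroring what the tests do rather than any production wiring.

.. code-block:: python

    from unittest import mock  # the tests use the external "mock" package,
                               # which exposes the same patch/MagicMock API

    from ryu.services.protocols.bgp.core_managers import table_manager

    # Stub out __init__ so TableCoreManager can be built without a real core
    # service, exactly as the tests above do.
    with mock.patch.object(table_manager.TableCoreManager, '__init__',
                           return_value=None):
        tbl_mng = table_manager.TableCoreManager(None, None)
        tbl_mng._tables = {('65001:100', 'ipv4fs'): mock.MagicMock()}

        # Same call shape as test_update_flowspec_vrf_table_vpnv4; the mocked
        # VRF table simply records the resulting insert_vrffs_path() call.
        tbl_mng.update_flowspec_vrf_table(
            flowspec_family='vpnv4fs',
            route_dist='65001:100',
            rules={'dst_prefix': '10.70.1.0/24'},
            actions={'traffic_rate': {'as_number': 0, 'rate_info': 100.0}},
            is_withdraw=False,
        )
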
@mock.patch( + 'ryu.services.protocols.bgp.core_managers.TableCoreManager.__init__', + mock.MagicMock(return_value=None)) + @mock.patch( + 'ryu.services.protocols.bgp.core_managers.TableCoreManager.learn_path') + def _test_update_flowspec_global_table(self, learn_path_mock, + flowspec_family, rules, prefix, + is_withdraw, actions=None): + # Instantiate TableCoreManager + tbl_mng = table_manager.TableCoreManager(None, None) + + # Test + tbl_mng.update_flowspec_global_table( + flowspec_family=flowspec_family, + rules=rules, + actions=actions, + is_withdraw=is_withdraw, + ) + + # Check + call_args_list = learn_path_mock.call_args_list + ok_(len(call_args_list) == 1) # learn_path should be called once + args, kwargs = call_args_list[0] + ok_(len(kwargs) == 0) # no keyword argument + output_path = args[0] + eq_(None, output_path.source) + eq_(prefix, output_path.nlri.prefix) + eq_(None, output_path.nexthop) + eq_(is_withdraw, output_path.is_withdraw) + + def test_update_flowspec_global_table_ipv4(self): + flowspec_family = 'ipv4fs' + rules = { + 'dst_prefix': '10.60.1.0/24', + } + actions = { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + } + prefix = 'ipv4fs(dst_prefix:10.60.1.0/24)' + + self._test_update_flowspec_global_table( + flowspec_family=flowspec_family, + rules=rules, + prefix=prefix, + is_withdraw=False, + actions=actions, + ) + + def test_update_flowspec_global_table_ipv4_without_actions(self): + flowspec_family = 'ipv4fs' + rules = { + 'dst_prefix': '10.60.1.0/24', + } + prefix = 'ipv4fs(dst_prefix:10.60.1.0/24)' + + self._test_update_flowspec_global_table( + flowspec_family=flowspec_family, + rules=rules, + prefix=prefix, + is_withdraw=False, + ) + + @raises(BgpCoreError) + def test_update_flowspec_global_table_ipv4_invalid_actions(self): + flowspec_family = 'ipv4fs' + rules = { + 'dst_prefix': '10.60.1.0/24', + } + actions = { + 'invalid_actions': { + 'invalid_param': 10, + }, + } + prefix = 'ipv4fs(dst_prefix:10.60.1.0/24)' + + self._test_update_flowspec_global_table( + flowspec_family=flowspec_family, + rules=rules, + prefix=prefix, + is_withdraw=False, + actions=actions, + ) + + @raises(BgpCoreError) + def test_update_flowspec_global_table_ipv4_invalid_flowspec_family(self): + flowspec_family = 'invalid' + rules = { + 'dst_prefix': '10.60.1.0/24', + } + actions = { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + } + prefix = 'ipv4fs(dst_prefix:10.60.1.0/24)' + + self._test_update_flowspec_global_table( + flowspec_family=flowspec_family, + rules=rules, + prefix=prefix, + is_withdraw=False, + actions=actions, + ) + + def test_update_flowspec_global_table_ipv6(self): + flowspec_family = 'ipv6fs' + rules = { + 'dst_prefix': '2001::3/128/32', + } + actions = { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + } + prefix = 'ipv6fs(dst_prefix:2001::3/128/32)' + + self._test_update_flowspec_global_table( + flowspec_family=flowspec_family, + rules=rules, + prefix=prefix, + is_withdraw=False, + actions=actions, + ) + + def test_update_flowspec_global_table_ipv6_without_actions(self): + flowspec_family = 'ipv6fs' + rules = { + 'dst_prefix': '2001::3/128/32', + } + prefix = 'ipv6fs(dst_prefix:2001::3/128/32)' + + self._test_update_flowspec_global_table( + flowspec_family=flowspec_family, + rules=rules, + prefix=prefix, + is_withdraw=False, + ) + + @raises(BgpCoreError) + def test_update_flowspec_global_table_ipv6_invalid_actions(self): + flowspec_family = 'ipv6fs' + rules = { + 'dst_prefix': '2001::3/128/32', + } + actions = { + 
'invalid_actions': { + 'invalid_param': 10, + }, + } + prefix = 'ipv4fs(dst_prefix:2001::3/128/32)' + + self._test_update_flowspec_global_table( + flowspec_family=flowspec_family, + rules=rules, + prefix=prefix, + is_withdraw=False, + actions=actions, + ) + + @raises(BgpCoreError) + def test_update_flowspec_global_table_ipv6_invalid_flowspec_family(self): + flowspec_family = 'invalid' + rules = { + 'dst_prefix': '2001::3/128/32', + } + actions = { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + } + prefix = 'ipv4fs(dst_prefix:2001::3/128/32)' + + self._test_update_flowspec_global_table( + flowspec_family=flowspec_family, + rules=rules, + prefix=prefix, + is_withdraw=False, + actions=actions, + ) + + def test_update_flowspec_vrf_table_vpnv6(self): + flowspec_family = 'vpnv6fs' + route_family = 'ipv6fs' + route_dist = '65001:100' + rules = { + 'dst_prefix': '2001::3/128/32', + } + actions = { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + } + prefix = 'ipv6fs(dst_prefix:2001::3/128/32)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + actions=actions, + ) + + def test_update_flowspec_vrf_table_vpnv6_without_actions(self): + flowspec_family = 'vpnv6fs' + route_family = 'ipv6fs' + route_dist = '65001:100' + rules = { + 'dst_prefix': '2001::3/128/32', + } + prefix = 'ipv6fs(dst_prefix:2001::3/128/32)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + ) + + @raises(BgpCoreError) + def test_update_flowspec_vrf_table_vpnv6_invalid_actions(self): + flowspec_family = 'vpnv6fs' + route_family = 'ipv6fs' + route_dist = '65001:100' + rules = { + 'dst_prefix': '2001::3/128/32', + } + actions = { + 'invalid_actions': { + 'invalid_param': 10, + }, + } + prefix = 'ipv6fs(dst_prefix:2001::3/128/32)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + actions=actions, + ) + + @raises(BgpCoreError) + def test_update_flowspec_vrf_table_vpnv6_invalid_route_family(self): + flowspec_family = 'vpnv6fs' + route_family = 'invalid' + route_dist = '65001:100' + rules = { + 'dst_prefix': '2001::3/128/32', + } + prefix = 'ipv4fs(dst_prefix:2001::3/128/32)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + ) + + def test_update_flowspec_vrf_table_l2vpn(self): + flowspec_family = 'l2vpnfs' + route_family = 'l2vpnfs' + route_dist = '65001:100' + rules = { + 'dst_mac': '12:34:56:78:9a:bc', + } + actions = { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + } + prefix = 'l2vpnfs(dst_mac:12:34:56:78:9a:bc)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + actions=actions, + ) + + def test_update_flowspec_vrf_table_l2vpn_without_actions(self): + flowspec_family = 'l2vpnfs' + route_family = 'l2vpnfs' + route_dist = '65001:100' + rules = { + 'dst_mac': '12:34:56:78:9a:bc', + } + prefix = 'l2vpnfs(dst_mac:12:34:56:78:9a:bc)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + 
route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + ) + + @raises(BgpCoreError) + def test_update_flowspec_vrf_table_l2vpn_invalid_actions(self): + flowspec_family = 'l2vpnfs' + route_family = 'l2vpnfs' + route_dist = '65001:100' + rules = { + 'dst_mac': '12:34:56:78:9a:bc', + } + actions = { + 'invalid_actions': { + 'invalid_param': 10, + }, + } + prefix = 'l2vpnfs(dst_mac:12:34:56:78:9a:bc)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + actions=actions, + ) + + @raises(BgpCoreError) + def test_update_flowspec_vrf_table_l2vpn_invalid_route_family(self): + flowspec_family = 'l2vpnfs' + route_family = 'invalid' + route_dist = '65001:100' + rules = { + 'dst_mac': '12:34:56:78:9a:bc', + } + prefix = 'l2vpnfs(dst_mac:12:34:56:78:9a:bc)' + + self._test_update_flowspec_vrf_table( + flowspec_family=flowspec_family, + route_family=route_family, + route_dist=route_dist, + rules=rules, + prefix=prefix, + is_withdraw=False, + ) diff -Nru ryu-4.9/ryu/tests/unit/services/protocols/bgp/test_bgpspeaker.py ryu-4.15/ryu/tests/unit/services/protocols/bgp/test_bgpspeaker.py --- ryu-4.9/ryu/tests/unit/services/protocols/bgp/test_bgpspeaker.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/services/protocols/bgp/test_bgpspeaker.py 2017-07-02 11:08:32.000000000 +0000 @@ -23,6 +23,12 @@ from nose.tools import raises from ryu.services.protocols.bgp import bgpspeaker +from ryu.services.protocols.bgp.bgpspeaker import EVPN_MAX_ET +from ryu.services.protocols.bgp.bgpspeaker import ESI_TYPE_LACP +from ryu.services.protocols.bgp.api.prefix import ESI_TYPE_L2_BRIDGE +from ryu.services.protocols.bgp.bgpspeaker import ESI_TYPE_MAC_BASED +from ryu.services.protocols.bgp.api.prefix import REDUNDANCY_MODE_ALL_ACTIVE +from ryu.services.protocols.bgp.api.prefix import REDUNDANCY_MODE_SINGLE_ACTIVE LOG = logging.getLogger(__name__) @@ -36,6 +42,86 @@ @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', mock.MagicMock(return_value=None)) @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_evpn_prefix_add_eth_auto_discovery(self, mock_call): + # Prepare test data + route_type = bgpspeaker.EVPN_ETH_AUTO_DISCOVERY + route_dist = '65000:100' + esi = { + 'type': ESI_TYPE_LACP, + 'mac_addr': 'aa:bb:cc:dd:ee:ff', + 'port_key': 100, + } + ethernet_tag_id = EVPN_MAX_ET + redundancy_mode = REDUNDANCY_MODE_ALL_ACTIVE + next_hop = '0.0.0.0' + expected_kwargs = { + 'route_type': route_type, + 'route_dist': route_dist, + 'esi': esi, + 'ethernet_tag_id': ethernet_tag_id, + 'redundancy_mode': redundancy_mode, + 'next_hop': next_hop, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.evpn_prefix_add( + route_type=route_type, + route_dist=route_dist, + esi=esi, + ethernet_tag_id=ethernet_tag_id, + redundancy_mode=redundancy_mode, + ) + + # Check + mock_call.assert_called_with( + 'evpn_prefix.add_local', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_evpn_prefix_add_eth_auto_discovery_vni(self, mock_call): + # Prepare test data + route_type = bgpspeaker.EVPN_ETH_AUTO_DISCOVERY + route_dist = '65000:100' + esi = { + 'type': ESI_TYPE_L2_BRIDGE, + 'mac_addr': 'aa:bb:cc:dd:ee:ff', + 'priority': 100, + } + ethernet_tag_id = 
EVPN_MAX_ET + redundancy_mode = REDUNDANCY_MODE_SINGLE_ACTIVE + vni = 500 + next_hop = '0.0.0.0' + expected_kwargs = { + 'route_type': route_type, + 'route_dist': route_dist, + 'esi': esi, + 'ethernet_tag_id': ethernet_tag_id, + 'redundancy_mode': redundancy_mode, + 'vni': vni, + 'next_hop': next_hop, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.evpn_prefix_add( + route_type=route_type, + route_dist=route_dist, + esi=esi, + ethernet_tag_id=ethernet_tag_id, + redundancy_mode=redundancy_mode, + vni=vni + ) + + # Check + mock_call.assert_called_with( + 'evpn_prefix.add_local', **expected_kwargs) + + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') def test_evpn_prefix_add_mac_ip_adv(self, mock_call): # Prepare test data route_type = bgpspeaker.EVPN_MAC_IP_ADV_ROUTE @@ -195,6 +281,42 @@ 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', mock.MagicMock(return_value=None)) @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_evpn_prefix_add_eth_segment(self, mock_call): + # Prepare test data + route_type = bgpspeaker.EVPN_ETH_SEGMENT + route_dist = '65000:100' + esi = { + 'type': ESI_TYPE_MAC_BASED, + 'mac_addr': 'aa:bb:cc:dd:ee:ff', + 'local_disc': 100, + } + ip_addr = '192.168.0.1' + next_hop = '0.0.0.0' + expected_kwargs = { + 'route_type': route_type, + 'route_dist': route_dist, + 'esi': esi, + 'ip_addr': ip_addr, + 'next_hop': next_hop, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.evpn_prefix_add( + route_type=route_type, + route_dist=route_dist, + esi=esi, + ip_addr=ip_addr, + ) + + # Check + mock_call.assert_called_with( + 'evpn_prefix.add_local', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') def test_evpn_prefix_add_ip_prefix_route(self, mock_call): # Prepare test data route_type = bgpspeaker.EVPN_IP_PREFIX_ROUTE @@ -303,6 +425,40 @@ mock_call.assert_called_with( 'evpn_prefix.add_local', 'Invalid arguments detected') + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_evpn_prefix_del_auto_discovery(self, mock_call): + # Prepare test data + route_type = bgpspeaker.EVPN_ETH_AUTO_DISCOVERY + route_dist = '65000:100' + esi = { + 'type': ESI_TYPE_LACP, + 'mac_addr': 'aa:bb:cc:dd:ee:ff', + 'port_key': 100, + } + ethernet_tag_id = EVPN_MAX_ET + expected_kwargs = { + 'route_type': route_type, + 'route_dist': route_dist, + 'esi': esi, + 'ethernet_tag_id': ethernet_tag_id, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.evpn_prefix_del( + route_type=route_type, + route_dist=route_dist, + esi=esi, + ethernet_tag_id=ethernet_tag_id, + ) + + # Check + mock_call.assert_called_with( + 'evpn_prefix.delete_local', **expected_kwargs) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', mock.MagicMock(return_value=None)) @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') @@ -310,14 +466,12 @@ # Prepare test data route_type = bgpspeaker.EVPN_MAC_IP_ADV_ROUTE route_dist = '65000:100' - esi = 0 # denotes single-homed ethernet_tag_id = 200 mac_addr = 'aa:bb:cc:dd:ee:ff' ip_addr = '192.168.0.1' expected_kwargs = { 'route_type': route_type, 'route_dist': route_dist, 
- 'esi': esi, 'ethernet_tag_id': ethernet_tag_id, 'mac_addr': mac_addr, 'ip_addr': ip_addr, @@ -328,7 +482,6 @@ speaker.evpn_prefix_del( route_type=route_type, route_dist=route_dist, - esi=esi, ethernet_tag_id=ethernet_tag_id, mac_addr=mac_addr, ip_addr=ip_addr, @@ -405,6 +558,40 @@ 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', mock.MagicMock(return_value=None)) @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_evpn_prefix_del_eth_segment(self, mock_call): + # Prepare test data + route_type = bgpspeaker.EVPN_ETH_SEGMENT + route_dist = '65000:100' + esi = { + 'esi_type': ESI_TYPE_MAC_BASED, + 'mac_addr': 'aa:bb:cc:dd:ee:ff', + 'local_disc': 100, + } + ip_addr = '192.168.0.1' + expected_kwargs = { + 'route_type': route_type, + 'route_dist': route_dist, + 'esi': esi, + 'ip_addr': ip_addr, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.evpn_prefix_del( + route_type=route_type, + route_dist=route_dist, + esi=esi, + ip_addr=ip_addr, + ) + + # Check + mock_call.assert_called_with( + 'evpn_prefix.delete_local', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') def test_evpn_prefix_del_ip_prefix_route(self, mock_call): # Prepare test data route_type = bgpspeaker.EVPN_IP_PREFIX_ROUTE @@ -527,3 +714,375 @@ # Check mock_call.assert_called_with( 'evpn_prefix.add_local', 'Invalid arguments detected') + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_flowspec_prefix_add_ipv4(self, mock_call): + # Prepare test data + flowspec_family = bgpspeaker.FLOWSPEC_FAMILY_IPV4 + rules = { + 'dst_prefix': '10.60.1.0/24', + } + + actions = { + 'traffic_marking': { + 'dscp': 24, + } + } + + expected_kwargs = { + 'flowspec_family': flowspec_family, + 'rules': rules, + 'actions': actions, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.flowspec_prefix_add( + flowspec_family=flowspec_family, + rules=rules, + actions=actions) + + # Check + mock_call.assert_called_with( + 'flowspec.add', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_flowspec_prefix_add_ipv4_without_actions(self, mock_call): + # Prepare test data + flowspec_family = bgpspeaker.FLOWSPEC_FAMILY_IPV4 + rules = { + 'dst_prefix': '10.60.1.0/24', + } + + expected_kwargs = { + 'flowspec_family': flowspec_family, + 'rules': rules, + 'actions': {}, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.flowspec_prefix_add( + flowspec_family=flowspec_family, + rules=rules) + + # Check + mock_call.assert_called_with( + 'flowspec.add', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_flowspec_prefix_del_ipv4(self, mock_call): + # Prepare test data + flowspec_family = bgpspeaker.FLOWSPEC_FAMILY_IPV4 + rules = { + 'dst_prefix': '10.60.1.0/24', + } + + expected_kwargs = { + 'flowspec_family': flowspec_family, + 'rules': rules, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.flowspec_prefix_del( + flowspec_family=flowspec_family, + 
rules=rules) + + # Check + mock_call.assert_called_with( + 'flowspec.del', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_flowspec_prefix_add_vpnv4(self, mock_call): + # Prepare test data + flowspec_family = bgpspeaker.FLOWSPEC_FAMILY_VPNV4 + route_dist = '65001:100' + rules = { + 'dst_prefix': '10.70.1.0/24', + } + + actions = { + 'traffic_marking': { + 'dscp': 24, + } + } + + expected_kwargs = { + 'flowspec_family': flowspec_family, + 'route_dist': route_dist, + 'rules': rules, + 'actions': actions, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.flowspec_prefix_add( + flowspec_family=flowspec_family, + route_dist=route_dist, + rules=rules, + actions=actions) + + # Check + mock_call.assert_called_with( + 'flowspec.add_local', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_flowspec_prefix_del_vpnv4(self, mock_call): + # Prepare test data + flowspec_family = bgpspeaker.FLOWSPEC_FAMILY_VPNV4 + route_dist = '65001:100' + rules = { + 'dst_prefix': '10.70.1.0/24', + } + + expected_kwargs = { + 'flowspec_family': flowspec_family, + 'route_dist': route_dist, + 'rules': rules, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.flowspec_prefix_del( + flowspec_family=flowspec_family, + route_dist=route_dist, + rules=rules) + + # Check + mock_call.assert_called_with( + 'flowspec.del_local', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_flowspec_prefix_add_ipv6(self, mock_call): + # Prepare test data + flowspec_family = bgpspeaker.FLOWSPEC_FAMILY_IPV6 + rules = { + 'dst_prefix': '2001::3/128/32', + } + + actions = { + 'traffic_marking': { + 'dscp': 24, + } + } + + expected_kwargs = { + 'flowspec_family': flowspec_family, + 'rules': rules, + 'actions': actions, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.flowspec_prefix_add( + flowspec_family=flowspec_family, + rules=rules, + actions=actions) + + # Check + mock_call.assert_called_with( + 'flowspec.add', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_flowspec_prefix_add_ipv6_without_actions(self, mock_call): + # Prepare test data + flowspec_family = bgpspeaker.FLOWSPEC_FAMILY_IPV6 + rules = { + 'dst_prefix': '2001::3/128/32', + } + + expected_kwargs = { + 'flowspec_family': flowspec_family, + 'rules': rules, + 'actions': {}, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.flowspec_prefix_add( + flowspec_family=flowspec_family, + rules=rules) + + # Check + mock_call.assert_called_with( + 'flowspec.add', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_flowspec_prefix_del_ipv6(self, mock_call): + # Prepare test data + flowspec_family = bgpspeaker.FLOWSPEC_FAMILY_IPV6 + rules = { + 'dst_prefix': '2001::3/128/32', + } + + expected_kwargs = { + 
'flowspec_family': flowspec_family, + 'rules': rules, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.flowspec_prefix_del( + flowspec_family=flowspec_family, + rules=rules) + + # Check + mock_call.assert_called_with( + 'flowspec.del', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_flowspec_prefix_add_vpnv6(self, mock_call): + # Prepare test data + flowspec_family = bgpspeaker.FLOWSPEC_FAMILY_VPNV6 + route_dist = '65001:100' + rules = { + 'dst_prefix': '2001::3/128/32', + } + + actions = { + 'traffic_marking': { + 'dscp': 24, + } + } + + expected_kwargs = { + 'flowspec_family': flowspec_family, + 'route_dist': route_dist, + 'rules': rules, + 'actions': actions, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.flowspec_prefix_add( + flowspec_family=flowspec_family, + route_dist=route_dist, + rules=rules, + actions=actions) + + # Check + mock_call.assert_called_with( + 'flowspec.add_local', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_flowspec_prefix_del_vpnv6(self, mock_call): + # Prepare test data + flowspec_family = bgpspeaker.FLOWSPEC_FAMILY_VPNV6 + route_dist = '65001:100' + rules = { + 'dst_prefix': '2001::3/128/32', + } + + expected_kwargs = { + 'flowspec_family': flowspec_family, + 'route_dist': route_dist, + 'rules': rules, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.flowspec_prefix_del( + flowspec_family=flowspec_family, + route_dist=route_dist, + rules=rules) + + # Check + mock_call.assert_called_with( + 'flowspec.del_local', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_flowspec_prefix_add_l2vpn(self, mock_call): + # Prepare test data + flowspec_family = bgpspeaker.FLOWSPEC_FAMILY_L2VPN + route_dist = '65001:100' + rules = { + 'dst_mac': '12:34:56:78:9a:bc', + } + + actions = { + 'traffic_marking': { + 'dscp': 24, + } + } + + expected_kwargs = { + 'flowspec_family': flowspec_family, + 'route_dist': route_dist, + 'rules': rules, + 'actions': actions, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.flowspec_prefix_add( + flowspec_family=flowspec_family, + route_dist=route_dist, + rules=rules, + actions=actions) + + # Check + mock_call.assert_called_with( + 'flowspec.add_local', **expected_kwargs) + + @mock.patch( + 'ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__', + mock.MagicMock(return_value=None)) + @mock.patch('ryu.services.protocols.bgp.bgpspeaker.call') + def test_flowspec_prefix_del_l2vpn(self, mock_call): + # Prepare test data + flowspec_family = bgpspeaker.FLOWSPEC_FAMILY_L2VPN + route_dist = '65001:100' + rules = { + 'dst_mac': '12:34:56:78:9a:bc', + } + + expected_kwargs = { + 'flowspec_family': flowspec_family, + 'route_dist': route_dist, + 'rules': rules, + } + + # Test + speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1') + speaker.flowspec_prefix_del( + flowspec_family=flowspec_family, + route_dist=route_dist, + rules=rules) + + # Check + mock_call.assert_called_with( + 'flowspec.del_local', **expected_kwargs) diff -Nru 
ryu-4.9/ryu/tests/unit/services/protocols/bgp/utils/test_bgp.py ryu-4.15/ryu/tests/unit/services/protocols/bgp/utils/test_bgp.py --- ryu-4.9/ryu/tests/unit/services/protocols/bgp/utils/test_bgp.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/services/protocols/bgp/utils/test_bgp.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,211 @@ +# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +import unittest + +from nose.tools import eq_, raises + +from ryu.lib.packet.bgp import ( + BGPFlowSpecTrafficRateCommunity, + BGPFlowSpecTrafficActionCommunity, + BGPFlowSpecRedirectCommunity, + BGPFlowSpecTrafficMarkingCommunity, + BGPFlowSpecVlanActionCommunity, + BGPFlowSpecTPIDActionCommunity, +) + +from ryu.services.protocols.bgp.core import BgpCoreError +from ryu.services.protocols.bgp.utils.bgp import create_v4flowspec_actions +from ryu.services.protocols.bgp.utils.bgp import create_v6flowspec_actions +from ryu.services.protocols.bgp.utils.bgp import create_l2vpnflowspec_actions + + +LOG = logging.getLogger(__name__) + + +class Test_Utils_BGP(unittest.TestCase): + """ + Test case for ryu.services.protocols.bgp.utils.bgp + """ + + def _test_create_v4flowspec_actions(self, actions, expected_communities): + communities = create_v4flowspec_actions(actions) + expected_communities.sort(key=lambda x: x.subtype) + communities.sort(key=lambda x: x.subtype) + eq_(str(expected_communities), str(communities)) + + def test_create_v4flowspec_actions_all_actions(self): + actions = { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + 'traffic_action': { + 'action': 3, + }, + 'redirect': { + 'as_number': 10, + 'local_administrator': 10, + }, + 'traffic_marking': { + 'dscp': 24, + } + } + expected_communities = [ + BGPFlowSpecTrafficRateCommunity(as_number=0, rate_info=100.0), + BGPFlowSpecTrafficActionCommunity(action=3), + BGPFlowSpecRedirectCommunity(as_number=10, local_administrator=10), + BGPFlowSpecTrafficMarkingCommunity(dscp=24), + ] + self._test_create_v4flowspec_actions(actions, expected_communities) + + def test_create_v4flowspec_actions_without_actions(self): + actions = None + expected_communities = [] + self._test_create_v4flowspec_actions(actions, expected_communities) + + @raises(ValueError) + def test_create_v4flowspec_actions_not_exist_actions(self): + actions = { + 'traffic_test': { + 'test': 10, + }, + } + expected_communities = [] + self._test_create_v4flowspec_actions(actions, expected_communities) + + def _test_create_v6flowspec_actions(self, actions, expected_communities): + communities = create_v6flowspec_actions(actions) + expected_communities.sort(key=lambda x: x.subtype) + communities.sort(key=lambda x: x.subtype) + eq_(str(expected_communities), str(communities)) + + def test_create_v6flowspec_actions_all_actions(self): + actions = { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + 'traffic_action': { + 'action': 3, + }, + 'redirect': { + 'as_number': 10, + 
'local_administrator': 10, + }, + 'traffic_marking': { + 'dscp': 24, + } + } + expected_communities = [ + BGPFlowSpecTrafficRateCommunity(as_number=0, rate_info=100.0), + BGPFlowSpecTrafficActionCommunity(action=3), + BGPFlowSpecRedirectCommunity(as_number=10, local_administrator=10), + BGPFlowSpecTrafficMarkingCommunity(dscp=24), + ] + self._test_create_v6flowspec_actions(actions, expected_communities) + + def test_create_v6flowspec_actions_without_actions(self): + actions = None + expected_communities = [] + self._test_create_v6flowspec_actions(actions, expected_communities) + + @raises(ValueError) + def test_create_v6flowspec_actions_not_exist_actions(self): + actions = { + 'traffic_test': { + 'test': 10, + }, + } + expected_communities = [] + self._test_create_v6flowspec_actions(actions, expected_communities) + + def _test_create_l2vpnflowspec_actions(self, actions, expected_communities): + communities = create_l2vpnflowspec_actions(actions) + expected_communities.sort(key=lambda x: x.subtype) + communities.sort(key=lambda x: x.subtype) + eq_(str(expected_communities), str(communities)) + + def test_create_l2vpnflowspec_actions_all_actions(self): + actions = { + 'traffic_rate': { + 'as_number': 0, + 'rate_info': 100.0, + }, + 'traffic_action': { + 'action': 3, + }, + 'redirect': { + 'as_number': 10, + 'local_administrator': 10, + }, + 'traffic_marking': { + 'dscp': 24, + }, + 'vlan_action': { + 'actions_1': (BGPFlowSpecVlanActionCommunity.POP | + BGPFlowSpecVlanActionCommunity.SWAP), + 'vlan_1': 3000, + 'cos_1': 3, + 'actions_2': BGPFlowSpecVlanActionCommunity.PUSH, + 'vlan_2': 4000, + 'cos_2': 2, + }, + 'tpid_action': { + 'actions': (BGPFlowSpecTPIDActionCommunity.TI | + BGPFlowSpecTPIDActionCommunity.TO), + 'tpid_1': 5, + 'tpid_2': 6, + } + } + expected_communities = [ + BGPFlowSpecTrafficRateCommunity(as_number=0, rate_info=100.0), + BGPFlowSpecTrafficActionCommunity(action=3), + BGPFlowSpecRedirectCommunity(as_number=10, local_administrator=10), + BGPFlowSpecTrafficMarkingCommunity(dscp=24), + BGPFlowSpecVlanActionCommunity( + actions_1=(BGPFlowSpecVlanActionCommunity.POP | + BGPFlowSpecVlanActionCommunity.SWAP), + vlan_1=3000, + cos_1=3, + actions_2=BGPFlowSpecVlanActionCommunity.PUSH, + vlan_2=4000, + cos_2=2, + ), + BGPFlowSpecTPIDActionCommunity( + actions=(BGPFlowSpecTPIDActionCommunity.TI | + BGPFlowSpecTPIDActionCommunity.TO), + tpid_1=5, + tpid_2=6, + ), + ] + self._test_create_l2vpnflowspec_actions(actions, expected_communities) + + def test_create_l2vpnflowspec_actions_without_actions(self): + actions = None + expected_communities = [] + self._test_create_l2vpnflowspec_actions(actions, expected_communities) + + @raises(ValueError) + def test_create_l2vpnflowspec_actions_not_exist_actions(self): + actions = { + 'traffic_test': { + 'test': 10, + }, + } + expected_communities = [] + self._test_create_l2vpnflowspec_actions(actions, expected_communities) diff -Nru ryu-4.9/ryu/tests/unit/test_requirements.py ryu-4.15/ryu/tests/unit/test_requirements.py --- ryu-4.9/ryu/tests/unit/test_requirements.py 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/ryu/tests/unit/test_requirements.py 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,85 @@ +# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +import sys +import unittest + +import pkg_resources +from pip.req import parse_requirements +from pip.download import PipSession +from six.moves import urllib + +from nose.tools import ok_ + + +LOG = logging.getLogger(__name__) + +MOD_DIR = os.path.dirname(sys.modules[__name__].__file__) +_RYU_REQUIREMENTS_FILES = [ + '../../../tools/pip-requires', + '../../../tools/optional-requires', +] +RYU_REQUIREMENTS_FILES = [ + os.path.join(MOD_DIR, f) for f in _RYU_REQUIREMENTS_FILES] + +OPENSTACK_REQUIREMENTS_REPO = 'https://github.com/openstack/requirements' +OPENSTACK_REQUIREMENTS_URL = ( + 'https://github.com/openstack/requirements/raw/master/') +_OPENSTACK_REQUIREMENTS_FILES = [ + 'requirements.txt', + 'global-requirements.txt', +] +OPENSTACK_REQUIREMENTS_FILES = [ + urllib.parse.urljoin(OPENSTACK_REQUIREMENTS_URL, f) + for f in _OPENSTACK_REQUIREMENTS_FILES] + + +def _get_requirements(files): + requirements = {} + for f in files: + req = parse_requirements(f, session=PipSession()) + for r in req: + requirements[r.name] = str(r.req) + + return requirements + +OPENSTACK_REQUIREMENTS = _get_requirements(OPENSTACK_REQUIREMENTS_FILES) +RYU_REQUIREMENTS = _get_requirements(RYU_REQUIREMENTS_FILES) + + +class TestRequirements(unittest.TestCase): + """ + Test whether the requirements of Ryu has no conflict with that + of other projects. + """ + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_with_openstack_requirements(self): + try: + for name, req in OPENSTACK_REQUIREMENTS.items(): + if name in RYU_REQUIREMENTS: + ok_(pkg_resources.require(req)) + except pkg_resources.VersionConflict as e: + LOG.exception( + 'Some requirements of Ryu are conflicting with that of ' + 'OpenStack project: %s' % OPENSTACK_REQUIREMENTS_REPO) + raise e diff -Nru ryu-4.9/ryu/utils.py ryu-4.15/ryu/utils.py --- ryu-4.9/ryu/utils.py 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/ryu/utils.py 2017-07-02 11:08:32.000000000 +0000 @@ -14,29 +14,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - import importlib import logging import os import sys -import re import six +from pip import req as pip_req +from pip.download import PipSession + LOG = logging.getLogger('ryu.utils') @@ -94,29 +80,34 @@ return None -def import_module(modname): +def _import_module_file(path): + abspath = os.path.abspath(path) + # Backup original sys.path before appending path to file + original_path = list(sys.path) + sys.path.append(os.path.dirname(abspath)) + modname = chop_py_suffix(os.path.basename(abspath)) try: - # Import module with python module path - # e.g.) modname = 'module.path.module_name' - return importlib.import_module(modname) - except (ImportError, TypeError): - # In this block, we retry to import module when modname is filename - # e.g.) modname = 'module/path/module_name.py' - abspath = os.path.abspath(modname) - # Check if specified modname is already imported - mod = _find_loaded_module(abspath) - if mod: - return mod - # Backup original sys.path before appending path to file - original_path = list(sys.path) - sys.path.append(os.path.dirname(abspath)) - # Remove python suffix - name = chop_py_suffix(os.path.basename(modname)) - # Retry to import - mod = importlib.import_module(name) - # Restore sys.path + return load_source(modname, abspath) + finally: + # Restore original sys.path sys.path = original_path - return mod + + +def import_module(modname): + if os.path.exists(modname): + try: + # Try to import module since 'modname' is a valid path to a file + # e.g.) modname = './path/to/module/name.py' + return _import_module_file(modname) + except SyntaxError: + # The file didn't parse as valid Python code, try + # importing module assuming 'modname' is a Python module name + # e.g.) modname = 'path.to.module.name' + return importlib.import_module(modname) + else: + # Import module assuming 'modname' is a Python module name + # e.g.) modname = 'path.to.module.name' + return importlib.import_module(modname) def round_up(x, y): @@ -139,38 +130,29 @@ return ''.join('\\x%02x' % byte for byte in bytearray(data)) -# the following functions are taken from OpenStack -# -# Get requirements from the first file that exists -def get_reqs_from_files(requirements_files): - for requirements_file in requirements_files: - if os.path.exists(requirements_file): - with open(requirements_file, 'r') as fil: - return fil.read().split('\n') - return [] +def parse_requirements(requirements_files=None): + """ + Parses requirements files and returns a list of requirements. + Returned list would be like:: + + ['foo', 'bar>=X.X', ...] + + :param requirements_files: List of requirements files. The default + is ['requirements.txt', 'tools/pip-requires']. + :return: List of requirements. 
+ """ + requirements_files = requirements_files or [ + 'requirements.txt', + 'tools/pip-requires', + ] -def parse_requirements(requirements_files=None): - requirements_files = requirements_files if requirements_files else [ - 'requirements.txt', 'tools/pip-requires'] requirements = [] - for line in get_reqs_from_files(requirements_files): - # For the requirements list, we need to inject only the portion - # after egg= so that distutils knows the package it's looking for - # such as: - # -e git://github.com/openstack/nova/master#egg=nova - if re.match(r'\s*-e\s+', line): - requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', - line)) - # such as: - # http://github.com/openstack/nova/zipball/master#egg=nova - elif re.match(r'\s*https?:', line): - requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1', - line)) - # -f lines are for index locations, and don't get used here - elif re.match(r'\s*-f\s+', line): - pass - else: - requirements.append(line) + for f in requirements_files: + if not os.path.isfile(f): + continue + + for r in pip_req.parse_requirements(f, session=PipSession()): + requirements.append(str(r.req)) return requirements diff -Nru ryu-4.9/tools/doc-requires ryu-4.15/tools/doc-requires --- ryu-4.9/tools/doc-requires 1970-01-01 00:00:00.000000000 +0000 +++ ryu-4.15/tools/doc-requires 2017-07-02 11:08:32.000000000 +0000 @@ -0,0 +1,2 @@ +Sphinx +sphinx-rtd-theme diff -Nru ryu-4.9/tools/optional-requires ryu-4.15/tools/optional-requires --- ryu-4.9/tools/optional-requires 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/tools/optional-requires 2017-07-02 11:08:32.000000000 +0000 @@ -1,4 +1,5 @@ -lxml # OF-Config +lxml!=3.7.0,>=2.3 # OF-Config ncclient # OF-Config cryptography!=1.5.2 # Required by paramiko paramiko # NETCONF, BGP speaker (SSH console) +SQLAlchemy>=1.0.10,<1.1.0 # Zebra protocol service diff -Nru ryu-4.9/tools/pip-requires ryu-4.15/tools/pip-requires --- ryu-4.9/tools/pip-requires 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/tools/pip-requires 2017-07-02 11:08:32.000000000 +0000 @@ -1,4 +1,7 @@ -eventlet>=0.15 +# NOTE: OpenStack avoids the newer versions of eventlet, because of the +# following issue. +# https://github.com/eventlet/eventlet/issues/401 +eventlet!=0.18.3,>=0.18.2,!=0.20.1,<0.21.0 msgpack-python>=0.3.0 # RPC library, BGP speaker(net_cntl) netaddr oslo.config>=1.15.0 diff -Nru ryu-4.9/tox.ini ryu-4.15/tox.ini --- ryu-4.9/tox.ini 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/tox.ini 2017-07-02 11:08:32.000000000 +0000 @@ -2,19 +2,22 @@ envlist = py27,py34,py35,pypy26,pep8 [testenv] -deps = -U - -r{toxinidir}/tools/pip-requires - -r{toxinidir}/tools/optional-requires - -r{toxinidir}/tools/test-requires - --no-cache-dir +deps = + -U + -r{toxinidir}/tools/pip-requires + --no-cache-dir usedevelop = True passenv= NOSE_VERBOSE +# Note: To check whether tools/pip-requires satisfies the requirements +# for running Ryu, the following runs ryu-manager berfore installing +# the addtional requirements. 
commands = + ryu-manager ryu/tests/unit/cmd/dummy_openflow_app.py + pip install -r{toxinidir}/tools/optional-requires -r{toxinidir}/tools/test-requires coverage run --source=ryu ryu/tests/run_tests.py '{posargs}' [testenv:scenario] commands = - bash ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh python ryu/tests/integrated/run_test.py [testenv:py27] @@ -28,6 +31,10 @@ {[testenv:scenario]commands} [testenv:pep8] +deps = + -U + --no-cache-dir + pep8 commands = pep8 diff -Nru ryu-4.9/.travis.yml ryu-4.15/.travis.yml --- ryu-4.9/.travis.yml 2016-12-05 07:37:18.000000000 +0000 +++ ryu-4.15/.travis.yml 2017-07-02 11:08:32.000000000 +0000 @@ -16,6 +16,7 @@ install: - pip install tox coveralls + - bash ryu/tests/integrated/common/install_docker_test_pkg_for_travis.sh script: - NOSE_VERBOSE=0 tox -e $TOX_ENV
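
Note on the new FlowSpec unit tests above: they patch BGPSpeaker.__init__ and the internal call() hook, so they only assert the keyword arguments that reach the API layer. A minimal, illustrative sketch of how the same 4.15 FlowSpec API would be driven in a real application follows; the neighbor address and AS numbers are placeholders, not taken from this diff.

# Illustrative sketch (not part of the diff): exercise the FlowSpec API
# covered by the new unit tests, assuming Ryu 4.15 is installed.
from ryu.services.protocols.bgp import bgpspeaker

speaker = bgpspeaker.BGPSpeaker(as_number=65000, router_id='10.0.0.1')
speaker.neighbor_add('192.168.177.32', 65001)  # placeholder peer

# Advertise an IPv4 FlowSpec rule that marks matching traffic with DSCP 24,
# mirroring test_flowspec_prefix_add_ipv4.
speaker.flowspec_prefix_add(
    flowspec_family=bgpspeaker.FLOWSPEC_FAMILY_IPV4,
    rules={'dst_prefix': '10.60.1.0/24'},
    actions={'traffic_marking': {'dscp': 24}})

# Withdraw the same rule, mirroring test_flowspec_prefix_del_ipv4.
speaker.flowspec_prefix_del(
    flowspec_family=bgpspeaker.FLOWSPEC_FAMILY_IPV4,
    rules={'dst_prefix': '10.60.1.0/24'})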