diff -Nru neutron-9.0.0~b2~dev280/api-ref/README.rst neutron-9.0.0~b3~dev557/api-ref/README.rst
--- neutron-9.0.0~b2~dev280/api-ref/README.rst 2016-06-17 15:30:29.000000000 +0000
+++ neutron-9.0.0~b3~dev557/api-ref/README.rst 2016-08-03 20:10:33.000000000 +0000
@@ -1,2 +1,2 @@
-Networking API referennce is maintained in the neutron-lib repo.
+Networking API reference is maintained in the neutron-lib repo.
 See api-ref in the neutron-lib repository.
diff -Nru neutron-9.0.0~b2~dev280/AUTHORS neutron-9.0.0~b3~dev557/AUTHORS
--- neutron-9.0.0~b2~dev280/AUTHORS 2016-06-27 15:31:51.000000000 +0000
+++ neutron-9.0.0~b3~dev557/AUTHORS 2016-08-29 20:06:00.000000000 +0000
@@ -43,9 +43,12 @@
 Andrew Boik
 Andrey Epifanov
 Andrey Kurilin
+Andrey Shestakov
 Andy Hill
 Angela Smith
 Angus Lees
+Anh Tran
+Anindita Das
 Ankur Gupta
 Ann Kamyshnikova
 Ante Karamatic
@@ -66,6 +69,7 @@
 Attila Fazekas
 Aviram Bar-Haim
 Avishay Balderman
+Babu Shanmugam
 Baodong (Robert) Li
 Baodong Li
 Baohua Yang
@@ -73,15 +77,19 @@
 Ben Nemec
 Bence Romsics
 Benedikt Trefzer
+Bernard Cafarelli
 Bernhard M. Wiedemann
 Bertrand Lallau
 Bhagyashri Shewale
 Bhuvan Arumugam
 Billy Olsen
+Bin Yu
+Bin Zhou
 Bo Chi
 Bo Wang
 Bob Kukura
 Bob Melander
+Boden R
 Bogdan Tabor
 Boris Pavlovic
 Brad Hall
@@ -131,6 +139,7 @@
 Dariusz Smigiel
 Darragh O'Reilly
 Darragh O'Reilly
+Darragh O'Reilly
 Darren Birkett
 Davanum Srinivas
 Davanum Srinivas
@@ -140,6 +149,7 @@
 David Edery
 David Ripton
 David Shaughnessy
+David-gb
 Dazhao
 Debo
 Deepak N
@@ -162,6 +172,7 @@
 Duarte Nunes
 Dustin Lundquist
 Ed Bak
+Edan David
 Edgar Magana
 Edgar Magana
 Einst Crazy
@@ -174,6 +185,8 @@
 Eric Brown
 Eric Windisch
 Erik Colnick
+Erik Colnick
+Ester Niclos Ferreras
 Eugene Nikanorov
 Evgeny Fedoruk
 Eyal
@@ -193,11 +206,13 @@
 Gary Kotton
 Gary Kotton
 Gauvain Pocentek
+Genadi Chereshnya
 Gerard Braad
 Ghe Rivero
 Gong Zhang
 Gordon Chung
 Gordon Chung
+Graham Hayes
 Guilherme Salgado
 Ha Van Tu
 Haim Daniel
@@ -219,6 +234,7 @@
 Hirofumi Ichihara
 Hironori Shiina
 Hisaharu Ishii
+Hong Hui Xiao
 Hong Hui Xiao
 Huan Xie
 Hui HX Xiang
@@ -274,6 +290,7 @@
 Jian Wen
 Jian Wen
 Jianing Yang
+JieLee
 Joe Gordon
 Joe Harrison
 Joe Heck
@@ -298,8 +315,10 @@
 Julia Varlamova
 Juliano Martinez
 Juliano Martinez
+Julie Pichon
 Julien Danjou
 Jun Park
+Junjie Wang
 Justin Hammond
 Justin Lund
 KAWAI Hiroaki
@@ -324,9 +343,11 @@
 Koteswara Rao Kelam
 Kris Lindgren
 Kui Shi
+Kumar Acharya
 Kun Huang
 Kyle Mestery
 Kyle Mestery
+LIU Yulong
 LIU Yulong
 Lajos Katona
 Lars Kellogg-Stedman
@@ -348,6 +369,7 @@
 Lucian Petrut
 Luis A. Garcia
 Luiz H Ozaki
+Lujin Luo
 Luke Gorrie
 Ly Loi
 Madhav Puri
@@ -367,6 +389,7 @@
 Mark T. Voelker
 Martin Hickey
 Martin Kletzander
+Martin Roy
 Martin Roy
 Martins Jakubovics
 Maru Newby
@@ -404,6 +427,7 @@
 Mithil Arun
 Mitsuhiro SHIGEMATSU
 Mohammad Banikazemi
+Mohit Malik
 Monty Taylor
 Morgan Fainberg
 Moshe Levi
@@ -421,6 +445,7 @@
 Nate Johnston
 Neil Jerram
 Nguyen Hung Phuong
+Nguyen Phuong An
 Nguyen Tuong Thanh
 Nick
 Nick Bartos
@@ -441,6 +466,7 @@
 Peng Xiao
 Peng Yong
 Peter Feiner
+Petronio Carlos Bezerra
 Petrut Lucian
 Pierre RAMBAUD
 Pierre Rognant
@@ -448,6 +474,7 @@
 Piotr Siwczak
 Pradeep Kilambi
 Praneet Bachheti
+Prashant Shetty
 Prasoon Telang
 Praveen Kumar SM
 Praveen Yalagandula
@@ -456,6 +483,7 @@
 Przemyslaw Czesnowicz
 Qiaowei Ren
 Qin Zhao
+QunyingRan
 Rahul Priyadarshi
 Raildo Mascena
 Rajaram Mallya
@@ -532,6 +560,8 @@
 Sascha Peilicke
 Sascha Peilicke
 Sascha Peilicke
+SauloAislan
+Saurabh Chordiya
 Sayaji
 Sean Dague
 Sean Dague
@@ -562,6 +592,7 @@
 Shweta Patil
 Siming Yin
 Simon Pasquier
+Sindhu Devale
 Sitaram Dontu
 Slawek Kaplonski
 Soheil Hassas Yeganeh
@@ -582,6 +613,7 @@
 Stephen Gordon
 Stephen Gran
 Stephen Ma
+Steve Kipp
 Steven Gonzales
 Steven Hillman
 Steven Ren
@@ -600,6 +632,7 @@
 Swapnil Kulkarni (coolsvap)
 Sylvain Afchain
 Sławek Kapłoński
+Sławek Kapłoński
 Takaaki Suzuki
 Takashi NATSUME
 Takuma Watanabe
@@ -623,6 +656,7 @@
 Tong Li
 Toni Ylenius
 Tony Xu
+Tracy Jones
 Trinath Somanchi
 TrinathSomanchi
 Tu Hong Jun
@@ -710,17 +744,22 @@
 chen-li
 chen-li
 chnm-kulkarni
+daz
 dekehn
+dineshbhor
 dql
 dukhlov
 e0ne
 enriquetaso
 eperdomo
 eperdomo@cisco.com <>
+fellypefca
 fujioka yuuichi
 fumihiko kakuma
+gaozhengwei
 garyduan
 garyk
+gengchc2
 gessau
 gh159m
 gong yong sheng
@@ -729,23 +768,28 @@
 gordon chung
 guiyanxing
 hgangwx
+hobo.kengo
 houming-wang
 huangpengtao
+hujin
 hyunsun
 imran malik
 ivan-zhu
 jasonrad
 jingliuqing
 joe@midokura.com
+john_a_joyce
 johndavidge
 jun xie
 jun xie
 justin Lund
+karimb
 kedar kulkarni
 lawrancejing
 leejian0612
 lei zhang
 lijianlj
+lilintan
 linb
 liu-sheng
 liudong
@@ -757,6 +801,7 @@
 lzklibj
 malos
 mamtap
+maoshuai
 marios
 mark mcclain
 mat
@@ -769,6 +814,7 @@
 niusmallnan
 nmagnezi
 openstack
+qinchunhua
 rajeev
 rajeev
 reedip
@@ -776,6 +822,7 @@
 rohitagarwalla roagarwa@cisco.com <>
 ronak
 root
+root
 root
 rossella
 sadasu
@@ -786,6 +833,8 @@
 shihanzhang
 shmcfarl
 shu,xinxin
+sindhu devale
+sindhudevale
 singhannie
 siyingchun
 snaiksat
@@ -797,11 +846,13 @@
 sukhdev
 sushma_korati
 sysnet
+tianquan
 ting.wang
 tonytan4ever
 trinaths
 venkata anil
 venkata anil
+venkatamahesh
 venkatamahesh
 vijaychundury
 vikas
@@ -819,7 +870,9 @@
 yujie
 yuyangbj
 zengfagao
+zhangyanxian
 zhhuabj
 zhiyuan_cai
+zhufl
 zoukeke@cmss.chinamobile.com
 Édouard Thuleau
diff -Nru neutron-9.0.0~b2~dev280/ChangeLog neutron-9.0.0~b3~dev557/ChangeLog
--- neutron-9.0.0~b2~dev280/ChangeLog 2016-06-27 15:31:49.000000000 +0000
+++ neutron-9.0.0~b3~dev557/ChangeLog 2016-08-29 20:05:57.000000000 +0000
@@ -1,13 +1,396 @@
 CHANGES
 =======
 
+* fullstack: Use ovs-2.5 for tests
+* Relocate Flat Allocation DB model
+* Relocate subnet_service_types db models
+* Remove unused config.CONF
+* Remove unused logging import
+* Add test cases for Invalid exception type
+* Enable create and delete segments in ML2
+* Use MultipleExceptions from neutorn-lib
+* Move standard attr out of model_base
+* Wait for ovsdb_monitor to be active before use it
+* Fix deprecation warnings
+* Updated from global requirements
+* Make addbr safe to bridge add races
+* Deprecate allow_sorting and allow_pagination options
+* Fix passing error physical network for get_mtu
+* Fix indent
+* Remove useless line for tenant_id
+* Implement check_vlan_transparency to return True in L2population mech driver
+* Security group call back need cascading delete the related rules
+* Fix internal server error during updating QoS rule
+* Make OVS and LinuxBridge trunk drivers' is_loaded() property more robust
+* IP allocation with Service Subnets
+* Include [agent] extensions option into ovs/linuxbridge agent files
+* Fix the QoSPluginBase methods signature
+* TrivialFix: Remove logging import unused
+* TrunkManager for the OVS agent
+* SR-IOV: remove unused supported_pci_vendor_info variable
+* Catch SubnetAllocationError during auto-allocated-topology provisioning
+* Revisit the Stadium section of the developer guide
+* objects: add support for per parent type foreign keys
+* Fix bug in L3 agent extension manager
+* Added the appropriate links in developer guide
+* L3 DVR: use fanout when sending dvr arp table update
+* Fix the attribute name: _flavor_plugin_ref
+* Implement the DELETE method for get-me-a-network
+* Update README to reflect ML2 Exception in Dir Tree
+* Revert "Fix NoSuchOptError on identity config option lookup"
+* Use row.uuid as getattr works for inserted row
+* Add mechanism driver error details to MechanismDriverError
+* Make auto allocate cleanup retry
+* Updated from global requirements
+* Increase default packet count to 3 in assert_ping
+* L2 Agent Extensions handle unimplemented methods
+* Relocate GRE Db models
+* docs: Small Open vSwitch devref tweaks
+* Filter out external networks in NetworksSearchCriteriaTest
+* Remove useless deprecation warning for tenant_id
+* Fix init method for HasStandardAttributes
+* Imported Translations from Zanata
+* TrunkStub.trunk_deleted is called with NULL trunk object
+* Fix NoSuchOptError on identity config option lookup
+* Fix bridge assertion error when times are equal
+* Avoid KeyError when accessing "dns_name" as it may not exist
+* Add tool to list moved globals
+* Introduce ovo objects for network segments
+* Add agent-side driver scaffolding for trunk functionality
+* Revert "Publish segment id in port responses"
+* Increase rally network/port count and add quotas
+* Extensions: fix file mode permissions
+* Update the homepage with developer documentation page
+* Don't create another plugin instance in ML2 tests
+* Relocate AddressScope DB model
+* Enable ra on gateway when add gateway to HA router
+* Remove override of _compare_server_default in _TestModelsMigrations
+* Make callback manager Object Oriented friendly
+* Switch to pluggable IPAM implementation
+* Update "devref/quality_of_service" with QoS DSCP rule reference
+* Adjust spacing in METADATA_PROXY_HANDLER_OPTS
+* Refactoring config options for plugin agent opts
+* Added tests for checking expand/contract branch upgrade
+* Don't pass argument sqlite_db in method set_defaults
+* SR-IOV: deprecate supported_pci_vendor_devs
+* Add error informations for users when value is invalid in database
+* Implement L3 Agent Extension Manager
+* Correct floating IP updating with same port_id issue
+* Fixed neutron-db-manage without neutron/tests installed
+* Clean imports in code
+* Prevent duplicate SG rules in 'concurrent requests' case
+* Relax bound constraint for trunk parent ports
+* Fix potential problem in test_router_add_interface_port
+* Fix test_router_add_interface_delete_port_after_failure
+* Remove stale configuration l3_agent_manager
+* Add RPC layer for Trunk Plugin and driver plumbing
+* Make auto-allocate plugin handle sneaky DB errors
+* Broken extensions should not show up in the extension list
+* Introduce ovo objects for security groups
+* Add debug option to verify iptables rules
+* Avoid IPAM driver reusing a session that has been rolled back
+* Fix neutron_lib.constants DeprecationWarning from db.models_v2
+* Use dispose_pool() from oslo.db
+* Get rid of get_engine() in db/api.py
+* models: move AllowedAddressPair model under neutron/db/models
+* Refactor setting OSprofiler for db calls
+* Raise 501 instead of 500 when updating meter-lebel and rule
+* Updated from global requirements
+* Constrain remaining tox targets
+* Check content type by completely match instead of partial match
+* Allow bound ports to be trunked if the driver can support it
+* Publish segment id in port responses
+* Fix some spelling errors in net_helpers.py
+* Refactoring config options of l3 agent keepalived
+* Fix check_asserttruefalse syntax validator
+* Relocate Security Group DB models
+* Add floating IP test to ensure backwards compat
+* Always start transactions in quota cleanup methods
+* Refactoring config options for services opts
+* Fix a spelling error
+* isolate test_db_find_column_type_list
+* Adds a default reload callback to ProcessManager
+* tests: added missing space in a skip test message
+* Set secure fail mode for physical bridges
+* Avoid allocating ports from ip_local_port_range
+* lb-agent: handle exception when bridge slave already removed
+* Ensure ML2's create/update_port methods not in transaction
+* Add flush command to iproute in ip_lib
+* Better utilize the L3 Namespace class
+* Fix typo in l3-agent namespace code
+* ovs-agent: Seperate VLAN mapping outside of the agent
+* Updated from global requirements
+* Check the router gateway IPs prefixlen existence
+* pep8: fixed F821 violation in a unit test
+* Add devref for Relocating DB models
+* Handle deleted ports when creating a list of fdb entries
+* Set bridge_name in OVS trunk port's vif_details
+* ml2: allow retry on retriabable db error by precommit
+* Rollback router intf port update if csnat update fails
+* Enable DeprecationWarning in test environments
+* Check target_tenant when create rbac policy
+* Delete HA network if last HA router is migrated
+* Add linux bridge trunk server side driver
+* Enable CRUD for Subnet Service Types
+* Make revision bump robust to concurrent removals
+* Fix duplicate routerport handling
+* Cleanup DB retry logic in ML2
+* Refactoring config options for extension opts
+* Refactoring security group config options
+* Don't use versions in _DeprecateSubset
+* Add RouterPort bindings for all HA ports
+* Log full exception before retry in decorator
+* L3 agent: check router namespace existence before delete
+* Consider baremetal device_owner as compute for nova notify
+* Delete conntrack entry with remote_ip on the other direction
+* Do not remove the HA/legacy router gateway secondary IPs
+* DHCP Auto Scheduling for routed provider networks
+* Restore old assert_ping behavior
+* DVR: Clean stale snat-ns by checking its existence when agent restarts
+* Remove neutron-lib warnings
+* Ensure most of ML2's core methods not in transaction
+* Add scaffolding for trunk plugin/server-side driver integration
+* Remove neutron lib warnings from ipv6_utils
+* Updated from global requirements
+* Introduce state management for trunk resources
+* Refactoring config options for wsgi opts
+* Add a space after openvswitch error message
+* Remove local subports validator
+* objects: introduce count() API to count matching objects
+* Rename DB columns: tenant -> project
+* Fix for creation of network environment twice
+* Use neutron-lib add_validator for registration
+* objects: introduce a util function to handle tenant_id filter
+* tests: check that trunk sub_ports field is properly populated
+* Fix indexerror in delete_csnat_port
+* Add a unique key to port_id in routerports table
+* Refactoring cfg opts for ml2 plugin linuxbridge
+* Port device events for common agent
+* Fix module import for ovs_vsctl_timeout option
+* Change external_network_bridge default to ''
+* Fix link reference in OVS agent devref
+* Support callbacks for L3 plugins without an agent
+* Remove deprecated default subnetpools
+* Fixes the midonet test_l3 unit test failures
+* fixed a typo in src code
+* Suppresses a warning when no agents are configured
+* Introduce bulk push to rpc callback mechanism
+
+Add 'api-ref' for API reference"
+--------------------------------
+
+* Enable sorting and pagination by default
+* Added API extensions to detect sorting/pagination features
+* stadium: adopt openstack/releases in subproject release process
+* L2-Adjacency support
+* corrected the link in README.rst
+* Neutron-lib: use the L3_AGENT* definitions from neutron-lib
+* Fix a typo in neutron/services/trunk/rules.py
+* Refactoring config options of agent/common/ovs_lib
+* Add a callback registry event for the init process - before spawning
+* Refactoring config options for cmd
+* Don't use file() to write object hashes
+* Fix L3 NAT DB signature mismatch
+* Add in missing translations for exceptions
+* Fix that api_workers=0 doesn't spawn any api workers
+* Filter HA router without HA port bindings after race conditions
+* Fix updating allocation_pools on subnet update
+* trunk: avoid redundant refetch of subports on create
+* tests: enable test_get_objects_queries_constant for trunk ports
+* Don't use exponential back-off for report_state
+
+Add 'api-ref' for API reference
+-------------------------------
+
+* bug tag: Add 'api-ref' for API reference
+* Add link in README.rst
+* Set prefix on floating_ip_mangle rules
+* Remove 'released-neutronclient' tag from official bug tags
+* Increment revision numbers on object changes
+* Print out specific filter that failed in object filtering unit test
+* objects: loading synthetic fields from defined ORM relationships
+* objects: forbid updates for project_id field for subnets
+* tests: stop using ml2 plugin full import paths in tests
+* Add API tests for router and DHCP port status
+* Skip DHCP provisioning block for network ports
+* Wait for vswitchd to add interfaces in native ovsdb
+* Add flavor/service provider support to routers
+* Add some negative policy router interface tests
+* Add notifications for trunk CRUD and standardize payload
+* Refactoring config options for common config opts
+* Prevent port update from binding a host where IPs won't work
+* policies: Add tempest tag for launchpad bugs
+* Fix for check_vlan_transparency on mech drivers not called
+* Refactor DNS integration out of DB core plugin
+* Fix typo in method description
+* Filter out subnets on different segments for dhcp
+* Add information about using file based sqlite for unit tests
+* Deprecate implicit loading of service_providers from neutron_*.conf
+* Remove deprecated network_device_mtu option
+* objects: Add update_fields method in base class
+* Remove unused code in neutron/agent/linux/utils.py
+* Pass timeout in milliseconds to timer_wait
+* Prohibit deletion of ports currently in use by a trunk
+* Mark DBConnectionError as retriable
+* Add subresources support for PECAN
+* Refactoring config options for l3 agent config
+* Improve the segmentation ID validation logic
+* Revisit (add|remove)_subports request body
+* objects: Adjust Subnet fields, add tenant_id and segment_id
+* Use is_valid_port from oslo.utils
+* Validate device to mac instead of port id to mac
+* Updated from global requirements
+* Don't interrupt device loop for missing device
+* Enable passive deletes on trunk deletion
+* Removed smoke tags from *SearchCriteriaTest test cases
+* Calculate MTU on every network fetch instead of on create
+* Fix wait_until_true condition in dhcp test
+* Add callbacks for networks and subnets in ML2
+* Check compatibility when auto schedule ha routers
+* Remove execute permission which is added by mistake in midonet
+* Ensure test_cleanup_stale_devices fails gracefully
+* Add new attributes to trunk model
+* Generalize agent extension mechanism
+* fullstack: Add hybrid plug support
+* Use db_api.retry_db_errors in quota engine
+* Update ovsdb release notes re: new OVS ports
+* objects: better apply filters for objects/db/api/get_object query
+* Use DEVICE_OWNER_COMPUTE_PREFIX from neutron-lib
+* Imported Translations from Zanata
+* Fix misuse of assertTrue in L3 DVR test case
+* Pecan: Define plugin crud methods in base class
+* Fix broken URLs in bugs.rst (core-reviewers.html -> neutron-teams.html)
+* objects: Convert filters to string for list values
+* fullstack: Log testrunner again
+* QoSTest: skip if qos extension is not available
+* Add support for Python 3.5
+* Only ports on routed networks are deferred
+* Fill in trunk_details on port resource
+* Fix a pylint error in an L3 agent unit test
+* DVR: Fix ItemAllocator class to handle exceptions
+* Add RouterPort binding to ha interface creation
+* objects: Add RBAC to Subnet OVO
+* Improve cleanup logic for trunk tests
+* Updated from global requirements
+* Add retry decorator to dhcp_ready_on_ports
+* delete event payload
+* Add function to return all hosts with mapped segments
+* Handle non existing network in segment creation
+
+9.0.0.0b2
+---------
+
+* Hacking: add unit test for LOG.warn validations
+* Allow unique keys to be used with get_object
+* Add object versioning to QoS DSCP
+* Replace device owners hard coded strings to neutron_lib constants
+* Add function to remove constraints from database
+* Add dhcp to Fdb extension's permitted device owners
+* Use context from_environ to load contexts
+* Use from_dict to load context params
+* Add a hacking rule for string interpolation at logging
+* Add check that factory started in dispose
+* Delete gw port on exceptions
+* Avoid duplicate ipset processing for security groups
+* DVR: handle floating IP reassociation on the same host
+* Refactor usage of dict.values()[0]
+* qos basic scenario
+* Check for provisioning blocks before updating port up
+* Rename dvr portbinding functions
+* Emit registry events on subport addition/removal
+* Ensure deferred IP fails when host is provided no IP allocated
+* Extension to tell when deferred binding is in effect
+* Fix typo in message string causing server exception
+* Deprecate option min_l3_agents_per_router
+* Address outstanding TODO for callback event
+* Allow tox to be run with python 3
+* Incorporate tweaks to subport validator
+* Allow auto-addressed ips deletion on port update
+* Delete default route if no gateway in external net
+* Add information about contract creation exceptions in devref
+* ML2: don't use IntegrityError for duplicate detection
+* Grammar error fixed
+* Fixed Typo in contribute.rst
+* Refactoring config options for dhcp agent
+* Revert "Support unique labels for alembic branches"
+* DVR: Ensure that only one fg device can exist at a time in fip ns
+* New engine facade from oslo_db: Step 2
+* When deleting floating IP catch PortNotFound
+* Notify nova with network-vif-plugged in case of live migration
+* Skip TrunksSearchCriteriaTest if the extension is not available
+* Don't catch DBDuplicate in default SG creation
+* Catch missing binding record in provision handler
+* Pull stadium projects from governance.o.o in utility script
+* Add an independent function to map segment to hosts
+* List only admin_state_up auto-allocated resources
+* Change tunnel MTU calculation to support IPv6
+* Fix broken link
+* ML2 remove extra checks in ovs_dvr_neutron_agent
+* Updated from global requirements
+* Fixed typos
+* Fixes a link error
+* next() is incompatible in test_network_ip_availability.py
+* Run 'ip netns list' according to 'AGENT/use_helper_for_ns_read'
+* Remove unused LOG
+* Fix order of arguments in assertEqual
+* Reuse common code in securitygroups_rpc module
+* Release note: fix a typo in add-time-stamp-fields
+* Imported Translations from Zanata
+* Update the template for model sync test docs
+* Add sorting and pagination tests for trunk resources
+* Enable CRUD for trunk ports
+* OVS-agent: Switch the default to "native" of_interface
+* Use tempest.lib tenants_client
+* Stable Branch URL Fixed
+* Support unique labels for alembic branches
+* create_router: Report the original exception
+* ml2: postpone exception logs to when retry mechanism fails to recover
+* Fix OVSBridge.set_protocols arg
+* Create segment_host mapping after new network
+* Fix spelling mistakes in the docs
+* Adding the appropriate log hints where needed
+* Lower ML2 message severity
+* spelling error: modify assocations -> associations in files as follows: neutron/agent/l3/link_local_allocator.py:38
+* Make create_object_with_dependency cleanup
+* Restore MySQL and Postgresql functional testing
+* functional: Use assertItemsEqual for db_find outputs
+* Adding FDB population agent extension
+* pep8: Register checks with their code
+* sriov: Fix macvtap vf interface regex pattern
+* Mock threading.Thread to prevent daemon creation by unit tests
+* Fix some typos
+* Register the dict extend function when service plugin starts
+* Remove notification for process event
+* Add two more callbacks registry events
+* Do not depend on Python error strings in parse_network_vlan_range()
+* Fix code that's trying to read from a stale DB object
+* Remove 'origin/' in OVS_BRANCH
+* Only update SegmentHostMapping for the given host
+* Move Nova notification logic out of API controller
+* Create segment_host mapping after new segment
+* Skip INVALID and UNASSIGNED ofport in vlan restore
+* objects: introduce NetworkPortSecurity object
+* objects: Introduce the DNSNameServer OVO in the code
+* Implementation details to support trunk ports
+* Move wait_until_true to neutron.common.utils
+* Imported Translations from Zanata
+* Agent extension: fix comment
+* enable OVSDB native interface by default
+* Pecan: Implement pagination
+* Not auto schedule router when sync routers from agent
+* Updated from global requirements
+* Remove the deprecated config "quota_items"
+* Fix simple typos
 * Create auto allocated networks in disabled state
 * Move DHCP notification logic out of API controller
 * Pecan: move fields and filters logic to hooks
+* DHCP Agent scheduling with segments
+* Fixes port device_id/device_owner change in failed operation
 * Remove the deprecated config 'router_id'
 * Separate exception class for retriables in callbacks
 * Revert "OVS: don't throw KeyError when duplicate VLAN tags exist"
 * Updated from global requirements
+* Add revision_number to standard attr for OVO
 * Check for RetryRequest in MultipleException types
 * Remove IP availability range recalculation logic
 * Rename ml2_dvr_port_bindings to make it generic
@@ -21,13 +404,16 @@
 * Do not rewrite original exception for IPAM part 2
 * Change addCleanup create_tenant to delete_tenant, fix gate
 * Obsolete mac_generation_retries and deprecate the option
+* Remove unnecessary flush for duplicate address detection
 * Fix minor spelling error in debug log
 * tests: clean up designate client session mock on test exit
 * Remove unnecessary import from segment plugin
 * OVS: UnboundLocalError on switch timeout fixed
 * ovsfw: Fix variable names in UT
 * ovs: set device MTU after it's moved into a namespace
+* cache_utils: fixed cache misses for the new (oslo.cache) configuration
 * Syntax fix
+* ml2 lb: do not program arp responder when unused
 * Remove deprecated TODO notes in L2 agent extension manager
 * Fix pep8 violations in fullstack qos test
 * Don't return marker item when paginating backwards
@@ -45,6 +431,7 @@
 * qos: added api sorting/pagination tests for policies
 * Check for alembic Add/DropColumn exceptions in migrations
 * objects: switch base plugin class to using subnetpool object
+* l3: support native sorting/pagination for routers
 * Added sorting/pagination tests for routers
 * Added sorting/pagination tests for subnets
 * DHCP: delete config option dnsmasq_dns_server
@@ -69,6 +456,7 @@
 * Revert "Add index on trunk_id in the subports model"
 * Update Neutron server to use only keystoneauth
 * Make segment aware IPAM compatible with ML2
+* Fix of ping usage in net_helpers.async_ping()
 * Remove MAC duplicate detection for generated macs
 * Pecan: handle single fields query parameter
 * Compute IPAvailabilityRanges in memory during IP allocation
@@ -102,8 +490,10 @@
 * Revert "DVR: Clear SNAT namespace when agent restarts after router move"
 * objects: Use common plugin _model_query in get_object
 * Tox: Remove neutron/openstack/common from excludes list
+* Fix missing availability_zone in dhcp and l3 conf
 * qos: Add API test for shared policy
 * Imported Translations from Zanata
+* l3_db: Make gw port in-use check overridable
 * Fix server_default comparison for BigInteger
 * Update ml2 delete_subnet to deallocate via ipam
 * Make IPAM segment aware on port update
@@ -169,6 +559,7 @@
 * Avoid shadowing the method's port argument
 * OVO for VLAN aware VMs
 * tests: cover port with existing sorting/pagination api tests
+* Allow min_l3_agents_per_router to equal one
 * How to support trunk ports with Open vSwitch Agent
 * Introduce official lib tag for neutron-lib issues
 * Pecan: tell the plugin about field selection
@@ -217,6 +608,7 @@
 * Segment: remove deprecation warning for converters
 * Add negative API tests that try to remove the resources in use
 * Respond negatively to tenant detachment of enforced QoS policies
+* Removed invalid test due to invalid mocking
 * Check if pool update is needed in reference driver
 * Remove cliff requirement in test-requirements.txt
 * sriov_nic config options were declared under wrong group
@@ -233,6 +625,7 @@
 * Add method to get service provider names by resources
 * Enable flavor plugin as a default service plugin
 * Add setting default max_burst value if not given by user
+* Remove the file i18n.py and other related cleanups
 * Fix for 'ofport' query retries during neutron agent start
 * Segment extension: remove deprecated warnings
 * Add provisioning blocks to status ACTIVE transition
@@ -362,6 +755,7 @@
 * Avoid referencing code from master branch
 * Support interface drivers that don't support mtu parameter for plug_new
 * Use tempest plugin interface
+* Add 169.254.169.254 when enable force_metadata
 * Fix deprecation warning for external_network_bridge
 * Add ALLOCATING state to routers
 * Change wrong word "propogated" to "propagated"
@@ -529,6 +923,7 @@
 ----------
 
 * api tests: Check correct extensions
+* devref: Remove stale description about network_ip_availability
 * Imported Translations from Zanata
 * Add db migrations test framework with data
 * Remove unnecessary executable permissions
@@ -574,6 +969,7 @@
 * DB: remove method _get_tenant_id_for_create
 * use separate device owner for HA router interface
 * QOS: Provide get methods for policy & network/port binding
+* Fix spelling mistake
 * Fixes typo
 * Imported Translations from Zanata
 * functional: Update ref used from ovs branch-2.5
@@ -633,6 +1029,9 @@
 * DVR: Agent side change for live migration with floatingip
 * DVR:Pro-active router creation with live migration
 * Return oslo_config Opts to config generator
+* Update testing coverage document
+* devref doc config option separation
+* Added test cases for DVR L3 schedulers
 * Update Neutron with temporary registry pattern from VersionedObjectRegistry
 * Reset RNG seed with current time and pid for each test started
 * Create a hook in base object to modify the fields before DB operations
diff -Nru neutron-9.0.0~b2~dev280/debian/changelog neutron-9.0.0~b3~dev557/debian/changelog
--- neutron-9.0.0~b2~dev280/debian/changelog 2016-06-27 19:22:51.000000000 +0000
+++ neutron-9.0.0~b3~dev557/debian/changelog 2016-08-29 20:15:21.000000000 +0000
@@ -1,21 +1,50 @@
-neutron (2:9.0.0~b2~dev280-0ubuntu1~ubuntu16.10.1~ppa201606271522) yakkety; urgency=medium
+neutron (2:9.0.0~b3~dev557-0ubuntu1~ubuntu16.10.1~ppa201608291615) yakkety; urgency=medium
 
+  [ Corey Bryant ]
+  * d/neutron-linuxbridge-agent.neutron-linuxbridge-cleanup.service.in:
+    Fix Type=oneshot typo (LP: #1606652).
+
+  [ James Page ]
+  * New upstream release.
+  * d/p/flake8-legacy.patch: Compat patch for newer flake8 version than
+    upstream OpenStack is currently aligned with.
+
+  [ Corey Bryant ]
+  * New upstream version.
   * No-change backport to yakkety
 
- -- Corey Bryant  Mon, 27 Jun 2016 15:22:51 -0400
+ -- Corey Bryant  Mon, 29 Aug 2016 16:15:21 -0400
+
+neutron (2:9.0.0~b2-0ubuntu3) yakkety; urgency=medium
+
+  * d/neutron-linuxbridge-agent.neutron-linuxbridge-cleanup.init.in:
+    Run neutron-linuxbridge-cleanup with linuxbridge_agent.ini (LP: #1606657).
+
+ -- Corey Bryant  Fri, 05 Aug 2016 14:55:38 -0400
 
-neutron (2:9.0.0~b2~dev280-0ubuntu1) UNRELEASED; urgency=medium
+neutron (2:9.0.0~b2-0ubuntu2) yakkety; urgency=medium
+
+  * d/control: Add runtime dependency on python-ryu, now used as the default
+    openflow manager for the ML2/Open vSwitch agent.
+  * d/p/drop-ryu-dep.patch: Dropped, as we need ryu now for normal operation.
+
+ -- James Page  Tue, 19 Jul 2016 16:03:14 +0100
+
+neutron (2:9.0.0~b2-0ubuntu1) yakkety; urgency=medium
 
   [ Corey Bryant ]
-  * New upstream version.
   * d/p/drop-ryu-dep.patch: Rebased.
+  * d/control: Bump min python-fixtures to 3.0.0.
 
   [ David Della Vecchia ]
-  * New upstream version.
   * d/p/drop-ryu-dep.patch: Rebased.
   * d/control: Align (build-)depends with upstream.
 
- -- Corey Bryant  Mon, 27 Jun 2016 11:32:02 -0400
+  [ James Page ]
+  * New upstream version.
+  * d/control: Align (Build-)Depends with upstream.
+
+ -- James Page  Thu, 14 Jul 2016 11:49:23 +0100
 
 neutron (2:9.0.0~b1-0ubuntu1) yakkety; urgency=medium
diff -Nru neutron-9.0.0~b2~dev280/debian/control neutron-9.0.0~b3~dev557/debian/control
--- neutron-9.0.0~b2~dev280/debian/control 2016-06-27 15:33:25.000000000 +0000
+++ neutron-9.0.0~b3~dev557/debian/control 2016-08-29 20:06:44.000000000 +0000
@@ -18,7 +18,7 @@
          python-debtcollector (>= 1.2.0),
          python-designateclient (>= 1.5.0),
          python-eventlet (>= 0.18.2),
-         python-fixtures (>= 1.3.1),
+         python-fixtures (>= 3.0.0),
          python-greenlet (>= 0.3.2),
          python-hacking,
          python-httplib2 (>= 0.7.5),
@@ -31,7 +31,7 @@
          python-neutronclient (>= 1:4.2.0),
          python-novaclient (>= 2:2.29.0),
          python-openvswitch (>= 2.5.0),
-         python-os-testr (>= 0.4.1),
+         python-os-testr (>= 0.7.0),
          python-oslo.cache (>= 1.5.0),
          python-oslo.concurrency (>= 3.8.0),
          python-oslo.config (>= 1:3.10.0),
@@ -46,7 +46,7 @@
          python-oslo.rootwrap (>= 2.0.0),
          python-oslo.serialization (>= 1.10.0),
          python-oslo.service (>= 1.10.0),
-         python-oslo.utils (>= 3.11.0),
+         python-oslo.utils (>= 3.14.0),
          python-oslo.versionedobjects (>= 1.9.1),
          python-oslosphinx (>= 2.5.0),
          python-oslotest (>= 1.10.0),
@@ -56,7 +56,7 @@
          python-pecan (>= 1.0.0),
          python-pymysql,
          python-requests (>= 2.10.0),
-         python-requests-mock (>= 0.7.0),
+         python-requests-mock (>= 1.0),
          python-retrying (>= 1.2.3),
          python-routes (>= 1.12.3),
          python-ryu (>= 3.30),
@@ -215,7 +215,7 @@
          python-oslo.rootwrap (>= 2.0.0),
          python-oslo.serialization (>= 1.10.0),
          python-oslo.service (>= 1.10.0),
-         python-oslo.utils (>= 3.11.0),
+         python-oslo.utils (>= 3.14.0),
          python-oslo.versionedobjects (>= 1.9.1),
          python-osprofiler (>= 1.3.0),
          python-paste,
@@ -225,13 +225,13 @@
          python-requests (>= 2.10.0),
          python-retrying (>= 1.2.3),
          python-routes (>= 1.12.3),
+         python-ryu (>= 3.30),
          python-six (>= 1.9.0),
          python-sqlalchemy (>= 1.0.10),
          python-stevedore (>= 1.10.0),
          python-webob (>= 1.2.3),
          ${misc:Depends},
          ${python:Depends},
-Suggests: python-ryu (>= 3.30),
 Provides: ${python:Provides},
 XB-Python-Version: ${python:Versions}
 Description: Neutron is a virtual network service for Openstack - Python library
diff -Nru neutron-9.0.0~b2~dev280/debian/neutron-linuxbridge-agent.neutron-linuxbridge-cleanup.init.in neutron-9.0.0~b3~dev557/debian/neutron-linuxbridge-agent.neutron-linuxbridge-cleanup.init.in
--- neutron-9.0.0~b2~dev280/debian/neutron-linuxbridge-agent.neutron-linuxbridge-cleanup.init.in 2016-06-27 15:33:25.000000000 +0000
+++ neutron-9.0.0~b3~dev557/debian/neutron-linuxbridge-agent.neutron-linuxbridge-cleanup.init.in 2016-08-29 20:06:44.000000000 +0000
@@ -16,3 +16,7 @@
 DESC="OpenStack Neutron Linux bridge cleanup"
 PROJECT_NAME=neutron
 NAME=${PROJECT_NAME}-linuxbridge-cleanup
+DAEMON=/usr/bin/neutron-linuxbridge-cleanup
+# --config-file=/etc/neutron/neutron.conf will be appended
+# to DAEMON_ARGS later by openstack-pkg-tools
+DAEMON_ARGS="--config-file=/etc/neutron/plugins/ml2/linuxbridge_agent.ini"
diff -Nru neutron-9.0.0~b2~dev280/debian/neutron-linuxbridge-agent.neutron-linuxbridge-cleanup.service.in neutron-9.0.0~b3~dev557/debian/neutron-linuxbridge-agent.neutron-linuxbridge-cleanup.service.in
--- neutron-9.0.0~b2~dev280/debian/neutron-linuxbridge-agent.neutron-linuxbridge-cleanup.service.in 2016-06-27 15:33:25.000000000 +0000
+++ neutron-9.0.0~b3~dev557/debian/neutron-linuxbridge-agent.neutron-linuxbridge-cleanup.service.in 2016-08-29 20:06:44.000000000 +0000
@@ -2,7 +2,7 @@
 Description=OpenStack Neutron Linux bridge cleanup
 
 [Service]
-type=oneshot
+Type=oneshot
 User=neutron
 Group=neutron
 WorkingDirectory=/var/lib/neutron
diff -Nru neutron-9.0.0~b2~dev280/debian/patches/drop-ryu-dep.patch neutron-9.0.0~b3~dev557/debian/patches/drop-ryu-dep.patch
--- neutron-9.0.0~b2~dev280/debian/patches/drop-ryu-dep.patch 2016-06-27 15:33:25.000000000 +0000
+++ neutron-9.0.0~b3~dev557/debian/patches/drop-ryu-dep.patch 1970-01-01 00:00:00.000000000 +0000
@@ -1,16 +0,0 @@
-Description: Drop ryu from dependencies
- It supports an alternative ovs driver implementation; its
- not the default, so deferring hard dependency for now
-Author: James Page
-Forwarded: not-needed
-
---- a/requirements.txt
-+++ b/requirements.txt
-@@ -20,7 +20,6 @@
- neutron-lib>=0.2.0 # Apache-2.0
- python-neutronclient>=4.2.0 # Apache-2.0
- retrying!=1.3.0,>=1.2.3 # Apache-2.0
--ryu!=4.1,!=4.2,!=4.2.1,>=3.30 # Apache-2.0
- SQLAlchemy<1.1.0,>=1.0.10 # MIT
- WebOb>=1.2.3 # MIT
- keystoneauth1>=2.7.0 # Apache-2.0
diff -Nru neutron-9.0.0~b2~dev280/debian/patches/flake8-legacy.patch neutron-9.0.0~b3~dev557/debian/patches/flake8-legacy.patch
--- neutron-9.0.0~b2~dev280/debian/patches/flake8-legacy.patch 1970-01-01 00:00:00.000000000 +0000
+++ neutron-9.0.0~b3~dev557/debian/patches/flake8-legacy.patch 2016-08-29 20:06:44.000000000 +0000
@@ -0,0 +1,15 @@
+Description: Use legacy API as provided in flake8 >= 3.0.0
+Author: James Page
+Forwarded: not-needed
+
+--- a/neutron/tests/unit/hacking/test_checks.py
++++ b/neutron/tests/unit/hacking/test_checks.py
+@@ -12,7 +12,7 @@
+ 
+ import re
+ 
+-from flake8 import engine
++from flake8.api import legacy as engine
+ from hacking.tests import test_doctest as hacking_doctest
+ import pep8
+ import testscenarios
diff -Nru neutron-9.0.0~b2~dev280/debian/patches/series neutron-9.0.0~b3~dev557/debian/patches/series
--- neutron-9.0.0~b2~dev280/debian/patches/series 2016-06-27 15:33:25.000000000 +0000
+++ neutron-9.0.0~b3~dev557/debian/patches/series 2016-08-29 20:06:44.000000000 +0000
@@ -1,2 +1,2 @@
 skip-iptest.patch
-drop-ryu-dep.patch
+flake8-legacy.patch
diff -Nru neutron-9.0.0~b2~dev280/devstack/lib/ovs neutron-9.0.0~b3~dev557/devstack/lib/ovs
--- neutron-9.0.0~b2~dev280/devstack/lib/ovs 2016-05-12 19:51:41.000000000 +0000
+++ neutron-9.0.0~b3~dev557/devstack/lib/ovs 2016-08-03 20:10:33.000000000 +0000
@@ -12,7 +12,7 @@
 
 OVS_REPO=${OVS_REPO:-https://github.com/openvswitch/ovs.git}
 OVS_REPO_NAME=$(basename ${OVS_REPO} | cut -f1 -d'.')
-OVS_BRANCH=${OVS_BRANCH:-origin/master}
+OVS_BRANCH=${OVS_BRANCH:-master}
 
 # Functions
 
diff -Nru neutron-9.0.0~b2~dev280/devstack/lib/trunk neutron-9.0.0~b3~dev557/devstack/lib/trunk
--- neutron-9.0.0~b2~dev280/devstack/lib/trunk 1970-01-01 00:00:00.000000000 +0000
+++ neutron-9.0.0~b3~dev557/devstack/lib/trunk 2016-08-03 20:10:33.000000000 +0000
@@ -0,0 +1,7 @@
+function configure_trunk_service_plugin {
+    _neutron_service_plugin_class_add "trunk"
+}
+
+function configure_trunk_extension {
+    configure_trunk_service_plugin
+}
diff -Nru neutron-9.0.0~b2~dev280/devstack/plugin.sh neutron-9.0.0~b3~dev557/devstack/plugin.sh
--- neutron-9.0.0~b2~dev280/devstack/plugin.sh 2016-06-08 18:00:11.000000000 +0000
+++ neutron-9.0.0~b3~dev557/devstack/plugin.sh 2016-08-03 20:10:33.000000000 +0000
@@ -6,6 +6,7 @@
 source $LIBDIR/ml2
 source $LIBDIR/qos
 source $LIBDIR/ovs
+source $LIBDIR/trunk
 
 Q_BUILD_OVS_FROM_GIT=$(trueorfalse False Q_BUILD_OVS_FROM_GIT)
 
@@ -22,6 +23,9 @@
     if is_service_enabled q-qos; then
         configure_qos
     fi
+    if is_service_enabled q-trunk; then
+        configure_trunk_extension
+    fi
     if [[ "$Q_AGENT" == "openvswitch" ]] && \
        [[ "$Q_BUILD_OVS_FROM_GIT" == "True" ]]; then
         remove_ovs_packages
diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/agent_extensions.rst neutron-9.0.0~b3~dev557/doc/source/devref/agent_extensions.rst
--- neutron-9.0.0~b2~dev280/doc/source/devref/agent_extensions.rst 1970-01-01 00:00:00.000000000 +0000
+++ neutron-9.0.0~b3~dev557/doc/source/devref/agent_extensions.rst 2016-08-03 20:10:33.000000000 +0000
@@ -0,0 +1,75 @@
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+
+      Convention for heading levels in Neutron devref:
+      =======  Heading 0 (reserved for the title in a document)
+      -------  Heading 1
+      ~~~~~~~  Heading 2
+      +++++++  Heading 3
+      '''''''  Heading 4
+      (Avoid deeper levels because they do not render well.)
+
+
+Agent extensions
+================
+
+All reference agents utilize a common extension mechanism that allows for the
+introduction and enabling of a core resource extension without needing to
+change agent code. This mechanism allows multiple agent extensions to be run by
+a single agent simultaneously. The mechanism may be especially interesting to
+third parties whose extensions lie outside the neutron tree.
+
+Under this framework, an agent may expose its API to each of its extensions
+thereby allowing an extension to access resources internal to the agent. At
+layer 2, for instance, upon each port event the agent is then able to trigger a
+handle_port method in its extensions.
+
+Each extension is referenced through a stevedore entry point defined within a
+specific namespace. For example, L2 extensions are referenced through the
+neutron.agent.l2.extensions namespace.
+
+The relevant modules are:
+
+* neutron.agent.agent_extension:
+  This module defines an abstract extension interface for all agent
+  extensions across L2 and L3.
+
+* neutron.agent.l2.agent_extension:
+* neutron.agent.l3.agent_extension:
+  These modules subclass
+  neutron.agent.agent_extension.AgentExtension and define a
+  layer-specific abstract extension interface.
+
+* neutron.agent.agent_extensions_manager:
+  This module contains a manager that allows extensions to load themselves at
+  runtime.
+
+* neutron.agent.l2.l2_extensions_manager:
+* neutron.agent.l3.l3_extensions_manager:
+  Each of these modules passes core resource events to loaded extensions.
+
+
+Agent API object
+----------------
+
+Every agent can pass an "agent API object" into its extensions in order to
+expose its internals to them in a controlled way. To accommodate different
+agents, each extension may define a consume_api() method that will receive
+this object.
+
+This agent API object is part of neutron's public interface for third parties.
+All changes to the interface will be managed in a backwards-compatible way.
+
+At the moment, only the L2 Open vSwitch agent provides an agent API object to
+extensions. See :doc:`L2 agent extensions <l2_agent_extensions>`.
diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/alembic_migrations.rst neutron-9.0.0~b3~dev557/doc/source/devref/alembic_migrations.rst
--- neutron-9.0.0~b2~dev280/doc/source/devref/alembic_migrations.rst 2016-06-03 15:08:31.000000000 +0000
+++ neutron-9.0.0~b3~dev557/doc/source/devref/alembic_migrations.rst 2016-08-03 20:10:33.000000000 +0000
@@ -417,6 +417,27 @@
     depends_on = ('',)
 
 
+Expand and Contract Branch Exceptions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In some cases, we have to have "expand" operations in contract migrations. For
+example, table 'networksegments' was renamed in contract migration, so all
+operations with this table are required to be in contract branch as well.
+For such cases, we use the ``contract_creation_exceptions`` that should be
+implemented as part of such migrations. This is needed to get functional tests
+pass.
+
+Usage::
+
+    def contract_creation_exceptions():
+        """Docstring should explain why we allow such exception for contract
+        branch.
+        """
+        return {
+            sqlalchemy_obj_type: ['name']
+            # For example: sa.Column: ['subnets.segment_id']
+        }
+
+
 HEAD files for conflict management
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/callbacks.rst neutron-9.0.0~b3~dev557/doc/source/devref/callbacks.rst
--- neutron-9.0.0~b2~dev280/doc/source/devref/callbacks.rst 2016-05-12 19:51:41.000000000 +0000
+++ neutron-9.0.0~b3~dev557/doc/source/devref/callbacks.rst 2016-08-03 20:10:33.000000000 +0000
@@ -359,7 +359,7 @@
 notified. Priorities can be a future extension, if a use case arises that
 require enforced ordering.
 
-How is the the notifying object expected to interact with the subscribing objects?
+How is the notifying object expected to interact with the subscribing objects?
 
   The ``notify`` method implements a one-way communication paradigm: the
   notifier sends a message without expecting a response back (in other words
   it fires and forget). However, due to the nature
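As a concrete illustration of the fire-and-forget semantics the callbacks.rst hunk above discusses, a subscriber and a notifier in the neutron.callbacks registry of this era might look like the following sketch; the callback body and the keyword payload are invented for illustration::

    from neutron.callbacks import events
    from neutron.callbacks import registry
    from neutron.callbacks import resources

    def log_router_create(resource, event, trigger, **kwargs):
        # Subscribers receive the notifier's keyword payload; there is no
        # channel for sending a response back to the notifier.
        print('%s.%s fired by %r with %s' % (resource, event, trigger, kwargs))

    registry.subscribe(log_router_create, resources.ROUTER, events.AFTER_CREATE)

    # The notifier fires and forgets:
    registry.notify(resources.ROUTER, events.AFTER_CREATE, 'this_plugin',
                    router_id='some-router-id')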
diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/calling_ml2_plugin.rst neutron-9.0.0~b3~dev557/doc/source/devref/calling_ml2_plugin.rst
--- neutron-9.0.0~b2~dev280/doc/source/devref/calling_ml2_plugin.rst 1970-01-01 00:00:00.000000000 +0000
+++ neutron-9.0.0~b3~dev557/doc/source/devref/calling_ml2_plugin.rst 2016-08-29 20:05:49.000000000 +0000
@@ -0,0 +1,47 @@
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+
+      Convention for heading levels in Neutron devref:
+      =======  Heading 0 (reserved for the title in a document)
+      -------  Heading 1
+      ~~~~~~~  Heading 2
+      +++++++  Heading 3
+      '''''''  Heading 4
+      (Avoid deeper levels because they do not render well.)
+
+
+Calling the ML2 Plugin
+======================
+
+When writing code for an extension, service plugin, or any other part of
+Neutron you must not call core plugin methods that mutate state while
+you have a transaction open on the session that you pass into the core
+plugin method.
+
+The create and update methods for ports, networks, and subnets in ML2
+all have a precommit phase and postcommit phase. During the postcommit
+phase, the data is expected to be fully persisted to the database and
+ML2 drivers will use this time to relay information to a backend outside
+of Neutron. Calling the ML2 plugin within a transaction would violate
+this semantic because the data would not be persisted to the DB; and,
+were a failure to occur that caused the whole transaction to be rolled
+back, the backend would become inconsistent with the state in Neutron's
+DB.
+
+To prevent this, these methods are protected with a decorator that will
+raise a RuntimeError if they are called with context that has a session
+in an active transaction. The decorator can be found at
+neutron.common.utils.transaction_guard and may be used in other places
+in Neutron to protect functions that are expected to be called outside
+of a transaction.
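A sketch of the guard this document describes is shown below. The real decorator lives at neutron.common.utils.transaction_guard; this version only illustrates the idea and assumes the session exposes an is_active flag while a transaction is open::

    import functools

    def transaction_guard(f):
        """Reject calls made while the caller's session has an open
        transaction, per the rule described above."""
        @functools.wraps(f)
        def inner(self, context, *args, **kwargs):
            if context.session.is_active:
                raise RuntimeError(
                    'Method %s cannot be called within a transaction.'
                    % f.__name__)
            return f(self, context, *args, **kwargs)
        return inner

Applied to methods such as create_port or update_port on the plugin, the decorator turns an accidental in-transaction call into an immediate, easily debugged failure instead of a silent consistency hazard.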
diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/contribute.rst neutron-9.0.0~b3~dev557/doc/source/devref/contribute.rst
--- neutron-9.0.0~b2~dev280/doc/source/devref/contribute.rst 2016-06-03 15:08:31.000000000 +0000
+++ neutron-9.0.0~b3~dev557/doc/source/devref/contribute.rst 2016-08-03 20:10:33.000000000 +0000
@@ -118,7 +118,7 @@
 documents, within the same third-party library repo. If changes to the
 common Neutron code are required, an
 `RFE `_
-may need to be filed. However every case is different and you are invited to
+may need to be filed. However, every case is different and you are invited to
 seek guidance from Neutron core reviewers about what steps to follow.
 
@@ -126,7 +126,7 @@
 ----------------------------------
 
 The following strategies are recommendations only, since third-party CI testing
-is not a enforced requirement. However, these strategies are employed by the
+is not an enforced requirement. However, these strategies are employed by the
 majority of the plugin/driver contributors that actively participate in the
 Neutron development community, since they have learned from experience how
 quickly their code can fall out of sync with the rapidly changing Neutron core
@@ -188,7 +188,7 @@
 
 The OpenStack VMT directly oversees vulnerability reporting and disclosure for
 a `subset of OpenStack source code repositories
-`_. However they
+`_. However, they
 are still quite happy to answer any questions you might have about
 vulnerability management for your own projects even if they're not part of
 that set. Feel free to reach out to the VMT in public or in private.
@@ -202,7 +202,7 @@
 - Does a CVE need to be filed?
 
 It can vary widely. If a commercial distribution such as Red Hat is
-redistributing a vulnerable version of your software then they may assign one
+redistributing a vulnerable version of your software, then they may assign one
 anyway even if you don't request one yourself. Or the reporter may request one;
 the reporter may even be affiliated with an organization who has already
 assigned/obtained a CVE before they initiate contact with you.
@@ -329,7 +329,7 @@
 Each project is recommended to support i18n.
 This section describes how to set up translation support.
 
-The description in this section uses the following variables.
+The description in this section uses the following variables:
 
 * repository : ``openstack/${REPOSITORY}`` (e.g., ``openstack/networking-foo``)
 * top level python path : ``${MODULE_NAME}`` (e.g., ``networking_foo``)
diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/db_layer.rst neutron-9.0.0~b3~dev557/doc/source/devref/db_layer.rst
--- neutron-9.0.0~b2~dev280/doc/source/devref/db_layer.rst 2016-05-12 19:51:41.000000000 +0000
+++ neutron-9.0.0~b3~dev557/doc/source/devref/db_layer.rst 2016-08-29 20:05:49.000000000 +0000
@@ -76,7 +76,7 @@
 
 To address this issue, the 'standardattribute' table is available. Any model
 can add support for this table by inheriting the 'HasStandardAttributes' mixin
-in neutron.db.model_base. This mixin will add a standard_attr_id BigInteger
+in neutron.db.standard_attr. This mixin will add a standard_attr_id BigInteger
 column to the model with a foreign key relationship to the
 'standardattribute' table. The model will then be able to access any columns
 of the 'standardattribute' table and any tables related to it.
diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/db_models.rst neutron-9.0.0~b3~dev557/doc/source/devref/db_models.rst
--- neutron-9.0.0~b2~dev280/doc/source/devref/db_models.rst 1970-01-01 00:00:00.000000000 +0000
+++ neutron-9.0.0~b3~dev557/doc/source/devref/db_models.rst 2016-08-29 20:05:49.000000000 +0000
@@ -0,0 +1,49 @@
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+
+Relocation of Database Models
+=============================
+
+This document is intended to track and notify developers that db models in
+neutron will be centralized and moved to a new tree under neutron/db/models.
+This was discussed in [1]. The reason for relocating db models is to solve
+the cyclic import issue while implementing oslo versioned objects for
+resources in neutron.
+
+The reason behind this relocation is Mixin class and db models for some
+resources in neutron are in same module. In Mixin classes, there are methods
+which provide functionality of fetching, adding, updating and deleting data
+via queries. These queries will be replaced with use of versioned objects and
+definition of versioned object will be using db models. So object files will
+be importing models and Mixin need to import those objects which will end up
+in cyclic import.
+
+Structure of Model Definitions
+------------------------------
+
+We have decided to move all models definitions to neutron/db/models/
+with no futher nesting after that point. The deprecation method to move
+models is already been added to avoid breakage of third party plugins using
+those models. All relocated models need to use deprecate method that
+will generate a warning and return new class for use of old class. Some
+examples of relocated models [2] and [3]. In future if you define new models
+please make sure they are separated from mixins and are under tree
+neutron/db/models/ .
+
+References
+~~~~~~~~~~
+
+[1]. https://www.mail-archive.com/openstack-dev@lists.openstack.org/msg88910.html
+[2]. https://review.openstack.org/#/c/348562/
+[3]. https://review.openstack.org/#/c/348757/
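The "deprecation method" that db_models.rst mentions is essentially a module-level alias in the old location that warns and forwards to the relocated class. A sketch of such a shim using debtcollector, with an invented model name (the actual shims are in the reviews cited above)::

    # Left behind in the old module so third-party imports keep working;
    # the module path and model name here are illustrative only.
    from debtcollector import moves

    from neutron.db.models import widget as widget_models  # hypothetical

    # Accessing Widget through the old module now emits a
    # DeprecationWarning that points users at the relocated class.
    Widget = moves.moved_class(
        widget_models.Widget, 'Widget', __name__)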
https://review.openstack.org/#/c/348562/ +[3]. https://review.openstack.org/#/c/348757/ diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/effective_neutron.rst neutron-9.0.0~b3~dev557/doc/source/devref/effective_neutron.rst --- neutron-9.0.0~b2~dev280/doc/source/devref/effective_neutron.rst 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/devref/effective_neutron.rst 2016-08-03 20:10:33.000000000 +0000 @@ -105,7 +105,7 @@ q = query(Object.id, Object.name, func.count(Object.number)).group_by(Object.id, Object.name) -* Beware of the `InvalidRequestError `_ exception. +* Beware of the `InvalidRequestError `_ exception. There is even a `Neutron bug `_ registered for it. Bear in mind that this error may also occur when nesting transaction blocks, and the innermost block raises an error without proper @@ -431,7 +431,7 @@ and send public questions to the channel rather then to a specific person if possible. (This increase the chances of getting faster answers to your questions). A list of the areas and lieutenants nicknames can be found at - `Core Reviewers `_. + `Core Reviewers `_. Commit messages ~~~~~~~~~~~~~~~ diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/index.rst neutron-9.0.0~b3~dev557/doc/source/devref/index.rst --- neutron-9.0.0~b2~dev280/doc/source/devref/index.rst 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/devref/index.rst 2016-08-29 20:05:49.000000000 +0000 @@ -55,6 +55,7 @@ services_and_agents api_layer ml2_ext_manager + calling_ml2_plugin quota api_extensions plugin-api @@ -64,6 +65,7 @@ rpc_callbacks layer3 l2_agents + agent_extensions ovs_vhostuser quality_of_service service_extensions diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/l2_agent_extensions.rst neutron-9.0.0~b3~dev557/doc/source/devref/l2_agent_extensions.rst --- neutron-9.0.0~b2~dev280/doc/source/devref/l2_agent_extensions.rst 2016-05-23 21:19:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/devref/l2_agent_extensions.rst 2016-08-03 20:10:33.000000000 +0000 @@ -24,38 +24,8 @@ L2 agent extensions =================== -All reference agents support common extension mechanism that allows to easily -reuse code between agents and to avoid the need to patch an agent for each new -core resource extension. Those extensions can be especially interesting to -third parties that don't want to maintain their code in Neutron tree. - -Extensions are referenced through stevedore entry points defined under -neutron.agent.l2.extensions namespace. On each port event, handle_port is -triggered by the agent. - -* neutron.agent.l2.agent_extension: - This module defines an abstract extension interface. - -* neutron.agent.l2.extensions.manager: - This module contains a manager that allows to register multiple extensions, - and passes handle_port events down to all enabled extensions. - - -Agent API object ----------------- - -Every agent can pass a so-called agent API object into extensions to expose -some of its internals to them in controlled way. - -If an extension is interested in using the object, it should define -consume_api() method that will receive the object before extension's -initialize() method is called by the extension manager. - -This agent API object is part of public Neutron interface for third parties. -All changes to the interface will be managed in backwards compatible way. - -At the moment, only Open vSwitch agent provides an agent API object to -extensions. +L2 agent extensions are part of a generalized L2/L3 extension framework. 
See +:doc:`agent extensions `. Open vSwitch agent API ~~~~~~~~~~~~~~~~~~~~~~ diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/layer3.rst neutron-9.0.0~b3~dev557/doc/source/devref/layer3.rst --- neutron-9.0.0~b2~dev280/doc/source/devref/layer3.rst 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/devref/layer3.rst 2016-08-29 20:05:49.000000000 +0000 @@ -170,7 +170,7 @@ Finding the router in ip/ipconfig --------------------------------- -* http://docs.openstack.org/admin-guide-cloud/networking.html +* http://docs.openstack.org/admin-guide/networking.html The neutron-l3-agent uses the Linux IP stack and iptables to perform L3 forwarding and NAT. In order to support multiple routers with potentially overlapping IP addresses, neutron-l3-agent @@ -212,11 +212,11 @@ Provider Networking ------------------- -Neutron can also be configured to create `provider networks `_ +Neutron can also be configured to create `provider networks `_ Further Reading --------------- * `Packet Pushers - Neutron Network Implementation on Linux `_ -* `OpenStack Cloud Administrator Guide `_ +* `OpenStack Administrator Guide `_ * `Neutron - Layer 3 API extension usage guide `_ * `Darragh O'Reilly - The Quantum L3 router and floating IPs `_ diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/openvswitch_agent.rst neutron-9.0.0~b3~dev557/doc/source/devref/openvswitch_agent.rst --- neutron-9.0.0~b2~dev280/doc/source/devref/openvswitch_agent.rst 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/devref/openvswitch_agent.rst 2016-08-29 20:05:49.000000000 +0000 @@ -21,24 +21,24 @@ (Avoid deeper levels because they do not render well.) -OpenVSwitch L2 Agent -==================== +Open vSwitch L2 Agent +===================== -This Agent uses the `OpenVSwitch`_ virtual switch to create L2 +This Agent uses the `Open vSwitch`_ virtual switch to create L2 connectivity for instances, along with bridges created in conjunction with OpenStack Nova for filtering. ovs-neutron-agent can be configured to use different networking technologies to create project isolation. These technologies are implemented as ML2 type drivers which are used in -conjunction with the OpenVSwitch mechanism driver. +conjunction with the Open vSwitch mechanism driver. VLAN Tags --------- .. image:: images/under-the-hood-scenario-1-ovs-compute.png -.. _OpenVSwitch: http://openvswitch.org +.. _Open vSwitch: http://openvswitch.org GRE Tunnels @@ -151,7 +151,7 @@ platform's point of view discerning which tagged packets are meant to be treated 'transparently' and which ones are meant to be used for demultiplexing (in order to reach the right destination). - The outcome might only be predicatble if two layers of vlan tags + The outcome might only be predictable if two layers of vlan tags are stacked up together, making guest support even more crucial for the combined use case. @@ -233,7 +233,7 @@ and ports participating a trunk with no ability to convert from one to the other (and vice versa), no migration is required. This is done at a cost of some loss of flexibility - and maintainance complexity. + and maintenance complexity. * Design reusability: a solution to support vlan trunking for the Linux Bridge mech driver will still be required to avoid widening the gap with Open vSwitch (e.g. OVS has DVR but @@ -254,7 +254,7 @@ To summarize: * VLAN interfaces (A) are compelling because will lead to a relatively - contained engineering cost at the expenses of performance. 
The Open + contained engineering cost at the expense of performance. The Open vSwitch community will need to be involved in order to deliver vlan transparency. Irrespective of whether this strategy is chosen for Open vSwitch or not, this is still the only viable approach for Linux @@ -263,7 +263,7 @@ strategy for OVS deployments that are unable to adopt DPDK. * Open Flow (B) is compelling because it will allow Neutron to unlock - the full potential of Open vSwitch, at the expenses of development + the full potential of Open vSwitch, at the expense of development and operations effort. The development is confined within the boundaries of the Neutron community in order to address vlan awareness and transparency (as two distinct use cases, ie. to be adopted @@ -275,7 +275,7 @@ * Trunk Bridges (C) tries to bring the best of option A and B together as far as OVS development and performance are concerned, but it - comes at the expenses of maintainance complexity and loss of flexibility. + comes at the expense of maintenance complexity and loss of flexibility. A Linux Bridge solution would still be required and, QinQ support will still be needed to address vlan transparency. @@ -295,12 +295,307 @@ this option is not at all appealing in the long term. Embracing option (B) in the long run may be complicated by the adoption of -option (C). The development and maintainance complexity involved in Option +option (C). The development and maintenance complexity involved in Option (C) and (B) respectively poses the existential question as to whether investing in the agent-based architecture is an effective strategy, especially if the end result would look a lot like other maturing alternatives. +Implementation VLAN Interfaces (Option A) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This implementation doesn't require any modification of the vif-drivers since +Nova will plug the vif of the VM the same way as it does for traditional ports. + +Trunk port creation ++++++++++++++++++++ +A VM is spawned passing to Nova the port-id of a parent port associated with +a trunk. Nova/libvirt will create the tap interface and will plug it into +br-int or into the firewall bridge if using iptables firewall. In the +external-ids of the port Nova will store the port ID of the parent port. +The OVS agent detects that a new vif has been plugged. It gets +the details of the new port and wires it. +The agent configures it in the same way as a traditional port: packets coming out +from the VM will be tagged using the internal VLAN ID associated to the network, +packets going to the VM will be stripped of the VLAN ID. +After wiring it successfully the OVS agent will send a message notifying Neutron +server that the parent port is up. Neutron will send back to Nova an event to +signal that the wiring was successful. +If the parent port is associated with one or more subports the agent will process +them as described in the next paragraph. + +Subport creation +++++++++++++++++ +If a subport is added to a parent port but no VM was booted using that parent port +yet, no L2 agent will process it (because at that point the parent port is +not bound to any host). +When a subport is created for a parent port and a VM that uses that parent port is +already running, the OVS agent will create a VLAN interface on the VM tap +using the VLAN ID specified in the subport segmentation id. There's a small possibility +that a race might occur: the firewall bridge might be created and plugged while the vif +is not there yet. 
The OVS agent needs to check if the vif exists before trying to create +a subinterface. +Let's see how the models differ when using the iptables firewall or the ovs native +firewall. + +Iptables Firewall +''''''''''''''''' + +:: + + +----------------------------+ + | VM | + | eth0 eth0.100 | + +-----+-----------------+----+ + | + | + +---+---+ +-----+-----+ + | tap1 |-------| tap1.100 | + +---+---+ +-----+-----+ + | | + | | + +---+---+ +---+---+ + | qbr1 | | qbr2 | + +---+---+ +---+---+ + | | + | | + +-----+-----------------+----+ + | port 1 port 2 | + | (tag 3) (tag 5) | + | br-int | + +----------------------------+ + +Let's assume the subport is on network2 and uses segmentation ID 100. +In the case of hybrid plugging the OVS agent will have to create the firewall +bridge (qbr2), create tap1.100 and plug it into qbr2. It will connect qbr2 to +br-int and set the subport ID in the external-ids of port 2. + +*Inbound traffic from the VM point of view* + +The untagged traffic will flow from port 1 to eth0 through qbr1. +For the traffic coming out of port 2, the internal VLAN ID of network2 will be +stripped. The packet will then go untagged through qbr2 where +iptables rules will filter the traffic. The tag 100 will be pushed by tap1.100 +and the packet will finally get to eth0.100. + +*Outbound traffic from the VM point of view* + +The untagged traffic will flow from eth0 to port1 going through qbr1 where +firewall rules will be applied. Traffic tagged with VLAN 100 will leave eth0.100, +go through tap1.100 where the VLAN 100 is stripped. It will reach qbr2 where +iptables rules will be applied and go to port 2. The internal VLAN of network2 +will be pushed by br-int when the packet enters port2 because it's a tagged port. + + +OVS Firewall case +''''''''''''''''' + +:: + + +----------------------------+ + | VM | + | eth0 eth0.100 | + +-----+-----------------+----+ + | + | + +---+---+ +-----+-----+ + | tap1 |-------| tap1.100 | + +---+---+ +-----+-----+ + | | + | | + | | + +-----+-----------------+----+ + | port 1 port 2 | + | (tag 3) (tag 5) | + | br-int | + +----------------------------+ + +When a subport is created the OVS agent will create the VLAN interface tap1.100 and +plug it into br-int. Let's assume the subport is on network2. + +*Inbound traffic from the VM point of view* + +The traffic will flow untagged from port 1 to eth0. The traffic going out from port 2 +will be stripped of the VLAN ID assigned to network2. It will be filtered by the rules +installed by the firewall and reach tap1.100. +tap1.100 will tag the traffic using VLAN 100. It will then reach the VM's eth0.100. + +*Outbound traffic from the VM point of view* + +The untagged traffic will flow and reach port 1 where it will be tagged using the +VLAN ID associated to the network. Traffic tagged with VLAN 100 will leave eth0.100 +reach tap1.100 where VLAN 100 will be stripped. It will then reach port2. +It will be filtered by the rules installed by the firewall on port 2. Then the packets +will be tagged using the internal VLAN associated to network2 by br-int since port 2 is a +tagged port. + +Parent port deletion +++++++++++++++++++++ + +Deleting a port that is an active parent in a trunk is forbidden. If the parent port has +no trunk associated (it's a "normal" port), it can be deleted. +The OVS agent doesn't need to perform any action, the deletion will result in a removal +of the port data from the DB. 
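+
+As a rough illustration of the subport wiring described above, here is a
+minimal sketch of the existence check an agent could perform before creating
+the VLAN subinterface. It assumes shelling out to ``ip`` from iproute2; the
+helper names and the ``tap1``/``tap1.100`` naming are illustrative only, not
+actual agent code::
+
+    import os
+    import subprocess
+
+    def vif_exists(vif_name):
+        # The tap device appears under /sys/class/net once Nova plugs it.
+        return os.path.exists('/sys/class/net/' + vif_name)
+
+    def wire_subport(vif_name, segmentation_id):
+        # Avoid the race described above: if the vif is not plugged yet,
+        # bail out and let the agent retry on the next port event.
+        if not vif_exists(vif_name):
+            return False
+        vlan_if = '%s.%s' % (vif_name, segmentation_id)
+        # e.g. ip link add link tap1 name tap1.100 type vlan id 100
+        subprocess.check_call(['ip', 'link', 'add', 'link', vif_name,
+                               'name', vlan_if, 'type', 'vlan',
+                               'id', str(segmentation_id)])
+        subprocess.check_call(['ip', 'link', 'set', vlan_if, 'up'])
+        return True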
+ + +Trunk deletion +++++++++++++++ + +When Nova deletes a VM, it deletes the VM's corresponding Neutron ports only if they were +created by Nova when booting the VM. In the vlan-aware-vm case the parent port is passed to Nova, so +the port data will remain in the DB after the VM deletion. Nova will delete +the VIF of the VM (in the example tap1) as part of the VM termination. The OVS agent +will detect that deletion and notify the Neutron server that the parent port is down. +The OVS agent will clean up the corresponding subports as explained in the next paragraph. + +The deletion of a trunk that is used by a VM is not allowed. +The trunk can be deleted (leaving the parent port intact) when the parent port is not +used by any VM. After the trunk is deleted, the parent port can also be deleted. + +Subport deletion +++++++++++++++++ + +Removing a subport that is associated with a parent port that was not used to boot any +VM is a no op from the OVS agent perspective. +When a subport associated with a parent port that was used to boot a VM is deleted, +the OVS agent will take care of removing the firewall bridge if using iptables firewall +and the port on br-int. + + +Implementation Trunk Bridge (Option C) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This implementation is based on this `etherpad `_. +Credits to Bence Romsics. +The option use_veth_interconnection=true won't be supported, it will probably be deprecated soon, +see [1]. + +:: + + +--------------------------------+ + | VM | + | eth0 eth0.100 | + +-----+--------------------+-----+ + | + | + +-----+--------------------------+ + | tap1 | + | br-trunk-1 | + | | + | tp-patch-trunk sp-patch-trunk | + | (tag 100) | + +-----+-----------------+--------+ + | | + | | + | | + +-----+-----------------+---------+ + | tp-patch-int sp-patch-int | + | (tag 3) (tag 5) | + | br-int | + +---------------------------------+ + +tp-patch-trunk: trunk bridge side of the patch port that implements a trunk +tp-patch-int: int bridge side of the patch port that implements a trunk +sp-patch-trunk: trunk bridge side of the patch port that implements a subport +sp-patch-int: int bridge side of the patch port that implements a subport + +[1] https://bugs.launchpad.net/neutron/+bug/1587296 + +Trunk creation +++++++++++++++ + +A VM is spawned passing to Nova the port-id of a parent port associated with +a trunk. Neutron will pass to Nova the bridge where to plug the vif as part of the vif details. +The os-vif driver creates the trunk bridge br-trunk-1 if it does not exist in plug(). +It will create the tap interface tap1 and plug it into br-trunk-1 setting the parent port ID in the external-ids. +The OVS agent will be monitoring the creation of ports on the trunk bridges. When it detects +that a new port has been created on the trunk bridge, it will do the following: + +:: + + ovs-vsctl add-port br-trunk-1 tp-patch-trunk -- set Interface tp-patch-trunk type=patch options:peer=tp-patch-int + ovs-vsctl add-port br-int tp-patch-int tag=3 -- set Interface tp-patch-int type=patch options:peer=tp-patch-trunk + + +A patch port is created to connect the trunk bridge to the integration bridge. +tp-patch-trunk, the trunk bridge side of the patch is not associated to any +tag. It will carry untagged traffic. +tp-patch-int, the br-int side the patch port is tagged with VLAN 3. We assume that the +trunk is on network1 that on this host is associated with VLAN 3. +The OVS agent will set the trunk ID in the external-ids of tp-patch-trunk and tp-patch-int. 
+If the parent port is associated with one or more subports the agent will process them as +described in the next paragraph. + +Subport creation +++++++++++++++++ + +If a subport is added to a parent port but no VM was booted using that parent port +yet, the agent won't process the subport (because at this point there's no node +associated with the parent port). +When a subport is added to a parent port that is used by a VM the OVS agent will +create a new patch port: + +:: + + ovs-vsctl add-port br-trunk-1 sp-patch-trunk tag=100 -- set Interface sp-patch-trunk type=patch options:peer=sp-patch-int + ovs-vsctl add-port br-int sp-patch-int tag=5 -- set Interface sp-patch-int type=patch options:peer=sp-patch-trunk + +This patch port connects the trunk bridge to the integration bridge. +sp-patch-trunk, the trunk bridge side of the patch is tagged using VLAN 100. +We assume that the segmentation ID of the subport is 100. +sp-patch-int, the br-int side of the patch port is tagged with VLAN 5. We +assume that the subport is on network2 that on this host uses VLAN 5. +The OVS agent will set the subport ID in the external-ids of sp-patch-trunk and sp-patch-int. + +*Inbound traffic from the VM point of view* + +The traffic coming out of tp-patch-int will be stripped by br-int of VLAN 3. +It will reach tp-patch-trunk untagged and from there tap1. +The traffic coming out of sp-patch-int will be stripped by br-int of VLAN 5. +It will reach sp-patch-trunk where it will be tagged with VLAN 100 and it will +then get to tap1 tagged. + + +*Outbound traffic from the VM point of view* + +The untagged traffic coming from tap1 will reach tp-patch-trunk and from there +tp-patch-int where it will be tagged using VLAN 3. +The traffic tagged with VLAN 100 from tap1 will reach sp-patch-trunk. +VLAN 100 will be stripped since sp-patch-trunk is a tagged port and the packet +will reach sp-patch-int, where it's tagged using VLAN 5. + +Parent port deletion +++++++++++++++++++++ + +Deleting a port that is an active parent in a trunk is forbidden. If the parent port has +no trunk associated, it can be deleted. The OVS agent doesn't need to perform any action. + +Trunk deletion +++++++++++++++ + +When Nova deletes a VM, it deletes the VM's corresponding Neutron ports only if they were +created by Nova when booting the VM. In the vlan-aware-vm case the parent port is passed to Nova, so +the port data will remain in the DB after the VM deletion. Nova will delete +the port on the trunk bridge where the VM is plugged. The L2 agent +will detect that and delete the trunk bridge. It will notify the Neutron server that the parent +port is down. + +The deletion of a trunk that is used by a VM is not allowed. +The trunk can be deleted (leaving the parent port intact) when the parent port is not +used by any VM. After the trunk is deleted, the parent port can also be deleted. + +Subport deletion +++++++++++++++++ + +The OVS agent will delete the patch port pair corresponding to the subport deleted. + +Agent resync +~~~~~~~~~~~~ + +During resync the agent should check that all the trunk and subports are +still valid. It will delete the stale trunk and subports using the procedure specified +in the previous paragraphs according to the implementation. 
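+
+A minimal sketch of such a resync check, assuming the agent can list the
+ports of a trunk bridge and that subport IDs were recorded in the OVS
+external-ids at wiring time as described above (the ``subport-id`` key and
+the helper names are illustrative assumptions, not actual agent code)::
+
+    import subprocess
+
+    def get_subport_id(port_name):
+        # Read back the ID stored in external-ids when the port was wired.
+        out = subprocess.check_output(
+            ['ovs-vsctl', 'get', 'Interface', port_name,
+             'external-ids:subport-id'])
+        return out.decode().strip().strip('"')
+
+    def cleanup_stale_subports(bridge, port_names, valid_subport_ids):
+        # Drop ports whose subport no longer exists on the server side;
+        # deleting one end of a patch pair leaves the peer dangling, so
+        # the br-int side has to be removed the same way.
+        for port in port_names:
+            if get_subport_id(port) not in valid_subport_ids:
+                subprocess.check_call(
+                    ['ovs-vsctl', 'del-port', bridge, port])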
+ + Further Reading --------------- diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/quality_of_service.rst neutron-9.0.0~b3~dev557/doc/source/devref/quality_of_service.rst --- neutron-9.0.0~b2~dev280/doc/source/devref/quality_of_service.rst 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/devref/quality_of_service.rst 2016-08-29 20:05:49.000000000 +0000 @@ -138,7 +138,10 @@ * QosPolicy: directly maps to the conceptual policy resource. * QosNetworkPolicyBinding, QosPortPolicyBinding: defines attachment between a Neutron resource and a QoS policy. -* QosBandwidthLimitRule: defines the only rule type available at the moment. +* QosBandwidthLimitRule: defines the rule to limit the maximum egress + bandwidth. +* QosDscpMarkingRule: defines the rule that marks the Differentiated Service + bits for egress traffic. All database models are defined under: @@ -362,7 +365,7 @@ The ingress bandwidth limit is configured on the tap port by setting a simple `tc-tbf `_ queueing discipline (qdisc) on the port. It requires a value of HZ parameter configured in kernel on the host. -This value is neccessary to calculate the minimal burst value which is set in +This value is necessary to calculate the minimal burst value which is set in tc. Details about how it is calculated can be found in `here `_. This solution is similar to Open vSwitch implementation. diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/quota.rst neutron-9.0.0~b3~dev557/doc/source/devref/quota.rst --- neutron-9.0.0~b2~dev280/doc/source/devref/quota.rst 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/devref/quota.rst 2016-08-03 20:10:33.000000000 +0000 @@ -217,7 +217,7 @@ the transaction which creates the reservation. The lock is acquired when reading usage data. In case of write-set certification failures, which can occur in active/active clusters such as MySQL galera, the decorator -oslo_db.api.wrap_db_retry will retry the transaction if a DBDeadLock +neutron.db.api.retry_db_errors will retry the transaction if a DBDeadLock exception is raised. While non-locking approaches are possible, it has been found out that, since a non-locking algorithms increases the chances of collision, the cost of diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/rpc_callbacks.rst neutron-9.0.0~b3~dev557/doc/source/devref/rpc_callbacks.rst --- neutron-9.0.0~b2~dev280/doc/source/devref/rpc_callbacks.rst 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/devref/rpc_callbacks.rst 2016-08-03 20:10:33.000000000 +0000 @@ -211,10 +211,10 @@ from neutron.api.rpc.callbacks import resources - def process_resource_updates(resource_type, resource, event_type): + def process_resource_updates(resource_type, resource_list, event_type): # send to the right handler which will update any control plane - # details related to the updated resource... + # details related to the updated resources... def subscribe_resources(): @@ -238,7 +238,7 @@ The callback function will receive the following arguments: * resource_type: the type of resource which is receiving the update. -* resource: resource of supported object +* resource_list: list of resources which have been pushed by server. * event_type: will be one of CREATED, UPDATED, or DELETED, see neutron.api.rpc.callbacks.events for details. 
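
A consumer-side handler consistent with the above might look like the
following sketch (the dataplane helpers are placeholders, not real Neutron
APIs)::

    from neutron.api.rpc.callbacks import events

    def process_resource_updates(resource_type, resource_list, event_type):
        # Every resource in resource_list shares the same resource type,
        # so a single dispatch decision covers the whole batch.
        for resource in resource_list:
            if event_type == events.DELETED:
                remove_from_dataplane(resource)  # placeholder helper
            else:
                apply_to_dataplane(resource)     # placeholder helper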
@@ -263,9 +263,22 @@ ----------------------- On the server side, resource updates could come from anywhere, a service plugin, -an extension, anything that updates, creates, or destroys the resource and that +an extension, anything that updates, creates, or destroys the resources and that is of any interest to subscribed agents. +A callback is expected to receive a list of resources. When resources in the list +belong to the same resource type, a single push RPC message is sent; if the list +contains objects of different resource types, resources of each type are grouped +and sent separately, one push RPC message per type. On the receiver side, +resources in a list always belong to the same type. In other words, a server-side +push of a list of heterogeneous objects will result in N messages on the bus and +N client-side callback invocations, where N is the number of unique resource +types in the given list, e.g. L(A, A, B, C, C, C) would be fragmented into +L1(A, A), L2(B), L3(C, C, C), and each list pushed separately. + +Note: there is no guarantee about the order in which separate resource lists +will be delivered to consumers. + The server/publisher side may look like:: from neutron.api.rpc.callbacks.producer import registry @@ -274,17 +287,17 @@ def create_qos_policy(...): policy = fetch_policy(...) update_the_db(...) - registry.push(policy, events.CREATED) + registry.push([policy], events.CREATED) def update_qos_policy(...): policy = fetch_policy(...) update_the_db(...) - registry.push(policy, events.UPDATED) + registry.push([policy], events.UPDATED) def delete_qos_policy(...): policy = fetch_policy(...) update_the_db(...) - registry.push(policy, events.DELETED) + registry.push([policy], events.DELETED) References diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/services_and_agents.rst neutron-9.0.0~b3~dev557/doc/source/devref/services_and_agents.rst --- neutron-9.0.0~b2~dev280/doc/source/devref/services_and_agents.rst 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/devref/services_and_agents.rst 2016-08-29 20:05:49.000000000 +0000 @@ -83,3 +83,27 @@ Only the neutron-server connects to the neutron database. Agents may never connect directly to the database, as this would break the ability to do rolling upgrades. + +Configuration Options +--------------------- + +In addition to database access, configuration options are segregated between +neutron-server and agents. Both services and agents may load the main +``neutron.conf`` since this file should contain the oslo.messaging +configuration for internal Neutron RPCs and may contain host specific +configuration such as file paths. In addition, ``neutron.conf`` contains the +database, Keystone, and Nova credentials and endpoints strictly for +neutron-server to use. + +In addition, neutron-server may load a plugin-specific configuration file, yet +the agents should not. As the plugin configuration consists primarily of site-wide +options and the plugin provides the persistence layer for Neutron, agents +should be instructed to act upon these values via RPC. + +Each individual agent may have its own configuration file. This file should be +loaded after the main ``neutron.conf`` file, so the agent configuration takes +precedence. The agent-specific configuration may contain options which +vary between hosts in a Neutron deployment, such as the external_network_bridge +for an L3 agent.
If any agent requires access to additional external services +beyond the Neutron RPC, those endpoints should be defined in the agent specific +configuration file (e.g. nova metadata for metadata agent). diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/tag.rst neutron-9.0.0~b3~dev557/doc/source/devref/tag.rst --- neutron-9.0.0~b2~dev280/doc/source/devref/tag.rst 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/devref/tag.rst 2016-08-03 20:10:33.000000000 +0000 @@ -41,7 +41,7 @@ 3) Leverage tags by deployment tools 4) allow operators to tag information about provider networks - (e.g. high-bandwith, low-latency, etc) + (e.g. high-bandwidth, low-latency, etc) 5) new features like get-me-a-network or a similar port scheduler could choose a network for a port based on tags diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/template_model_sync_test.rst neutron-9.0.0~b3~dev557/doc/source/devref/template_model_sync_test.rst --- neutron-9.0.0~b2~dev280/doc/source/devref/template_model_sync_test.rst 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/devref/template_model_sync_test.rst 2016-08-03 20:10:33.000000000 +0000 @@ -98,8 +98,8 @@ from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration - from neutron.tests.common import base from neutron.tests.functional.db import test_migrations + from neutron.tests.unit import testlib_api from networking_foo.db.migration import alembic_migrations from networking_foo.db.models import head @@ -130,13 +130,15 @@ return True - class TestModelsMigrationsMysql(_TestModelsMigrationsFoo, - base.MySQLTestCase): + class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin, + _TestModelsMigrationsFoo, + testlib_api.SqlTestCaseLight): pass - class TestModelsMigrationsPsql(_TestModelsMigrationsFoo, - base.PostgreSQLTestCase): + class TestModelsMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin, + _TestModelsMigrationsFoo, + testlib_api.SqlTestCaseLight): pass diff -Nru neutron-9.0.0~b2~dev280/doc/source/devref/testing_coverage.rst neutron-9.0.0~b3~dev557/doc/source/devref/testing_coverage.rst --- neutron-9.0.0~b2~dev280/doc/source/devref/testing_coverage.rst 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/devref/testing_coverage.rst 2016-08-03 20:10:33.000000000 +0000 @@ -52,31 +52,32 @@ * X - Absent or lacking * Patch number - Currently in review * A name - That person has committed to work on an item +* Implicit - The code is executed, yet no assertions are made +------------------------+------------+------------+------------+------------+------------+------------+ | Area | Unit | Functional | API | Fullstack | Scenario | Gate | +========================+============+============+============+============+============+============+ -| DVR | Partial* | L3-V OVS-X | V | amuller | X | V | +| DVR | V | L3-V OVS-X | V | X | X | V | +------------------------+------------+------------+------------+------------+------------+------------+ -| L3 HA | V | V | X | 196393 | X | X | +| L3 HA | V | V | X | 286087 | X | X | +------------------------+------------+------------+------------+------------+------------+------------+ -| L2pop | V | X | | X | | | +| L2pop | V | X | | Implicit | | | +------------------------+------------+------------+------------+------------+------------+------------+ | DHCP HA | V | | | amuller | | | +------------------------+------------+------------+------------+------------+------------+------------+ | OVS ARP 
responder | V | X* | | X* | | | +------------------------+------------+------------+------------+------------+------------+------------+ -| OVS agent | V | Partial | | V | | V | +| OVS agent | V | V | | V | | V | +------------------------+------------+------------+------------+------------+------------+------------+ -| Linux Bridge agent | V | X | | X | | Non-voting | +| Linux Bridge agent | V | X | | V | | V | +------------------------+------------+------------+------------+------------+------------+------------+ | Metering | V | X | V | X | | | +------------------------+------------+------------+------------+------------+------------+------------+ -| DHCP agent | V | 136834 | | amuller | | V | +| DHCP agent | V | V | | amuller | | V | +------------------------+------------+------------+------------+------------+------------+------------+ | rpc_workers | | | | | | X | +------------------------+------------+------------+------------+------------+------------+------------+ -| Reference ipam driver | V | | | | | X (?) | +| Reference ipam driver | V | | | | | X | +------------------------+------------+------------+------------+------------+------------+------------+ | MTU advertisement | V | | | X | | | +------------------------+------------+------------+------------+------------+------------+------------+ @@ -85,15 +86,13 @@ | Prefix delegation | V | X | | X | | | +------------------------+------------+------------+------------+------------+------------+------------+ -* DVR DB unit tests often assert that internal methods were called instead of - testing functionality. A lot of our unit tests are flawed in this way, - and DVR unit tests especially so. An attempt to remedy this was made - in patch 178880. -* OVS ARP responder cannot be tested at the gate because the gate uses Ubuntu - 14.04 that only packages OVS 2.0. OVS added ARP manipulation support in - version 2.1. * Prefix delegation doesn't have functional tests for the dibbler and pd - layers, nor for the L3 agent changes. + layers, nor for the L3 agent changes. This has been an area of repeated + regressions. +* The functional job now compiles OVS 2.5 from source, enabling testing + features that we previously could not. The OVS ARP responder is one such + feature. Modifying the fullstack job to use OVS 2.5 as well will enable + testing the OVS ARP responder implicitly. Missing Infrastructure ---------------------- @@ -108,7 +107,6 @@ proposed in patch 162811. The goal is provide developers a light weight way to rapidly run tests that target the RPC layer, so that a patch that modifies an RPC method's signature could be verified quickly and locally. -* Neutron currently does not test an in-place upgrade (Upgrading the server - first, followed by agents one machine at a time). We make sure that the RPC - layer remains backwards compatible manually via the review process but have - no CI that verifies this. +* Neutron currently runs a 'partial-grenade' job that verifies that an OVS + version from the latest stable release works with neutron-server from master. + We would like to expand this to DHCP and L3 agents as well. diff -Nru neutron-9.0.0~b2~dev280/doc/source/policies/bugs.rst neutron-9.0.0~b3~dev557/doc/source/policies/bugs.rst --- neutron-9.0.0~b2~dev280/doc/source/policies/bugs.rst 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/policies/bugs.rst 2016-08-03 20:10:33.000000000 +0000 @@ -135,29 +135,8 @@ called Neutron `stadium `_. 
Because of that, their release is managed centrally by the Neutron release team; requests for releases need to be funnelled and screened -properly before they can happen. To this aim, the process to request a release -is as follows: - -* Create a bug report to your Launchpad project: provide details as to what - you would like to release; - - * If you provide an exact commit in the bug report then you need to be a bit - careful. In most cases, you'll want to tag the *merge* commit that merges - your last commit in to the branch. `This bug`__ shows an instance where - this mistake was caught. Notice the difference between the `incorrect - commit`__ and the `correct one`__ which is the merge commit. ``git log - 6191994..22dd683 --oneline`` shows that the first one misses a handful of - important commits that the second one catches. This is the nature of - merging to master. - -.. __: https://bugs.launchpad.net/neutron/+bug/1540633 -.. __: https://github.com/openstack/networking-infoblox/commit/6191994515 -.. __: https://github.com/openstack/networking-infoblox/commit/22dd683e1a - -* Add Neutron to the list of affected projects. -* Add 'release-subproject' tag to the list of tags for the bug report. -* The Neutron release management team will watch these bugs, and work with - you to have the request fulfilled by following the instructions found `here `_. +properly before they can happen. Release request process is described `here +`_. .. _guidelines: @@ -280,7 +259,7 @@ * Depending on ease of reproduction (or if the issue can be spotted in the code), mark it as 'Confirmed'. If you are unable to assess/triage the issue because you do not have access to a repro environment, consider - reaching out the `Lieutenant `_, + reaching out the `Lieutenant `_, go-to person for the affected component; he/she may be able to help: assign the bug to him/her for further screening. If the bug already has an assignee, check that a patch is @@ -352,7 +331,7 @@ New tags, or changes in the meaning of existing tags (or deletion), are to be proposed via patch to this section. After discussion, and approval, a member of the bug team will create/delete the tag in Launchpad. Each tag covers an area -with an identified go-to contact or `Lieutenant `_, +with an identified go-to contact or `Lieutenant `_, who can provide further insight. Bug queries are provided below for convenience, more will be added over time if needed. 
@@ -363,6 +342,8 @@ +-------------------------------+-----------------------------------------+----------------------+ | api_ | A bug affecting the API layer | Salvatore Orlando | +-------------------------------+-----------------------------------------+----------------------+ +| api-ref_ | A bug affecting the API reference | Akihiro Motoki | ++-------------------------------+-----------------------------------------+----------------------+ | auto-allocated-topology_ | A bug affecting get-me-a-network | Henry Gessau | +-------------------------------+-----------------------------------------+----------------------+ | baremetal_ | A bug affecting Ironic support | Sukhdev Kapur | @@ -428,9 +409,7 @@ +-------------------------------+-----------------------------------------+----------------------+ | qos_ | A bug affecting ML2/QoS | Miguel Ajo | +-------------------------------+-----------------------------------------+----------------------+ -| released-neutronclient_ | A bug affecting released clients | Kyle Mestery | -+-------------------------------+-----------------------------------------+----------------------+ -| release-subproject_ | A request to release a subproject | Ihar Hrachyshka | +| release_ | A request from a subproject | Ihar Hrachyshka | +-------------------------------+-----------------------------------------+----------------------+ | rfe_ | Feature enhancements being screened | Drivers Team | +-------------------------------+-----------------------------------------+----------------------+ @@ -440,6 +419,8 @@ +-------------------------------+-----------------------------------------+----------------------+ | sriov-pci-pt_ | A bug affecting Sriov/PCI PassThrough | Moshe Levi | +-------------------------------+-----------------------------------------+----------------------+ +| tempest_ | A bug in tempest subtree tests | Assaf Muller | ++-------------------------------+-----------------------------------------+----------------------+ | troubleshooting_ | An issue affecting ease of debugging | Assaf Muller | +-------------------------------+-----------------------------------------+----------------------+ | unittest_ | A bug affecting the unit test subtree | Cedric Brandily | @@ -468,6 +449,14 @@ * `API - All bugs `_ * `API - In progress `_ +.. _api-ref: + +API Reference ++++++++++++++ + +* `API Reference - All bugs `_ +* `API Reference - In progress `_ + .. _auto-allocated-topology: Auto Allocated Topology @@ -713,21 +702,13 @@ * `QoS - All bugs `_ * `QoS - In progress `_ -.. _released-neutronclient: +.. _release: -Released Neutron Client -+++++++++++++++++++++++ - -* `Released Neutron Client - All bugs `_ -* `Released Neutron Client - In progress `_ - -.. _release-subproject: +Requests from Stadium Subprojects ++++++++++++++++++++++++++++++++++ -Release Subproject -++++++++++++++++++ - -* `Release Subproject - All bugs `_ -* `Release Subproject - In progress `_ +* `Requests from Stadium Subprojects - All bugs `_ +* `Requests from Stadium Subprojects - In progress `_ .. _rfe: @@ -761,6 +742,15 @@ * `Security groups - All bugs `_ * `Security groups - In progress `_ +.. _tempest: + +Tempest ++++++++ + +* `Tempest - All bugs `_ +* `Tempest - In progress `_ + + .. 
_troubleshooting: Troubleshooting diff -Nru neutron-9.0.0~b2~dev280/doc/source/stadium/governance.rst neutron-9.0.0~b3~dev557/doc/source/stadium/governance.rst --- neutron-9.0.0~b2~dev280/doc/source/stadium/governance.rst 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/stadium/governance.rst 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,290 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + + Convention for heading levels in Neutron devref: + ======= Heading 0 (reserved for the title in a document) + ------- Heading 1 + ~~~~~~~ Heading 2 + +++++++ Heading 3 + ''''''' Heading 4 + (Avoid deeper levels because they do not render well.) + + +Stadium Governance +================== + +Background +---------- + +Neutron grew to become a big monolithic codebase, and its core team had a +tough time making progress on a number of fronts, like adding new +features, ensuring stability, etc. During the Kilo timeframe, a +decomposition effort started, where the codebase got disaggregated into +separate repos, like the `high level services `_, +and the various third-party solutions for `L2 and L3 services `_, +and the Stadium was officially born. + +These initiatives enabled the various individual teams in charge of the +smaller projects the opportunity to iterate faster and reduce the time to +feature. This has been due to the increased autonomy and implicit trust model +that made the lack of oversight of the PTL and the Neutron drivers/core team +acceptable for a small number of initiatives. When the proposed `arrangement `_ +allowed projects to be `automatically `_ +enlisted as a Neutron project based simply on description, and desire for +affiliation, the number of projects included in the Stadium started to grow +rapidly, which created a number of challenges for the PTL and the drivers +team. + +In fact, it became harder and harder to ensure consistency in the APIs, +architecture, design, implementation and testing of the overarching project; +all aspects of software development, like documentation, integration, release +management, maintainance, and upgrades started to being neglected for some +projects and that led to some unhappy experiences. + +The point about uniform APIs is particularly important, because the Neutron +platform is so flexible that a project can take a totally different turn in +the way it exposes functionality, that it is virtually impossible for the +PTL and the drivers team to ensure that good API design principles are being +followed over time. In a situation where each project is on its own, that +might be acceptable, but allowing independent API evolution while still under +the Neutron umbrella is counterproductive. + +These challenges led the Neutron team to find a better balance between autonomy +and consistency and lay down criteria that more clearly identify when a project +can be eligible for inclusion in the `Neutron governance `_. 
+ +This document describes these criteria, documents the steps involved in +maintaining the integrity of the Stadium, and explains how that integrity is +to be preserved over time when modifications to the governance are required. + +When is a project considered part of the Stadium? +------------------------------------------------- + +In order to be considered part of the Stadium, a project must show a track +record of alignment with the Neutron `core project `_. +This means showing proof of adoption of practices as led by the Neutron core +team. Some of these practices are typically already followed by the most +mature OpenStack projects: + + * Exhaustive documentation: it is expected that each project will have + `developer `_, + `user/operator `_ + and `API `_ + documentation available. + + * Exhaustive OpenStack CI coverage: unit, functional, and tempest coverage + using OpenStack CI (upstream) resources so that `Grafana `_ + and `OpenStack Health `_ + support is available. Access to CI resources and historical data by the + team is key to ensuring stability and robustness of a project. + In particular, it is of paramount importance to ensure that DB models/migrations + are tested functionally to prevent data inconsistency issues or unexpected + DB logic errors due to schema/models mismatch. For more details, please + look at the following resources: + + * https://review.openstack.org/#/c/346091/ + * https://review.openstack.org/#/c/346272/ + * https://review.openstack.org/#/c/346083/ + + More database-related information can be found at: + + * http://docs.openstack.org/developer/neutron/devref/alembic_migrations.html + * http://docs.openstack.org/developer/neutron/devref/db_layer.html + + Bear in mind that many projects have been transitioning their codebase and + tests to fully support Python 3+, and it is important that each Stadium + project supports Python 3+ the same way Neutron core does. For more + information on how to do testing, please refer to the + `Neutron testing documentation `_. + + * Good release footprint, according to the chosen `release model `_. + + * Adherence to deprecation and `stable backports policies `_. + + * Demonstrated ability to do `upgrades `_ + and/or `rolling upgrades `_, + where applicable. This means having grenade support on top of the CI + coverage as described above. + + * Client bindings and CLI developed according to the OpenStack Client `plugin model `_. + +On top of the above-mentioned criteria, the following are also taken into +consideration: + + * A project must use, adopt and implement open software and technologies. + + * A project must integrate with Neutron via one of the supported, advertised + and maintained public Python APIs. REST API does not qualify (the project + python-neutronclient is an exception). + + * It adopts neutron-lib (with related hacking rules applied), and has proof + of good decoupling from Neutron core internals. + + * It provides an API that adopts API guidelines as set by the Neutron core + team, and that relies on an open implementation. + + * It adopts modular interfaces to provide networking services: this means + that L2/7 services are provided in the form of ML2 mech drivers and + service plugins respectively. A service plugin can expose a driver + interface to support multiple backend technologies, and/or adopt the + flavor framework as necessary.
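+
+As a purely illustrative sketch of the modular-interface criterion above, an
+L2 backend would typically hook in as an ML2 mechanism driver; the
+``networking-foo`` naming and the class body here are hypothetical::
+
+    from neutron.plugins.ml2 import driver_api as api
+
+    class FooMechanismDriver(api.MechanismDriver):
+        """Delegate port wiring to the hypothetical foo backend."""
+
+        def initialize(self):
+            # One-time setup, e.g. reading driver configuration.
+            pass
+
+        def create_port_postcommit(self, context):
+            # React after the port has been committed to the Neutron DB.
+            pass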
+ +Adding or removing projects to the Stadium +------------------------------------------ + +When a project is to be considered part of the Stadium, proof of compliance with +the aforementioned practices will have to be demonstrated typically for at +least two OpenStack releases. Application for inclusion is to be considered +only within the first milestone of each OpenStack cycle, which is the time when +the PTL and Neutron team do release planning, and have the most time available +to discuss governance issues. + +Projects that are part of the Neutron Stadium typically have the first milestone to get +their house in order, during which time reassessment happens; if removed because +it substantially fails to meet the criteria, a project cannot reapply within +the same release cycle in which it was evicted. + +The process for proposing a repo into openstack/ and under the Neutron +governance is to propose a patch to the openstack/governance repository. +For example, to propose networking-foo, one would add the following entry +under Neutron in reference/projects.yaml:: + + - repo: openstack/networking-foo + tags: + - name: release:independent + +Typically this is a patch that the PTL, in collaboration with the project's +point of contact, will shepherd through the review process. This step is +undertaken once it is clear that all criteria are met. The next section +provides an informal checklist that shows what steps a project needs to +go through in order to enable the PTL and the TC to vote positively on +the proposed inclusion. + +Once a project is included, it abides by the Neutron `RFE submission process `_, +where specifications to neutron-specs are required for major API as well +as major architectural changes that may require core Neutron platform +enhancements. + +Checklist +--------- + + * How to integrate documentation into docs.o.o: The documentation + website has a section for `project developer documentation `_. + Each project in the Neutron Stadium must have an entry under the + 'Networking Sub Projects' section that points to the developer + documentation for the project, available at http://docs.openstack.org/developer//. + This is a two-step process that involves the following: + + * Build the artefacts: this can be done by following example + https://review.openstack.org/#/c/293399/. + * Publish the artefacts: this can be done by following example + https://review.openstack.org/#/c/216448/. + + More information can also be found on the + `project creator guide `_. + + * How to integrate into Grafana: Grafana is a great tool that provides + the ability to display historical series, like failure rates of + OpenStack CI jobs. A few examples that added dashboards over time are: + + * `Neutron `_. + * `Networking-OVN `_. + * `Networking-Midonet `_. + + Any subproject must have a Grafana dashboard that shows failure + rates for at least Gate and Check queues. + + * How to integrate into neutron-lib's CI: there are a number of steps + required to integrate with neutron-lib CI and adopt neutron-lib in + general. One step is to validate that neutron-lib master is working + with the master of a given project that uses neutron-lib. For example + `patch `_ introduced such + support for the Neutron project. Any subproject that wants to do the + same would need to adopt the following few lines: + + #. https://review.openstack.org/#/c/338603/4/jenkins/jobs/projects.yaml@4685 + #. https://review.openstack.org/#/c/338603/3/zuul/layout.yaml@8501 + #.
https://review.openstack.org/#/c/338603/4/grafana/neutron.yaml@39 + + Line 1 and 2 respectively add a job to the periodic queue for the + project, whereas line 3 introduces the failure rate trend for the + periodic job to spot failure spikes etc. Make sure your project has + the following: + + #. https://review.openstack.org/#/c/357086/ + #. https://review.openstack.org/#/c/359143/ + + * How to port api-ref over to neutron-lib: to publish the subproject + API reference into the `Networking API guide `_ + you must contribute the API documentation into neutron-lib's api-ref + directory as done in the `WADL/REST transition patch `_. + Once this is done successfully, a link to the subproject API will + show under the published `table of contents `_. + An RFE bug tracking this effort effectively initiates the request + for Stadium inclusion, where all the aspects outlined in this + document are reviewed by the PTL. + + * How to port API definitions over to neutron-lib: the most basic + steps to port API definitions over to neutron-lib are demonstrated + in the following patches: + + * https://review.openstack.org/#/c/353131/ + * https://review.openstack.org/#/c/353132/ + + The `neutron-lib patch `_ + introduces the elements that define the API, and testing coverage + validates that the resource and actions maps use valid keywords. + API reference documentation is provided alongside the definition to + keep everything in one place. + The `neutron patch `_ + uses the Neutron extension framework to plug the API definition + on top of the Neutron API backbone. The change can only merge when + there is a released version of neutron-lib. + + * How to integrate into the openstack release: every project in the + Stadium must have release notes. In order to set up release notes, + please see the patches below for an example on how to set up reno: + + * https://review.openstack.org/#/c/320904/ + * https://review.openstack.org/#/c/243085/ + + For release documentation related to Neutron, please check the + `Neutron policies document `_. + Once everything is set up and your project is released, make sure + you see an entry on the release page (e.g. `Newton `_). + Make sure you release according to the project declared release + `model `_. + + * How to port OpenStack Client over to python-neutronclient: client + API bindings and client command line interface support must be + developed in python-neutronclient under `osc module `_. + If your project requires one or both, consider looking at the + following example on how to contribute these two to python-neutronclient + according to the OSC framework and guidelines: + + * https://review.openstack.org/#/c/340624/ + * https://review.openstack.org/#/c/340763/ + * https://review.openstack.org/#/c/352653/ + + More information on how to develop python-openstackclient plugins + can be found at the following links: + + * http://docs.openstack.org/developer/python-openstackclient/plugins.html + * http://docs.openstack.org/developer/python-openstackclient/humaninterfaceguide.html + + It is worth prefixing the commands being added with the keyword + `network `_ to + avoid a potential clash with other commands with similar names. This + is only required if the command object name is highly likely to have + an ambiguous meaning.
diff -Nru neutron-9.0.0~b2~dev280/doc/source/stadium/guidelines.rst neutron-9.0.0~b3~dev557/doc/source/stadium/guidelines.rst --- neutron-9.0.0~b2~dev280/doc/source/stadium/guidelines.rst 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/stadium/guidelines.rst 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,222 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + + + Convention for heading levels in Neutron devref: + ======= Heading 0 (reserved for the title in a document) + ------- Heading 1 + ~~~~~~~ Heading 2 + +++++++ Heading 3 + ''''''' Heading 4 + (Avoid deeper levels because they do not render well.) + + +Sub-Project Guidelines +====================== + +This document provides guidance for those who maintain projects that consume +main neutron or neutron advanced services repositories as a dependency. It is +not meant to describe projects that are not tightly coupled with Neutron code. + +Code Reuse +---------- + +At all times, avoid using any Neutron symbols that are explicitly marked as +private (those have an underscore at the start of their names). + +Try to avoid copy pasting the code from Neutron to extend it. Instead, rely on +enormous number of different plugin entry points provided by Neutron (L2 agent +extensions, API extensions, service plugins, core plugins, ML2 mechanism +drivers, etc.) + +Requirements +------------ + +Neutron dependency +~~~~~~~~~~~~~~~~~~ + +Subprojects usually depend on neutron repositories, by using -e git://... +schema to define such a dependency. The dependency *must not* be present in +requirements lists though, and instead belongs to tox.ini deps section. This is +because next pbr library releases do not guarantee -e git://... dependencies +will work. + +You may still put some versioned neutron dependency in your requirements list +to indicate the dependency for anyone who packages your subproject. + +Explicit dependencies +~~~~~~~~~~~~~~~~~~~~~ + +Each neutron project maintains its own lists of requirements. Subprojects that +depend on neutron while directly using some of those libraries that neutron +maintains as its dependencies must not rely on the fact that neutron will pull +the needed dependencies for them. Direct library usage requires that this +library is mentioned in requirements lists of the subproject. + +The reason to duplicate those dependencies is that neutron team does not stick +to any backwards compatibility strategy in regards to requirements lists, and +is free to drop any of those dependencies at any time, breaking anyone who +could rely on those libraries to be pulled by neutron itself. + +Automated requirements updates +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +At all times, subprojects that use neutron as a dependency should make sure +their dependencies do not conflict with neutron's ones. + +Core neutron projects maintain their requirements lists by utilizing a +so-called proposal bot. 
To keep your subproject in sync with neutron, it is +highly recommended that you register your project in the +openstack/requirements:projects.txt file to enable the bot to update +requirements for you. + +Once a subproject opts in to global requirements synchronization, it should enable +check-requirements jobs in project-config. For example, see `this patch +`_. + +Stable branches +--------------- + +Stable branches for subprojects should be created at the same time as the +corresponding neutron stable branches. This is to avoid situations +where a postponed cut-off results in a stable branch that contains some patches +that belong to the next release. This would require reverting patches, and this +is something you should avoid. + +Make sure your neutron dependency uses the corresponding stable branch for neutron, +not master. + +Note that to keep requirements in sync with core neutron repositories in stable +branches, you should make sure that your project is registered in +openstack/requirements:projects.txt *for the branch in question*. + +Subproject stable branches are supervised by the horizontal `neutron-stable-maint +team `_. + +More info on the stable branch process can be found on `the following page +`_. + +Stable merge requirements +------------------------- + +Merges into stable branches are handled by members of the `neutron-stable-maint +gerrit group `_. The +reason for this is to ensure consistency among stable branches, and compliance +with policies for stable backports. + +For sub-projects that participate in the Neutron Stadium effort and that also +create and utilize stable branches, there is an expectation around what is +allowed to be merged in these stable branches. The Stadium projects should be +following the stable branch policies as defined on the `Stable Branch wiki +`_. This +means that, among other things, no features are allowed to be backported into +stable branches. + +Releases +-------- + +It is suggested that sub-projects release new tarballs on PyPI from time to +time, especially for stable branches. It will make the life of packagers and +other consumers of your code easier. + +It is highly suggested that you do not strip pieces of the source tree (tests, +executables, tools) before releasing on PyPI: those missing pieces may be +needed to validate the package, or make the packaging easier or more complete. +As a rule of thumb, don't strip anything from the source tree unless absolutely +necessary. + +Sub-Project Release Process +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Only members of the `neutron-release +`_ gerrit group can do +the following release-related tasks: + +* Make releases +* Create stable branches +* Make stable branches end of life + +Make sure you talk to a member of neutron-release to perform these tasks. + +To release a sub-project, follow these steps: + +* For projects which have not moved to post-versioning, we need to push an + alpha tag to avoid pbr complaining. A member of the neutron-release group + will handle this. +* A sub-project owner should modify setup.cfg to remove the version (if you + have one), which moves your project to post-versioning, similar to all the + other Neutron projects. You can skip this step if you don't have a version in + setup.cfg. +* A sub-project owner `proposes + `_ a patch + to the openstack/releases repository with the intended git hash. `The Neutron + release liaison `_ + should be added in Gerrit to the list of reviewers for the patch.
+* If the subproject is not `managed + `_ by + OpenStack Release Team, a member of `neutron-release + `_ `tags the release + `_ and + creates the needed stable branches, if needed. Note: tagging will release + the code to PyPI. Note: new major tag versions should conform to SemVer + requirements, meaning no year numbers should be used as a major version. The + switch to SemVer is advised at earliest convenience for all new major + releases. +* The Neutron release liaison votes with +1 for the openstack/releases patch + that gives indication to release team the patch is ready to merge. +* The releases will now be on PyPI. A sub-project owner should verify this by + going to an URL similar to + `this `_. +* A sub-project owner should next go to Launchpad and release this version + using the "Release Now" button for the release itself. +* If a sub-project uses the "delay-release" option, a sub-project owner should + update any bugs that were fixed with this release to "Fix Released" in + Launchpad. This step is not necessary if the sub-project uses the + "direct-release" option, which is the default. [#jeepyb_release_options]_ +* The new release will be available on `OpenStack Releases + `_. +* A sub-project owner should add the next milestone to the Launchpad series, or + if a new series is required, create the new series and a new milestone. +* Finally a sub-project owner should send an email to the openstack-announce + mailing list announcing the new release. + +.. note:: + + You need to be careful when picking a git commit to base new releases on. + In most cases, you'll want to tag the *merge* commit that merges your last + commit in to the branch. `This bug`__ shows an instance where this mistake + was caught. Notice the difference between the `incorrect commit`__ and the + `correct one`__ which is the merge commit. ``git log 6191994..22dd683 + --oneline`` shows that the first one misses a handful of important commits + that the second one catches. This is the nature of merging to master. + +.. __: https://bugs.launchpad.net/neutron/+bug/1540633 +.. __: https://github.com/openstack/networking-infoblox/commit/6191994515 +.. __: https://github.com/openstack/networking-infoblox/commit/22dd683e1a + + +To make a branch end of life, follow the following steps: + +* A member of neutron-release will abandon all open change reviews on + the branch. +* A member of neutron-release will push an EOL tag on the branch. + (eg. "icehouse-eol") +* A sub-project owner should request the infrastructure team to delete + the branch by sending an email to the infrastructure mailing list, not by + bothering the infrastructure team on IRC. +* A sub-project owner should tweak jenkins jobs in project-config if any. + +References +~~~~~~~~~~ + +.. [#jeepyb_release_options] http://lists.openstack.org/pipermail/openstack-dev/2015-December/081724.html diff -Nru neutron-9.0.0~b2~dev280/doc/source/stadium/index.rst neutron-9.0.0~b3~dev557/doc/source/stadium/index.rst --- neutron-9.0.0~b2~dev280/doc/source/stadium/index.rst 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/stadium/index.rst 2016-08-29 20:05:49.000000000 +0000 @@ -16,11 +16,26 @@ Neutron Stadium ================ -The Stadium Guide contains information on policies and procedures for the -Neutron Stadium. +This section contains information on policies and procedures for the so called +Neutron Stadium. The Neutron Stadium is the list of projects that show up in the +OpenStack `Governance Document `_. 
+ +The list includes projects that the Neutron PTL and core team are directly +involved in, and manage on a day-to-day basis. To do so, the PTL and team +ensure that common practices and guidelines are followed throughout the Stadium, +for all aspects that pertain to software development, from inception to coding, +testing, documentation, and more. + +The Stadium is not intended as a VIP club for OpenStack networking +projects, or an upper tier within OpenStack. It is simply the list of projects +the Neutron team and PTL claim responsibility for when producing Neutron +deliverables throughout the release `cycles `_. + +For more details on the Stadium, and what it takes for a project to be +considered an integral part of the Stadium, please read on. .. toctree:: :maxdepth: 3 - sub_projects - sub_project_guidelines + governance + guidelines diff -Nru neutron-9.0.0~b2~dev280/doc/source/stadium/sub_project_guidelines.rst neutron-9.0.0~b3~dev557/doc/source/stadium/sub_project_guidelines.rst --- neutron-9.0.0~b2~dev280/doc/source/stadium/sub_project_guidelines.rst 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/stadium/sub_project_guidelines.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,196 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - - - Convention for heading levels in Neutron devref: - ======= Heading 0 (reserved for the title in a document) - ------- Heading 1 - ~~~~~~~ Heading 2 - +++++++ Heading 3 - ''''''' Heading 4 - (Avoid deeper levels because they do not render well.) - - -Sub-Project Guidelines -====================== - -This document provides guidance for those who maintain projects that consume -main neutron or neutron advanced services repositories as a dependency. It is -not meant to describe projects that are not tightly coupled with Neutron code. - -Code Reuse ---------- - -At all times, avoid using any Neutron symbols that are explicitly marked as -private (those have an underscore at the start of their names). - -Try to avoid copy pasting the code from Neutron to extend it. Instead, rely on -enormous number of different plugin entry points provided by Neutron (L2 agent -extensions, API extensions, service plugins, core plugins, ML2 mechanism -drivers, etc.) - -Requirements ------------- - -Neutron dependency -~~~~~~~~~~~~~~~~~~ - -Subprojects usually depend on neutron repositories, by using -e git://... -schema to define such a dependency. The dependency *must not* be present in -requirements lists though, and instead belongs to tox.ini deps section. This is -because next pbr library releases do not guarantee -e git://... dependencies -will work. - -You may still put some versioned neutron dependency in your requirements list -to indicate the dependency for anyone who packages your subproject. - -Explicit dependencies -~~~~~~~~~~~~~~~~~~~~~ - -Each neutron project maintains its own lists of requirements.
Subprojects that -depend on neutron while directly using some of those libraries that neutron -maintains as its dependencies must not rely on the fact that neutron will pull -the needed dependencies for them. Direct library usage requires that this -library is mentioned in requirements lists of the subproject. - -The reason to duplicate those dependencies is that neutron team does not stick -to any backwards compatibility strategy in regards to requirements lists, and -is free to drop any of those dependencies at any time, breaking anyone who -could rely on those libraries to be pulled by neutron itself. - -Automated requirements updates -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -At all times, subprojects that use neutron as a dependency should make sure -their dependencies do not conflict with neutron's ones. - -Core neutron projects maintain their requirements lists by utilizing a -so-called proposal bot. To keep your subproject in sync with neutron, it is -highly recommended that you register your project in -openstack/requirements:projects.txt file to enable the bot to update -requirements for you. - -Once a subproject opts in global requirements synchronization, it should enable -check-requirements jobs in project-config. For example, see `this patch -`_. - -Stable branches ---------------- - -Stable branches for subprojects should be created at the same time when -corresponding neutron stable branches are created. This is to avoid situations -when a postponed cut-off results in a stable branch that contains some patches -that belong to the next release. This would require reverting patches, and this -is something you should avoid. - -Make sure your neutron dependency uses corresponding stable branch for neutron, -not master. - -Note that to keep requirements in sync with core neutron repositories in stable -branches, you should make sure that your project is registered in -openstack/requirements:projects.txt *for the branch in question*. - -Subproject stable branches are supervised by horizontal `neutron-stable-maint -team `_. - -More info on stable branch process can be found on `the following page -`_. - -Stable merge requirements -------------------------- - -Merges into stable branches are handled by members of the `neutron-stable-maint -gerrit group `_. The -reason for this is to ensure consistency among stable branches, and compliance -with policies for stable backports. - -For sub-projects who participate in the Neutron Stadium effort and who also -create and utilize stable branches, there is an expectation around what is -allowed to be merged in these stable branches. The Stadium projects should be -following the stable branch policies as defined by on the `Stable Branch wiki -`_. This -means that, among other things, no features are allowed to be backported into -stable branches. - -Releases --------- - -It is suggested that sub-projects release new tarballs on PyPI from time to -time, especially for stable branches. It will make the life of packagers and -other consumers of your code easier. - -It is highly suggested that you do not strip pieces of the source tree (tests, -executables, tools) before releasing on PyPI: those missing pieces may be -needed to validate the package, or make the packaging easier or more complete. -As a rule of thumb, don't strip anything from the source tree unless completely -needed. 
- -Sub-Project Release Process -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Only members of the `neutron-release -`_ gerrit group can do -the following release related tasks: - -* Make releases -* Create stable branches -* Make stable branches end of life - -Make sure you talk to a member of neutron-release to perform these tasks. - -Follow the process found `here `_ -for creating a bug for your request. - -To release a sub-project, follow the following steps: - -* For projects which have not moved to post-versioning, we need to push an - alpha tag to avoid pbr complaining. A member of the neutron-release group - will handle this. -* A sub-project owner should modify setup.cfg to remove the version (if you - have one), which moves your project to post-versioning, similar to all the - other Neutron projects. You can skip this step if you don't have a version in - setup.cfg. -* A member of neutron-release will then `tag the release - `_, - which will release the code to PyPI. -* The releases will now be on PyPI. A sub-project owner should verify this by - going to an URL similar to - `this `_. -* A sub-project owner should next go to Launchpad and release this version - using the "Release Now" button for the release itself. -* If a sub-project uses the "delay-release" option, a sub-project owner should - update any bugs that were fixed with this release to "Fix Released" in - Launchpad. This step is not necessary if the sub-project uses the - "direct-release" option, which is the default. [#jeepyb_release_options]_ -* The new release will be available on `OpenStack Releases - `_. -* A sub-project owner should add the next milestone to the Launchpad series, or - if a new series is required, create the new series and a new milestone. -* Finally a sub-project owner should send an email to the openstack-announce - mailing list announcing the new release. - -To make a branch end of life, follow the following steps: - -* A member of neutron-release will abandon all open change reviews on - the branch. -* A member of neutron-release will push an EOL tag on the branch. - (eg. "icehouse-eol") -* A sub-project owner should request the infrastructure team to delete - the branch by sending an email to the infrastructure mailing list, not by - bothering the infrastructure team on IRC. -* A sub-project owner should tweak jenkins jobs in project-config if any. - -References -~~~~~~~~~~ - -.. [#jeepyb_release_options] http://lists.openstack.org/pipermail/openstack-dev/2015-December/081724.html diff -Nru neutron-9.0.0~b2~dev280/doc/source/stadium/sub_projects.rst neutron-9.0.0~b3~dev557/doc/source/stadium/sub_projects.rst --- neutron-9.0.0~b2~dev280/doc/source/stadium/sub_projects.rst 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/doc/source/stadium/sub_projects.rst 1970-01-01 00:00:00.000000000 +0000 @@ -1,683 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. 
- - - Convention for heading levels in Neutron devref: - ======= Heading 0 (reserved for the title in a document) - ------- Heading 1 - ~~~~~~~ Heading 2 - +++++++ Heading 3 - ''''''' Heading 4 - (Avoid deeper levels because they do not render well.) - - -Neutron Stadium -=============== - -Introduction ------------- - -Neutron has grown to be a complex project made of many moving parts. The -codebase is the aggregation of smaller projects that, once assembled in a -specific configuration, implement one of the many deployment architectures -to deliver networking services. - -This document explains the inclusion process, and the criteria chosen to -select a project for inclusion. It also outlines the lists of projects -that are either managed by the `Neutron teams `_, -or that are affiliated to Neutron via an integration point made available -by the core pluggable framework. - -Demystifying the mission ------------------------- - -The Neutron `mission `_ -states that Neutron is all about delivering network services and libraries. -Although this has been true for the existence of the project, the project -itself has evolved over the years to meet the demands of a growing community -of users and developers who have an interest in adopting, building new and -leveraging existing network functionality. To continue to stay true to -its mission, and yet reduce the management burden, the project transformed -itself into a pluggable framework, and a community where interested parties -come together to discuss and define APIs and respective implementations that -ultimately are delivered on top of the aforementioned pluggable framework. -Some of these APIs and implementations are considered to be a part of the -Neutron project. For the ones that are not, there is no connotation of -_poor_ quality associated with them. Their association, or lack thereof, is -simply a reflection of the fact that a good portion of Neutron team feels -favorable towards developing, and supporting the project in the wider -OpenStack ecosystem. - -Inclusion Process ------------------ - -The process for proposing a repo into openstack/ and under the Neutron -project is to propose a patch to the openstack/governance repository. -For example, to propose networking-foo, one would add the following entry -under Neutron in reference/projects.yaml:: - - - repo: openstack/networking-foo - tags: - - name: release:independent - -For more information about the release:independent tag (and other -currently defined tags) see: - - http://governance.openstack.org/reference/tags/ - -The Neutron PTL must approve the change. The TC clarified that once a -project has been approved (Neutron in this case), the project can add -additional repos without needing TC approval as long as the added -repositories are within the existing approved scope of the project. - - http://git.openstack.org/cgit/openstack/governance/commit/?id=321a020cbcaada01976478ea9f677ebb4df7bd6d - -In order to create a project, in case it does not exist, follow steps -as explained in: - - http://docs.openstack.org/infra/manual/creators.html - -Responsibilities ----------------- - -All affected repositories already have their own review teams. The -sub-team working on the sub-project is entirely responsible for -day-to-day development. That includes reviews, bug tracking, and -working on testing. - -By being included, the project accepts oversight by the TC as a part of -being in OpenStack, and also accepts oversight by the Neutron PTL. 
- -It is also assumed the respective review teams will make sure their projects -stay in line with `current best practices `_. - -Inclusion Criteria ------------------- - -As mentioned before, the Neutron PTL must approve the inclusion of each -additional repository under the Neutron project. When in doubt, the PTL -should consider erring on the side of caution, and keep the project out of -the list until more consensus amongst the team can be built or a more -favorable assessment can be determined. -That evaluation will be initially based on the new project requirements used -for all new OpenStack projects for the criteria that is applicable. If -there is any question about this, the review should be deferred to the TC -as a new OpenStack project team. - - http://governance.openstack.org/reference/new-projects-requirements.html - -Including *everything* related to Neutron under the Neutron project team has not -scaled well, so some Neutron related projects are encouraged to form a new -OpenStack project team. The following list of guidelines are not hard rules. -There may be exceptions. Instead, they serve as criteria that may influence the -decision one way or the other. Sub-projects will be reviewed regularly to see -how they meet these criteria. - -These criteria are designed around how easy it would be for members of the -loosely defined "Neutron team" to jump in and help fix or even take over a given -repository if needed. - -* Neutron stays quite busy developing and maintaining open source - implementations for features. Any sub-project that serves as an interface to - proprietary technology should most likely be a separate project team. This - imposes a barrier on access to the technology for dev/test and CI integration. -* If the project only interacts with Neutron on REST API boundaries (client of - Neutron's API, or Neutron is a client of its API), it should probably be a - separate project. python-neutronclient is an obvious exception here. -* The area of functionality of a sub-project should be taken into consideration. - The closer the functionality is to the base functionality implemented in - openstack/neutron, the more likely it makes sense under the Neutron project - team. Conversely, something "higher" in the stack considered an optional - advanced service is more likely to make sense as an independent project. - This is subject to change as the Neutron project evolves and continues to - explore the boundaries that work best for the project. -* OpenStack project teams are based around both technology and groups of people. - If a sub-project is directly driven by a subset of members of the Neutron team, - with the wider approval of the Neutron team, then it makes sense to retain it - under the Neutron project team. Conversely, a project that was developed - without oversight or engagement of any of the Neutron members cannot qualify. - For the sake of this criterion, a member of the team is a known (core or not) - contributor with a substantial track record of Neutron development. - - -Official Sub-Project List -------------------------- - -The official source of all repositories that are a part of Neutron or another -official OpenStack project team is here: - - http://governance.openstack.org/reference/projects/neutron.html - -We list the Neutron repositories, as well as other Neutron affiliated projects -here to provide references and note the functionality they provide. 
- -Functionality legend -~~~~~~~~~~~~~~~~~~~~ - -- base: the base Neutron platform; -- bgp: BGP dynamic routing service plugin; -- client: API client implementation; -- core: a monolithic plugin that can implement API at multiple layers L3-L7; -- dashboard: Horizon dashboard integration; -- docker: a Docker network plugin that uses Neutron to provide networking services to Docker containers; -- fw: a Firewall service plugin; -- intent: a service plugin that provides a declarative API to realize networking; -- ipam: an IP address management driver; -- l2: a Layer 2 service; -- l3: a Layer 3 service plugin; -- lb: a Load Balancer service plugin; -- ml2: an ML2 mechanism driver; -- pd: prefix delegation; -- sfc: traffic steering based on traffic classification; -- vpn: a VPN service plugin; - -Neutron projects -~~~~~~~~~~~~~~~~ - -This table shows the list of official Neutron repositories and their -functionality. - -+-------------------------------+-----------------------+ -| Name | Functionality | -+===============================+=======================+ -| networking-bagpipe_ | ml2 | -+-------------------------------+-----------------------+ -| networking-bgpvpn_ | vpn | -+-------------------------------+-----------------------+ -| networking-calico_ | ml2 | -+-------------------------------+-----------------------+ -| networking-l2gw_ | l2 | -+-------------------------------+-----------------------+ -| networking-midonet_ | core,ml2,l3,lb,fw | -+-------------------------------+-----------------------+ -| networking-odl_ | ml2,l3,lb,fw | -+-------------------------------+-----------------------+ -| networking-ofagent_ | ml2 | -+-------------------------------+-----------------------+ -| networking-onos_ | ml2,l3 | -+-------------------------------+-----------------------+ -| networking-ovn_ | ml2,l3 | -+-------------------------------+-----------------------+ -| networking-sfc_ | sfc | -+-------------------------------+-----------------------+ -| neutron_ | base,l2,ml2,core,l3 | -+-------------------------------+-----------------------+ -| neutron-dynamic-routing_ | bgp | -+-------------------------------+-----------------------+ -| neutron-fwaas_ | fw | -+-------------------------------+-----------------------+ -| neutron-lbaas_ | lb,dashboard | -| neutron-lbaas-dashboard_ | | -| octavia_ | | -+-------------------------------+-----------------------+ -| neutron-lib_ | base | -+-------------------------------+-----------------------+ -| neutron-vpnaas_ | vpn | -+-------------------------------+-----------------------+ -| python-neutronclient_ | client | -+-------------------------------+-----------------------+ -| python-neutron-pd-driver_ | pd | -+-------------------------------+-----------------------+ - - -Affiliated projects -~~~~~~~~~~~~~~~~~~~ - -This table shows the affiliated projects that integrate with Neutron, -in one form or another. These projects typically leverage the pluggable -capabilities of Neutron, the Neutron API, or a combination of both. 
- -+-------------------------------+-----------------------+ -| Name | Functionality | -+===============================+=======================+ -| dragonflow_ | core | -+-------------------------------+-----------------------+ -| kuryr_ | docker | -+-------------------------------+-----------------------+ -| networking-ale-omniswitch_ | ml2 | -+-------------------------------+-----------------------+ -| networking-arista_ | ml2,l3 | -+-------------------------------+-----------------------+ -| networking-bigswitch_ | ml2,core,l3 | -+-------------------------------+-----------------------+ -| networking-brocade_ | ml2,l3 | -+-------------------------------+-----------------------+ -| networking-cisco_ | core,ml2,l3,fw,vpn | -+-------------------------------+-----------------------+ -| networking-edge-vpn_ | vpn | -+-------------------------------+-----------------------+ -| networking-fortinet_ | ml2,l3,fw | -+-------------------------------+-----------------------+ -| networking-fujitsu_ | ml2 | -+-------------------------------+-----------------------+ -| networking-hyperv_ | ml2 | -+-------------------------------+-----------------------+ -| networking-infoblox_ | ipam | -+-------------------------------+-----------------------+ -| networking-mlnx_ | ml2 | -+-------------------------------+-----------------------+ -| networking-nec_ | core | -+-------------------------------+-----------------------+ -| networking-ovs-dpdk_ | ml2 | -+-------------------------------+-----------------------+ -| networking-plumgrid_ | core | -+-------------------------------+-----------------------+ -| networking-powervm_ | ml2 | -+-------------------------------+-----------------------+ -| networking-vsphere_ | ml2 | -+-------------------------------+-----------------------+ -| nuage-openstack-neutron_ | core | -+-------------------------------+-----------------------+ -| vmware-nsx_ | core | -+-------------------------------+-----------------------+ - -Project Teams FAQ -~~~~~~~~~~~~~~~~~ - -**Q: When talking about contributor overlap, what is a contributor?** - -A Neutron contributor is someone who spends some portion of their time helping -with all of the things needed to run the Neutron project: bug triage, writing -and reviewing blueprints, writing and reviewing code, writing and reviewing -documentation, helping debug issues found by users or CI, and more. - -**Q: Why choose contributor overlap over technical overlap?** - -Technical overlap, or software qualities, are more difficult to pinpoint and -require a more extensive assessment from the PTL and the Neutron team, which -in turn has the danger of translating itself into a nearly full-time -policing/enforcement job. Wrongdoing will always be spotted, regardless of -whichever criteria is applied, and trusting known members of the team to do -the right thing should be an adequate safety net to preserve the sanity of -Neutron as a whole. - -**Q: What does a sub-project gain as a part of the Neutron project team?** - -A project under Neutron is no more an official part of OpenStack than another -OpenStack project team. Projects under Neutron share some resources. In -particular, they get managed backports, managed releases, managed CVEs, RFEs, -bugs, docs and everything that pertain the SDLC of the Neutron end-to-end -project. - -**Q: Why is kuryr a separate project?** - -Kuryr was started and incubated within the Neutron team. However, it interfaces -with Neutron as a client of the Neutron API, so it makes sense to stand as an -independent project. 
- -**Q: Why are several "advanced service" projects still included under Neutron?** - -neutron-lbaas, neutron-fwaas, and neutron-vpnaas are all included under the -Neutron project team largely for historical reasons. They were originally a -part of neutron itself and are still a part of the neutron deliverable in terms -of OpenStack governance. Because of the deliverable inclusion, they should really -only be considered for a move on a release boundary. - -**Q: Why is Octavia included under Neutron?** - -neutron-lbaas, neutron-lbaas-dashboard, and Octavia are all considered a unit. -If we split one, we need to split them together. We can't split these yet, as -they are a part of the official "neutron" deliverable. This needs to be done on -a release boundary when the lbaas team is ready to do so. - -.. _networking-ale-omniswitch: - -ALE Omniswitch -++++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-ale-omniswitch -* Launchpad: https://launchpad.net/networking-ale-omniswitch -* PyPI: https://pypi.python.org/pypi/networking-ale-omniswitch - -.. _networking-arista: - -Arista -++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-arista -* Launchpad: https://launchpad.net/networking-arista -* PyPI: https://pypi.python.org/pypi/networking-arista - -.. _networking-bagpipe: - -BaGPipe -+++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-bagpipe -* Launchpad: https://launchpad.net/networking-bagpipe -* PyPI: https://pypi.python.org/pypi/networking-bagpipe - -.. _networking-bgpvpn: - -BGPVPN -++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-bgpvpn -* Launchpad: https://launchpad.net/bgpvpn -* PyPI: https://pypi.python.org/pypi/networking-bgpvpn - -.. _networking-bigswitch: - -Big Switch Networks -+++++++++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-bigswitch -* Launchpad: https://launchpad.net/networking-bigswitch -* PyPI: https://pypi.python.org/pypi/bsnstacklib - -.. _networking-brocade: - -Brocade -+++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-brocade -* Launchpad: https://launchpad.net/networking-brocade -* PyPI: https://pypi.python.org/pypi/networking-brocade - -.. _networking-calico: - -Calico -++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-calico -* Launchpad: https://launchpad.net/networking-calico -* PyPI: https://pypi.python.org/pypi/networking-calico - -.. _networking-cisco: - -Cisco -+++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-cisco -* Launchpad: https://launchpad.net/networking-cisco -* PyPI: https://pypi.python.org/pypi/networking-cisco - -.. _dragonflow: - -DragonFlow -++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/dragonflow -* Launchpad: https://launchpad.net/dragonflow -* PyPI: https://pypi.python.org/pypi/DragonFlow - -.. _networking-edge-vpn: - -Edge VPN -++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-edge-vpn -* Launchpad: https://launchpad.net/edge-vpn - -.. _networking-fortinet: - -Fortinet -++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-fortinet -* Launchpad: https://launchpad.net/networking-fortinet -* PyPI: https://pypi.python.org/pypi/networking-fortinet - -.. _networking-fujitsu: - -FUJITSU -+++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-fujitsu -* Launchpad: https://launchpad.net/networking-fujitsu -* PyPI: https://pypi.python.org/pypi/networking-fujitsu - -.. 
_networking-hyperv: - -Hyper-V -+++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-hyperv -* Launchpad: https://launchpad.net/networking-hyperv -* PyPI: https://pypi.python.org/pypi/networking-hyperv - -.. _networking-infoblox: - -Infoblox -++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-infoblox -* Launchpad: https://launchpad.net/networking-infoblox -* PyPI: https://pypi.python.org/pypi/networking-infoblox - -.. _kuryr: - -Kuryr -+++++ - -* Git: https://git.openstack.org/cgit/openstack/kuryr/ -* Launchpad: https://launchpad.net/kuryr -* PyPI: https://pypi.python.org/pypi/kuryr/ - -.. _networking-l2gw: - -L2 Gateway -++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-l2gw -* Launchpad: https://launchpad.net/networking-l2gw -* PyPI: https://pypi.python.org/pypi/networking-l2gw - -.. _networking-midonet: - -MidoNet -+++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-midonet -* Launchpad: https://launchpad.net/networking-midonet -* PyPI: https://pypi.python.org/pypi/networking-midonet - -.. _networking-mlnx: - -Mellanox -++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-mlnx -* Launchpad: https://launchpad.net/networking-mlnx - -.. _networking-nec: - -NEC -+++ - -* Git: https://git.openstack.org/cgit/openstack/networking-nec -* Launchpad: https://launchpad.net/networking-nec -* PyPI: https://pypi.python.org/pypi/networking-nec - -.. _neutron: - -Neutron -+++++++ - -* Git: https://git.openstack.org/cgit/openstack/neutron -* Launchpad: https://launchpad.net/neutron - -.. _python-neutronclient: - -Neutron Client -++++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/python-neutronclient -* Launchpad: https://launchpad.net/python-neutronclient -* PyPI: https://pypi.python.org/pypi/python-neutronclient - -.. _neutron-dynamic-routing: - -Neutron Dynamic Routing -+++++++++++++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/neutron-dynamic-routing -* Launchpad: https://launchpad.net/neutron - -.. _neutron-fwaas: - -Neutron FWaaS -+++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/neutron-fwaas -* Launchpad: https://launchpad.net/neutron - -.. _neutron-lbaas: - -Neutron LBaaS -+++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/neutron-lbaas -* Launchpad: https://launchpad.net/neutron - -.. _neutron-lbaas-dashboard: - -Neutron LBaaS Dashboard -+++++++++++++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/neutron-lbaas-dashboard -* Launchpad: https://launchpad.net/neutron - -.. _neutron-lib: - -Neutron Library -+++++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/neutron-lib -* Launchpad: https://launchpad.net/neutron - -.. _python-neutron-pd-driver: - -Neutron Prefix Delegation -+++++++++++++++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/python-neutron-pd-driver -* Launchpad: https://launchpad.net/python-neutron-pd-driver -* PyPI: https://pypi.python.org/pypi/python-neutron-pd-driver - -.. _neutron-vpnaas: - -Neutron VPNaaS -++++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/neutron-vpnaas -* Launchpad: https://launchpad.net/neutron - -.. _nuage-openstack-neutron: - -Nuage -+++++ - -* Git: https://github.com/nuagenetworks/nuage-openstack-neutron - -.. _octavia: - -Octavia -+++++++ - -* Git: https://git.openstack.org/cgit/openstack/octavia -* Launchpad: https://launchpad.net/octavia -* PyPI: https://pypi.python.org/pypi/octavia - -.. 
_networking-odl: - -OpenDayLight -++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-odl -* Launchpad: https://launchpad.net/networking-odl -* PyPI: https://pypi.python.org/pypi/networking-odl - -.. _networking-ofagent: - -OpenFlow Agent (ofagent) -++++++++++++++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-ofagent -* Launchpad: https://launchpad.net/networking-ofagent -* PyPI: https://pypi.python.org/pypi/networking-ofagent - -Note: The networking-ofagent project has been removed in the Newton cycle - and the only stable branch is maintained until its EOL. - -.. _networking-onos: - -Open Network Operating System (onos) -++++++++++++++++++++++++++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-onos -* Launchpad: https://launchpad.net/networking-onos -* PyPI: https://pypi.python.org/pypi/networking-onos - -.. _networking-ovn: - -Open Virtual Network -++++++++++++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-ovn -* Launchpad: https://launchpad.net/networking-ovn -* PyPI: https://pypi.python.org/pypi/networking-ovn - -.. _networking-ovs-dpdk: - -Open DPDK -+++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-ovs-dpdk -* Launchpad: https://launchpad.net/networking-ovs-dpdk -* PyPI: https://pypi.python.org/pypi/networking-ovs-dpdk - -.. _networking-plumgrid: - -PLUMgrid -++++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-plumgrid -* Launchpad: https://launchpad.net/networking-plumgrid -* PyPI: https://pypi.python.org/pypi/networking-plumgrid - -.. _networking-powervm: - -PowerVM -+++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-powervm -* Launchpad: https://launchpad.net/networking-powervm -* PyPI: https://pypi.python.org/pypi/networking-powervm - -.. _networking-sfc: - -SFC -+++ - -* Git: https://git.openstack.org/cgit/openstack/networking-sfc -* Launchpad: https://launchpad.net/networking-sfc -* PyPI: https://pypi.python.org/pypi/networking-sfc - -.. _networking-vsphere: - -vSphere -+++++++ - -* Git: https://git.openstack.org/cgit/openstack/networking-vsphere -* Launchpad: https://launchpad.net/networking-vsphere -* PyPI: https://pypi.python.org/pypi/networking-vsphere - -.. 
_vmware-nsx: - -VMware NSX -++++++++++ - -* Git: https://git.openstack.org/cgit/openstack/vmware-nsx -* Launchpad: https://launchpad.net/vmware-nsx -* PyPI: https://pypi.python.org/pypi/vmware-nsx diff -Nru neutron-9.0.0~b2~dev280/etc/policy.json neutron-9.0.0~b3~dev557/etc/policy.json --- neutron-9.0.0~b2~dev280/etc/policy.json 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/etc/policy.json 2016-08-29 20:05:49.000000000 +0000 @@ -17,9 +17,11 @@ "create_subnet": "rule:admin_or_network_owner", "create_subnet:segment_id": "rule:admin_only", + "create_subnet:service_types": "rule:admin_only", "get_subnet": "rule:admin_or_owner or rule:shared", "get_subnet:segment_id": "rule:admin_only", "update_subnet": "rule:admin_or_network_owner", + "update_subnet:service_types": "rule:admin_only", "delete_subnet": "rule:admin_or_network_owner", "create_subnetpool": "", @@ -218,5 +220,12 @@ "create_flavor_service_profile": "rule:admin_only", "delete_flavor_service_profile": "rule:admin_only", "get_flavor_service_profile": "rule:regular_user", - "get_auto_allocated_topology": "rule:admin_or_owner" + "get_auto_allocated_topology": "rule:admin_or_owner", + + "create_trunk": "rule:regular_user", + "get_trunk": "rule:admin_or_owner", + "delete_trunk": "rule:admin_or_owner", + "get_subports": "", + "add_subports": "rule:admin_or_owner", + "remove_subports": "rule:admin_or_owner" } diff -Nru neutron-9.0.0~b2~dev280/HACKING.rst neutron-9.0.0~b3~dev557/HACKING.rst --- neutron-9.0.0~b2~dev280/HACKING.rst 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/HACKING.rst 2016-08-03 20:10:33.000000000 +0000 @@ -28,6 +28,7 @@ - [N334] Use unittest2 uniformly across Neutron. - [N340] Check usage of .i18n (and neutron.i18n) - [N341] Check usage of _ from python builtins +- [N342] String interpolation should be delayed at logging calls. Creating Unit Tests ------------------- diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/agent_extension.py neutron-9.0.0~b3~dev557/neutron/agent/agent_extension.py --- neutron-9.0.0~b2~dev280/neutron/agent/agent_extension.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/agent_extension.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,48 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +import six + + +@six.add_metaclass(abc.ABCMeta) +class AgentExtension(object): + """Define stable abstract interface for agent extensions. + + An agent extension extends the agent core functionality. + """ + + @abc.abstractmethod + def initialize(self, connection, driver_type): + """Perform agent core resource extension initialization. + + :param connection: RPC connection that can be reused by the extension + to define its RPC endpoints + :param driver_type: a string that defines the agent type to the + extension. Can be used to choose the right backend + implementation. + + Called after all extensions have been loaded. + No resource (port, policy, router, etc.) handling will be called before + this method. 
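The trunk policy entries added to etc/policy.json above let any regular user create trunks, while get and delete operations are limited to the admin or owner. As a hedged illustration of how a deployer might tighten this (the stricter rule shown is an assumption for the example, not a project recommendation), the same keys can be overridden in a site-local policy.json using the file's standard rule syntax: ::

    {
        "create_trunk": "rule:admin_only",
        "add_subports": "rule:admin_or_owner",
        "remove_subports": "rule:admin_or_owner"
    }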
+ """ + + def consume_api(self, agent_api): + """Consume the AgentAPI instance from the AgentExtensionsManager. + + Allows an extension to gain access to resources internal to the + neutron agent and otherwise unavailable to the extension. Examples of + such resources include bridges, ports, and routers. + + :param agent_api: An instance of an agent-specific API. + """ diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/agent_extensions_manager.py neutron-9.0.0~b3~dev557/neutron/agent/agent_extensions_manager.py --- neutron-9.0.0~b2~dev280/neutron/agent/agent_extensions_manager.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/agent_extensions_manager.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,64 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log +import stevedore + +from neutron._i18n import _, _LI + +LOG = log.getLogger(__name__) + + +AGENT_EXT_MANAGER_OPTS = [ + cfg.ListOpt('extensions', + default=[], + help=_('Extensions list to use')), +] + + +def register_opts(conf): + conf.register_opts(AGENT_EXT_MANAGER_OPTS, 'agent') + + +class AgentExtensionsManager(stevedore.named.NamedExtensionManager): + """Manage agent extensions.""" + + def __init__(self, conf, namespace): + super(AgentExtensionsManager, self).__init__( + namespace, conf.agent.extensions, + invoke_on_load=True, name_order=True) + LOG.info(_LI("Loaded agent extensions: %s"), self.names()) + + def initialize(self, connection, driver_type, agent_api=None): + """Initialize enabled agent extensions. + + :param connection: RPC connection that can be reused by extensions to + define their RPC endpoints + :param driver_type: a string that defines the agent type to the + extension. Can be used by the extension to choose + the right backend implementation. + :param agent_api: an AgentAPI instance that provides an API to + interact with the agent that the manager + is running in. + """ + # Initialize each agent extension in the list. + for extension in self: + LOG.info(_LI("Initializing agent extension '%s'"), extension.name) + # If the agent has provided an agent_api object, this object will + # be passed to all interested extensions. This object must be + # consumed by each such extension before the extension's + # intialize() method is called, as the initilization step + # relies on the agent_api already being available. + + extension.obj.consume_api(agent_api) + extension.obj.initialize(connection, driver_type) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/common/config.py neutron-9.0.0~b3~dev557/neutron/agent/common/config.py --- neutron-9.0.0~b2~dev280/neutron/agent/common/config.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/common/config.py 2016-08-29 20:05:49.000000000 +0000 @@ -64,6 +64,12 @@ "generated iptables rules that describe each rule's " "purpose. 
System must support the iptables comments " "module for addition of comments.")), + cfg.BoolOpt('debug_iptables_rules', default=False, + help=_("Duplicate every iptables difference calculation to " + "ensure the format being generated matches the format " + "of iptables-save. This option should not be turned " + "on for production systems because it imposes a " + "performance penalty.")), ] PROCESS_MONITOR_OPTS = [ @@ -83,14 +89,14 @@ ] EXT_NET_BRIDGE_OPTS = [ - cfg.StrOpt('external_network_bridge', default='br-ex', + cfg.StrOpt('external_network_bridge', default='', deprecated_for_removal=True, help=_("Name of bridge used for external network " - "traffic. This should be set to an empty value for the " - "Linux Bridge. When this parameter is set, each L3 " - "agent can be associated with no more than one external " - "network. This option is deprecated and will be removed " - "in the M release.")), + "traffic. When this parameter is set, the L3 agent will " + "plug an interface directly into an external bridge " + "which will not allow any wiring by the L2 agent. Using " + "this will result in incorrect port statuses. This " + "option is deprecated and will be removed in Ocata.")) ] diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/common/ovs_lib.py neutron-9.0.0~b3~dev557/neutron/agent/common/ovs_lib.py --- neutron-9.0.0~b2~dev280/neutron/agent/common/ovs_lib.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/common/ovs_lib.py 2016-08-03 20:10:33.000000000 +0000 @@ -30,15 +30,13 @@ from neutron.agent.common import utils from neutron.agent.linux import ip_lib from neutron.agent.ovsdb import api as ovsdb +from neutron.conf.agent import ovs_conf from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants UINT64_BITMASK = (1 << 64) - 1 -# Default timeout for ovs-vsctl command -DEFAULT_OVS_VSCTL_TIMEOUT = 10 - # Special return value for an invalid OVS ofport INVALID_OFPORT = -1 UNASSIGNED_OFPORT = [] @@ -47,14 +45,7 @@ FAILMODE_SECURE = 'secure' FAILMODE_STANDALONE = 'standalone' -OPTS = [ - cfg.IntOpt('ovs_vsctl_timeout', - default=DEFAULT_OVS_VSCTL_TIMEOUT, - help=_('Timeout in seconds for ovs-vsctl commands. 
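As a configuration sketch for the debug_iptables_rules option introduced above: the option is registered alongside comment_iptables_rules, so the [AGENT] section shown here is an assumption based on that placement. ::

    [AGENT]
    # Re-check every iptables diff against the iptables-save format.
    # Not for production systems, per the performance caveat in the
    # option's help text.
    debug_iptables_rules = True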
' - 'If the timeout expires, ovs commands will fail with ' - 'ALARMCLOCK error.')), -] -cfg.CONF.register_opts(OPTS) +ovs_conf.register_ovs_agent_opts() LOG = logging.getLogger(__name__) @@ -120,10 +111,7 @@ self.ovsdb.add_br(bridge_name, datapath_type).execute() - br = OVSBridge(bridge_name) - # Don't return until vswitchd sets up the internal port - br.get_port_ofport(bridge_name) - return br + return OVSBridge(bridge_name) def delete_bridge(self, bridge_name): self.ovsdb.del_br(bridge_name).execute() @@ -221,8 +209,6 @@ if secure_mode: txn.add(self.ovsdb.set_fail_mode(self.br_name, FAILMODE_SECURE)) - # Don't return until vswitchd sets up the internal port - self.get_port_ofport(self.br_name) def destroy(self): self.delete_bridge(self.br_name) @@ -248,8 +234,6 @@ if interface_attr_tuples: txn.add(self.ovsdb.db_set('Interface', port_name, *interface_attr_tuples)) - # Don't return until the port has been assigned by vswitchd - self.get_port_ofport(port_name) def delete_port(self, port_name): self.ovsdb.del_port(port_name, self.br_name).execute() @@ -403,7 +387,7 @@ execute(check_error=check_error, log_errors=log_errors)) # returns a VIF object for each VIF port - def get_vif_ports(self): + def get_vif_ports(self, ofport_filter=None): edge_ports = [] port_info = self.get_ports_attributes( 'Interface', columns=['name', 'external_ids', 'ofport'], @@ -412,6 +396,8 @@ name = port['name'] external_ids = port['external_ids'] ofport = port['ofport'] + if ofport_filter and ofport in ofport_filter: + continue if "iface-id" in external_ids and "attached-mac" in external_ids: p = VifPort(name, ofport, external_ids["iface-id"], external_ids["attached-mac"], self) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/dhcp/config.py neutron-9.0.0~b3~dev557/neutron/agent/dhcp/config.py --- neutron-9.0.0~b2~dev280/neutron/agent/dhcp/config.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/dhcp/config.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,102 +0,0 @@ -# Copyright 2015 OpenStack Foundation -# -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from neutron._i18n import _ - -DHCP_AGENT_OPTS = [ - cfg.IntOpt('resync_interval', default=5, - help=_("The DHCP agent will resync its state with Neutron to " - "recover from any transient notification or RPC errors. " - "The interval is number of seconds between attempts.")), - cfg.StrOpt('dhcp_driver', - default='neutron.agent.linux.dhcp.Dnsmasq', - help=_("The driver used to manage the DHCP server.")), - cfg.BoolOpt('enable_isolated_metadata', default=False, - help=_("The DHCP server can assist with providing metadata " - "support on isolated networks. Setting this value to " - "True will cause the DHCP server to append specific " - "host routes to the DHCP request. The metadata service " - "will only be activated when the subnet does not " - "contain any router port. The guest instance must be " - "configured to request host routes via DHCP (Option " - "121). 
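A brief usage sketch for the ofport_filter parameter added to get_vif_ports() above; the bridge name is hypothetical, and the filter values are the module-level sentinels defined in ovs_lib. ::

    # Hypothetical caller: list VIF ports on br-int, skipping ports whose
    # ofport is invalid (-1) or not yet assigned by vswitchd.
    from neutron.agent.common import ovs_lib

    br = ovs_lib.OVSBridge('br-int')
    vifs = br.get_vif_ports(
        ofport_filter=(ovs_lib.INVALID_OFPORT, ovs_lib.UNASSIGNED_OFPORT))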
This option doesn't have any effect when " - "force_metadata is set to True.")), - cfg.BoolOpt('force_metadata', default=False, - help=_("In some cases the Neutron router is not present to " - "provide the metadata IP but the DHCP server can be " - "used to provide this info. Setting this value will " - "force the DHCP server to append specific host routes " - "to the DHCP request. If this option is set, then the " - "metadata service will be activated for all the " - "networks.")), - cfg.BoolOpt('enable_metadata_network', default=False, - help=_("Allows for serving metadata requests coming from a " - "dedicated metadata access network whose CIDR is " - "169.254.169.254/16 (or larger prefix), and is " - "connected to a Neutron router from which the VMs send " - "metadata:1 request. In this case DHCP Option 121 will " - "not be injected in VMs, as they will be able to reach " - "169.254.169.254 through a router. This option " - "requires enable_isolated_metadata = True.")), - cfg.IntOpt('num_sync_threads', default=4, - help=_('Number of threads to use during sync process. ' - 'Should not exceed connection pool size configured on ' - 'server.')) -] - -DHCP_OPTS = [ - cfg.StrOpt('dhcp_confs', - default='$state_path/dhcp', - help=_('Location to store DHCP server config files.')), - cfg.StrOpt('dhcp_domain', - default='openstacklocal', - help=_('Domain to use for building the hostnames. ' - 'This option is deprecated. It has been moved to ' - 'neutron.conf as dns_domain. It will be removed ' - 'in a future release.'), - deprecated_for_removal=True), -] - -DNSMASQ_OPTS = [ - cfg.StrOpt('dnsmasq_config_file', - default='', - help=_('Override the default dnsmasq settings ' - 'with this file.')), - cfg.ListOpt('dnsmasq_dns_servers', - default=[], - help=_('Comma-separated list of the DNS servers which will be ' - 'used as forwarders.')), - cfg.StrOpt('dnsmasq_base_log_dir', - help=_("Base log dir for dnsmasq logging. " - "The log contains DHCP and DNS log information and " - "is useful for debugging issues with either DHCP or " - "DNS. If this section is null, disable dnsmasq log.")), - cfg.BoolOpt('dnsmasq_local_resolv', default=False, - help=_("Enables the dnsmasq service to provide name " - "resolution for instances via DNS resolvers on the " - "host running the DHCP agent. Effectively removes the " - "'--no-resolv' option from the dnsmasq process " - "arguments. 
Adding custom DNS resolvers to the " - "'dnsmasq_dns_servers' option disables this feature.")), - cfg.IntOpt( - 'dnsmasq_lease_max', - default=(2 ** 24), - help=_('Limit number of leases to prevent a denial-of-service.')), - cfg.BoolOpt('dhcp_broadcast_reply', default=False, - help=_("Use broadcast in DHCP replies.")), -] diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/dhcp_agent.py neutron-9.0.0~b3~dev557/neutron/agent/dhcp_agent.py --- neutron-9.0.0~b2~dev280/neutron/agent/dhcp_agent.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/dhcp_agent.py 2016-08-03 20:10:33.000000000 +0000 @@ -20,11 +20,11 @@ from oslo_service import service from neutron.agent.common import config -from neutron.agent.dhcp import config as dhcp_config from neutron.agent.linux import interface from neutron.agent.metadata import config as metadata_config from neutron.common import config as common_config from neutron.common import topics +from neutron.conf.agent import dhcp as dhcp_config from neutron import service as neutron_service @@ -32,9 +32,7 @@ config.register_interface_driver_opts_helper(conf) config.register_agent_state_opts_helper(conf) config.register_availability_zone_opts_helper(conf) - conf.register_opts(dhcp_config.DHCP_AGENT_OPTS) - conf.register_opts(dhcp_config.DHCP_OPTS) - conf.register_opts(dhcp_config.DNSMASQ_OPTS) + dhcp_config.register_agent_dhcp_opts(conf) conf.register_opts(metadata_config.DRIVER_OPTS) conf.register_opts(metadata_config.SHARED_OPTS) conf.register_opts(interface.OPTS) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l2/agent_extension.py neutron-9.0.0~b3~dev557/neutron/agent/l2/agent_extension.py --- neutron-9.0.0~b2~dev280/neutron/agent/l2/agent_extension.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l2/agent_extension.py 2016-08-03 20:10:33.000000000 +0000 @@ -17,52 +17,11 @@ import six +from neutron.agent.l2 import l2_agent_extension -@six.add_metaclass(abc.ABCMeta) -class AgentCoreResourceExtension(object): - """Define stable abstract interface for agent extensions. - An agent extension extends the agent core functionality. +@six.add_metaclass(abc.ABCMeta) +class AgentCoreResourceExtension(l2_agent_extension.L2AgentExtension): + """This is a shim around L2AgentExtension class. It is intended for use by + out of tree extensions that were inheriting AgentCoreResourceExtension. """ - - def initialize(self, connection, driver_type): - """Perform agent core resource extension initialization. - - :param connection: RPC connection that can be reused by the extension - to define its RPC endpoints - :param driver_type: a string that defines the agent type to the - extension. Can be used to choose the right backend - implementation. - - Called after all extensions have been loaded. - No port handling will be called before this method. - """ - - @abc.abstractmethod - def handle_port(self, context, data): - """Handle agent extension for port. - - This can be called on either create or update, depending on the - code flow. Thus, it's this function's responsibility to check what - actually changed. - - :param context: rpc context - :param data: port data - """ - - @abc.abstractmethod - def delete_port(self, context, data): - """Delete port from agent extension. - - :param context: rpc context - :param data: port data - """ - - def consume_api(self, agent_api): - """Consume the AgentAPI instance from the AgentExtensionsManager - - This allows extensions to gain access to resources limited to the - NeutronAgent. 
- - :param agent_api: An instance of an agent specific API - """ diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l2/extensions/fdb_population.py neutron-9.0.0~b3~dev557/neutron/agent/l2/extensions/fdb_population.py --- neutron-9.0.0~b2~dev280/neutron/agent/l2/extensions/fdb_population.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l2/extensions/fdb_population.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,181 @@ +# Copyright (c) 2016 Mellanox Technologies, Ltd +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +from neutron_lib import constants +from oslo_config import cfg +from oslo_log import log as logging + +from neutron._i18n import _, _LE, _LW +from neutron.agent.l2 import l2_agent_extension +from neutron.agent.linux import bridge_lib +from neutron.common import utils as n_utils +from neutron.plugins.ml2.drivers.linuxbridge.agent.common import ( + constants as linux_bridge_constants) +from neutron.plugins.ml2.drivers.openvswitch.agent.common import ( + constants as ovs_constants) + +# if shared_physical_device_mappings is not configured KeyError will be thrown +fdb_population_opt = [ + cfg.ListOpt('shared_physical_device_mappings', default=[], + help=_("Comma-separated list of " + ": tuples mapping " + "physical network names to the agent's node-specific " + "shared physical network device between " + "SR-IOV and OVS or SR-IOV and linux bridge")) +] +cfg.CONF.register_opts(fdb_population_opt, 'FDB') + +LOG = logging.getLogger(__name__) + + +class FdbPopulationAgentExtension( + l2_agent_extension.L2AgentExtension): + """The FDB population is an agent extension to OVS or linux bridge + whose objective is to update the FDB table for existing instances + using normal ports, thus enabling communication between SR-IOV instances + and normal instances. + Additional information describing the problem can be found here: + http://events.linuxfoundation.org/sites/events/files/slides/LinuxConJapan2014_makita_0.pdf + """ + + # FDB updates are triggered for ports with a certain device_owner only: + # - device owner "compute": updates the FDB with normal port instances, + # required in order to enable communication between + # SR-IOV direct port instances and normal port instances. + # - device owner "router_interface": updates the FDB with OVS/LB ports, + # required in order to enable communication for SR-IOV instances + # with floating IP that are located on the network node. + # - device owner "DHCP": updates the FDB with the dhcp server. + # When the lease expires a unicast renew message is sent + # to the dhcp server. In case the FDB is not updated + # the message will be sent to the wire, causing the message + # to get lost in case the sender uses direct port and is + # located on the same hypervisor as the network node.
+ PERMITTED_DEVICE_OWNERS = {constants.DEVICE_OWNER_COMPUTE_PREFIX, + constants.DEVICE_OWNER_ROUTER_INTF, + constants.DEVICE_OWNER_DHCP} + + class FdbTableTracker(object): + """FDB table tracker is a helper class + intended to keep track of the existing FDB rules. + """ + def __init__(self, devices): + self.device_to_macs = {} + self.portid_to_mac = {} + # update macs already in the physical interface's FDB table + for device in devices: + try: + _stdout = bridge_lib.FdbInterface.show(device) + except RuntimeError as e: + LOG.warning(_LW( + 'Unable to find FDB Interface %(device)s. ' + 'Exception: %(e)s'), {'device': device, 'e': e}) + continue + self.device_to_macs[device] = _stdout.split()[::3] + + def update_port(self, device, port_id, mac): + # check if device is updated + if self.device_to_macs.get(device) == mac: + return + # delete invalid port_id's mac from the FDB, + # in case the port was updated to another mac + self.delete_port([device], port_id) + # update port id + self.portid_to_mac[port_id] = mac + # check if rule for mac already exists + if mac in self.device_to_macs[device]: + return + try: + bridge_lib.FdbInterface.add(mac, device) + except RuntimeError as e: + LOG.warning(_LW( + 'Unable to add mac %(mac)s ' + 'to FDB Interface %(device)s. ' + 'Exception: %(e)s'), + {'mac': mac, 'device': device, 'e': e}) + return + self.device_to_macs[device].append(mac) + + def delete_port(self, devices, port_id): + mac = self.portid_to_mac.get(port_id) + if mac is None: + LOG.warning(_LW('Port Id %(port_id)s does not have a rule for ' + 'devices %(devices)s in FDB table'), + {'port_id': port_id, 'devices': devices}) + return + for device in devices: + if mac in self.device_to_macs[device]: + try: + bridge_lib.FdbInterface.delete(mac, device) + except RuntimeError as e: + LOG.warning(_LW( + 'Unable to delete mac %(mac)s ' + 'from FDB Interface %(device)s. 
' 'Exception: %(e)s'), + {'mac': mac, 'device': device, 'e': e}) + return + self.device_to_macs[device].remove(mac) + del self.portid_to_mac[port_id] + + # class FdbPopulationAgentExtension implementation: + def initialize(self, connection, driver_type): + """Perform FDB Agent Extension initialization.""" + valid_driver_types = (linux_bridge_constants.EXTENSION_DRIVER_TYPE, + ovs_constants.EXTENSION_DRIVER_TYPE) + if driver_type not in valid_driver_types: + LOG.error(_LE('FDB extension is only supported for the OVS and ' + 'linux bridge agents; the current driver type is ' + '%(driver_type)s'), {'driver_type': driver_type}) + sys.exit(1) + + self.device_mappings = n_utils.parse_mappings( + cfg.CONF.FDB.shared_physical_device_mappings, unique_keys=False) + devices = self._get_devices() + if not devices: + LOG.error(_LE('Invalid configuration provided for FDB extension: ' + 'no physical devices')) + sys.exit(1) + self.fdb_tracker = self.FdbTableTracker(devices) + + def handle_port(self, context, details): + """Handle agent FDB population extension for port.""" + device_owner = details['device_owner'] + if self._is_valid_device_owner(device_owner): + mac = details['mac_address'] + port_id = details['port_id'] + physnet = details.get('physical_network') + if physnet and physnet in self.device_mappings: + for device in self.device_mappings[physnet]: + self.fdb_tracker.update_port(device, port_id, mac) + + def delete_port(self, context, details): + """Delete port from FDB population extension.""" + port_id = details['port_id'] + devices = self._get_devices() + self.fdb_tracker.delete_port(devices, port_id) + + def _get_devices(self): + def _flatten_list(l): + return [item for sublist in l for item in sublist] + + return _flatten_list(self.device_mappings.values()) + + def _is_valid_device_owner(self, device_owner): + for permitted_device_owner in self.PERMITTED_DEVICE_OWNERS: + if device_owner.startswith(permitted_device_owner): + return True + return False diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l2/extensions/manager.py neutron-9.0.0~b3~dev557/neutron/agent/l2/extensions/manager.py --- neutron-9.0.0~b2~dev280/neutron/agent/l2/extensions/manager.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l2/extensions/manager.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,86 +0,0 @@ -# Copyright (c) 2015 Mellanox Technologies, Ltd -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
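For reference, initialize() above turns the [FDB] shared_physical_device_mappings option into a physnet-to-devices multimap and flattens it into the device list handed to FdbTableTracker. A condensed, self-contained sketch of that shaping (helper names are illustrative, not the neutron utilities):

    def parse_shared_mappings(pairs):
        # ['physnet1:eth1', 'physnet1:eth2', 'physnet2:eth3'] ->
        # {'physnet1': ['eth1', 'eth2'], 'physnet2': ['eth3']}
        mappings = {}
        for pair in pairs:
            physnet, device = pair.split(':')
            mappings.setdefault(physnet, []).append(device)
        return mappings

    def all_devices(mappings):
        # mirrors FdbPopulationAgentExtension._get_devices()
        return [dev for devs in mappings.values() for dev in devs]

    mappings = parse_shared_mappings(['physnet1:eth1', 'physnet1:eth2'])
    assert all_devices(mappings) == ['eth1', 'eth2']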
- -from oslo_config import cfg -from oslo_log import log -import stevedore - -from neutron._i18n import _, _LE, _LI - -LOG = log.getLogger(__name__) - - -L2_AGENT_EXT_MANAGER_NAMESPACE = 'neutron.agent.l2.extensions' -L2_AGENT_EXT_MANAGER_OPTS = [ - cfg.ListOpt('extensions', - default=[], - help=_('Extensions list to use')), -] - - -def register_opts(conf): - conf.register_opts(L2_AGENT_EXT_MANAGER_OPTS, 'agent') - - -class AgentExtensionsManager(stevedore.named.NamedExtensionManager): - """Manage agent extensions.""" - - def __init__(self, conf): - super(AgentExtensionsManager, self).__init__( - L2_AGENT_EXT_MANAGER_NAMESPACE, conf.agent.extensions, - invoke_on_load=True, name_order=True) - LOG.info(_LI("Loaded agent extensions: %s"), self.names()) - - def initialize(self, connection, driver_type, agent_api=None): - """Initialize enabled L2 agent extensions. - - :param connection: RPC connection that can be reused by extensions to - define their RPC endpoints - :param driver_type: a string that defines the agent type to the - extension. Can be used by the extension to choose - the right backend implementation. - :param agent_api: an AgentAPI instance that provides an API to - interact with the agent that the manager - is running in. - """ - # Initialize each agent extension in the list. - for extension in self: - LOG.info(_LI("Initializing agent extension '%s'"), extension.name) - extension.obj.consume_api(agent_api) - extension.obj.initialize(connection, driver_type) - - def handle_port(self, context, data): - """Notify all agent extensions to handle port.""" - for extension in self: - try: - extension.obj.handle_port(context, data) - except AttributeError: - LOG.exception( - _LE("Agent Extension '%(name)s' failed " - "while handling port update"), - {'name': extension.name} - ) - - def delete_port(self, context, data): - """Notify all agent extensions to delete port.""" - for extension in self: - try: - extension.obj.delete_port(context, data) - except AttributeError: - LOG.exception( - _LE("Agent Extension '%(name)s' failed " - "while handling port deletion"), - {'name': extension.name} - ) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l2/extensions/qos.py neutron-9.0.0~b3~dev557/neutron/agent/l2/extensions/qos.py --- neutron-9.0.0~b2~dev280/neutron/agent/l2/extensions/qos.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l2/extensions/qos.py 2016-08-03 20:10:33.000000000 +0000 @@ -22,7 +22,7 @@ import six from neutron._i18n import _LW, _LI -from neutron.agent.l2 import agent_extension +from neutron.agent.l2 import l2_agent_extension from neutron.agent.linux import tc_lib from neutron.api.rpc.callbacks.consumer import registry from neutron.api.rpc.callbacks import events @@ -185,13 +185,12 @@ del self.known_policies[qos_policy_id] -class QosAgentExtension(agent_extension.AgentCoreResourceExtension): - SUPPORTED_RESOURCES = [resources.QOS_POLICY] +class QosAgentExtension(l2_agent_extension.L2AgentExtension): + SUPPORTED_RESOURCE_TYPES = [resources.QOS_POLICY] def initialize(self, connection, driver_type): - """Perform Agent Extension initialization. 
+ """Initialize agent extension.""" - """ self.resource_rpc = resources_rpc.ResourcesPullRpcApi() self.qos_driver = manager.NeutronManager.load_class_for_provider( 'neutron.qos.agent_drivers', driver_type)() @@ -200,28 +199,35 @@ self.policy_map = PortPolicyMap() - registry.subscribe(self._handle_notification, resources.QOS_POLICY) self._register_rpc_consumers(connection) def consume_api(self, agent_api): + """Allows an extension to gain access to resources internal to the + neutron agent and otherwise unavailable to the extension. + """ self.agent_api = agent_api def _register_rpc_consumers(self, connection): + """Allows an extension to receive notifications of updates made to + items of interest. + """ endpoints = [resources_rpc.ResourcesPushRpcCallback()] - for resource_type in self.SUPPORTED_RESOURCES: - # we assume that neutron-server always broadcasts the latest + for resource_type in self.SUPPORTED_RESOURCE_TYPES: + # We assume that the neutron server always broadcasts the latest # version known to the agent + registry.subscribe(self._handle_notification, resource_type) topic = resources_rpc.resource_type_versioned_topic(resource_type) connection.create_consumer(topic, endpoints, fanout=True) @lockutils.synchronized('qos-port') - def _handle_notification(self, qos_policy, event_type): + def _handle_notification(self, qos_policies, event_type): # server does not allow to remove a policy that is attached to any # port, so we ignore DELETED events. Also, if we receive a CREATED # event for a policy, it means that there are no ports so far that are # attached to it. That's why we are interested in UPDATED events only if event_type == events.UPDATED: - self._process_update_policy(qos_policy) + for qos_policy in qos_policies: + self._process_update_policy(qos_policy) @lockutils.synchronized('qos-port') def handle_port(self, context, port): diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l2/l2_agent_extension.py neutron-9.0.0~b3~dev557/neutron/agent/l2/l2_agent_extension.py --- neutron-9.0.0~b2~dev280/neutron/agent/l2/l2_agent_extension.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l2/l2_agent_extension.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,48 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +import six + +from neutron.agent import agent_extension + + +@six.add_metaclass(abc.ABCMeta) +class L2AgentExtension(agent_extension.AgentExtension): + """Define stable abstract interface for l2 agent extensions. + + An agent extension extends the agent core functionality. + """ + + def initialize(self, connection, driver_type): + """Initialize agent extension.""" + + @abc.abstractmethod + def handle_port(self, context, data): + """Handle agent extension for port. + + This can be called on either create or update, depending on the + code flow. Thus, it's this function's responsibility to check what + actually changed. 
+ + :param context: rpc context + :param data: port data + """ + + @abc.abstractmethod + def delete_port(self, context, data): + """Delete port from agent extension. + + :param context: rpc context + :param data: port data + """ diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l2/l2_agent_extensions_manager.py neutron-9.0.0~b3~dev557/neutron/agent/l2/l2_agent_extensions_manager.py --- neutron-9.0.0~b2~dev280/neutron/agent/l2/l2_agent_extensions_manager.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l2/l2_agent_extensions_manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log + +from neutron._i18n import _LE +from neutron.agent import agent_extensions_manager as agent_ext_manager + +LOG = log.getLogger(__name__) + + +L2_AGENT_EXT_MANAGER_NAMESPACE = 'neutron.agent.l2.extensions' + + +def register_opts(conf): + agent_ext_manager.register_opts(conf) + + +class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager): + """Manage l2 agent extensions. The handle_port and delete_port methods are + guaranteed to be attributes of each extension because they have been + marked as abc.abstractmethod in the extensions' abstract class. + """ + + def __init__(self, conf): + super(L2AgentExtensionsManager, self).__init__(conf, + L2_AGENT_EXT_MANAGER_NAMESPACE) + + def handle_port(self, context, data): + """Notify all agent extensions to handle port.""" + for extension in self: + if hasattr(extension.obj, 'handle_port'): + extension.obj.handle_port(context, data) + else: + LOG.error( + _LE("Agent Extension '%(name)s' does not " + "implement method handle_port"), + {'name': extension.name} + ) + + def delete_port(self, context, data): + """Notify all agent extensions to delete port.""" + for extension in self: + if hasattr(extension.obj, 'delete_port'): + extension.obj.delete_port(context, data) + else: + LOG.error( + _LE("Agent Extension '%(name)s' does not " + "implement method delete_port"), + {'name': extension.name} + ) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/agent.py neutron-9.0.0~b3~dev557/neutron/agent/l3/agent.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/agent.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -32,6 +32,7 @@ from neutron.agent.l3 import dvr_local_router as dvr_local_router from neutron.agent.l3 import ha from neutron.agent.l3 import ha_router +from neutron.agent.l3 import l3_agent_extensions_manager as l3_ext_manager from neutron.agent.l3 import legacy_router from neutron.agent.l3 import namespace_manager from neutron.agent.l3 import namespaces @@ -219,10 +220,12 @@ 'to retrieve service plugins enabled. ' 'Check connectivity to neutron server. ' 'Retrying... 
' - 'Detailed message: %(msg)s.') % {'msg': e}) + 'Detailed message: %(msg)s.'), {'msg': e}) continue break + self.init_extension_manager(self.plugin_rpc) + self.metadata_driver = None if self.conf.enable_metadata_proxy: self.metadata_driver = metadata_driver.MetadataDriver(self) @@ -311,12 +314,12 @@ kwargs['host'] = self.host if router.get('distributed') and router.get('ha'): - if self.conf.agent_mode == l3_constants.L3_AGENT_MODE_DVR_SNAT: + if self.conf.agent_mode == lib_const.L3_AGENT_MODE_DVR_SNAT: kwargs['state_change_callback'] = self.enqueue_state_change return dvr_edge_ha_router.DvrEdgeHaRouter(*args, **kwargs) if router.get('distributed'): - if self.conf.agent_mode == l3_constants.L3_AGENT_MODE_DVR_SNAT: + if self.conf.agent_mode == lib_const.L3_AGENT_MODE_DVR_SNAT: return dvr_router.DvrEdgeRouter(*args, **kwargs) else: return dvr_local_router.DvrLocalRouter(*args, **kwargs) @@ -341,6 +344,7 @@ try: self._router_removed(router_id) + self.l3_ext_manager.delete_router(self.context, router_id) except Exception: LOG.exception(_LE('Error while deleting router %s'), router_id) return False @@ -363,6 +367,13 @@ registry.notify(resources.ROUTER, events.AFTER_DELETE, self, router=ri) + def init_extension_manager(self, connection): + l3_ext_manager.register_opts(self.conf) + self.l3_ext_manager = ( + l3_ext_manager.L3AgentExtensionsManager(self.conf)) + self.l3_ext_manager.initialize( + connection, lib_const.L3_AGENT_MODE) + def router_deleted(self, context, router_id): """Deal with router deletion RPC message.""" LOG.debug('Got router deleted notification for %s', router_id) @@ -426,6 +437,7 @@ ri.router = router ri.process(self) registry.notify(resources.ROUTER, events.AFTER_CREATE, self, router=ri) + self.l3_ext_manager.add_router(self.context, router) def _process_updated_router(self, router): ri = self.router_info[router['id']] @@ -434,6 +446,7 @@ self, router=ri) ri.process(self) registry.notify(resources.ROUTER, events.AFTER_UPDATE, self, router=ri) + self.l3_ext_manager.update_router(self.context, router) def _resync_router(self, router_update, priority=queue.PRIORITY_SYNC_ROUTERS_TASK): @@ -546,8 +559,12 @@ # need to keep fip namespaces as well ext_net_id = (r['external_gateway_info'] or {}).get( 'network_id') + is_snat_agent = (self.conf.agent_mode == + lib_const.L3_AGENT_MODE_DVR_SNAT) if ext_net_id: ns_manager.keep_ext_net(ext_net_id) + elif is_snat_agent: + ns_manager.ensure_snat_cleanup(r['id']) update = queue.RouterUpdate( r['id'], queue.PRIORITY_SYNC_ROUTERS_TASK, diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/config.py neutron-9.0.0~b3~dev557/neutron/agent/l3/config.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/config.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/config.py 2016-08-03 20:10:33.000000000 +0000 @@ -14,88 +14,13 @@ # License for the specific language governing permissions and limitations # under the License. -from oslo_config import cfg +# TODO(asingh): https://review.openstack.org/#/c/338596/ refactors +# neutron.agent.l3.config to neutron.conf.agent.l3.config. 
+# neutron-fwaas/cmd/eventlet/agents/fw.py imports neutron.agent.l3.config +# This file will be removed when neutron-fwaas imports the updated file +# https://review.openstack.org/#/c/339177/ -from neutron._i18n import _ -from neutron.agent.common import config -from neutron.common import constants +import neutron.conf.agent.l3.config -OPTS = [ - cfg.StrOpt('agent_mode', default=constants.L3_AGENT_MODE_LEGACY, - choices=(constants.L3_AGENT_MODE_DVR, - constants.L3_AGENT_MODE_DVR_SNAT, - constants.L3_AGENT_MODE_LEGACY), - help=_("The working mode for the agent. Allowed modes are: " - "'legacy' - this preserves the existing behavior " - "where the L3 agent is deployed on a centralized " - "networking node to provide L3 services like DNAT, " - "and SNAT. Use this mode if you do not want to " - "adopt DVR. 'dvr' - this mode enables DVR " - "functionality and must be used for an L3 agent " - "that runs on a compute host. 'dvr_snat' - this " - "enables centralized SNAT support in conjunction " - "with DVR. This mode must be used for an L3 agent " - "running on a centralized node (or in single-host " - "deployments, e.g. devstack)")), - cfg.PortOpt('metadata_port', - default=9697, - help=_("TCP Port used by Neutron metadata namespace proxy.")), - cfg.IntOpt('send_arp_for_ha', - default=3, - help=_("Send this many gratuitous ARPs for HA setup, if " - "less than or equal to 0, the feature is disabled")), - cfg.BoolOpt('handle_internal_only_routers', - default=True, - help=_("Indicates that this L3 agent should also handle " - "routers that do not have an external network gateway " - "configured. This option should be True only for a " - "single agent in a Neutron deployment, and may be " - "False for all agents if all routers must have an " - "external network gateway.")), - cfg.StrOpt('gateway_external_network_id', default='', - help=_("When external_network_bridge is set, each L3 agent can " - "be associated with no more than one external network. " - "This value should be set to the UUID of that external " - "network. To allow L3 agent support multiple external " - "networks, both the external_network_bridge and " - "gateway_external_network_id must be left empty.")), - cfg.StrOpt('ipv6_gateway', default='', - help=_("With IPv6, the network used for the external gateway " - "does not need to have an associated subnet, since the " - "automatically assigned link-local address (LLA) can " - "be used. However, an IPv6 gateway address is needed " - "for use as the next-hop for the default route. " - "If no IPv6 gateway address is configured here, " - "(and only then) the neutron router will be configured " - "to get its default route from router advertisements " - "(RAs) from the upstream router; in which case the " - "upstream router must also be configured to send " - "these RAs. " - "The ipv6_gateway, when configured, should be the LLA " - "of the interface on the upstream router. If a " - "next-hop using a global unique address (GUA) is " - "desired, it needs to be done via a subnet allocated " - "to the network and not through this parameter. ")), - cfg.StrOpt('prefix_delegation_driver', - default='dibbler', - help=_('Driver used for ipv6 prefix delegation. This needs to ' - 'be an entry point defined in the ' - 'neutron.agent.linux.pd_drivers namespace. 
See ' - 'setup.cfg for entry points included with the neutron ' - 'source.')), - cfg.BoolOpt('enable_metadata_proxy', default=True, - help=_("Allow running metadata proxy.")), - cfg.StrOpt('metadata_access_mark', - default='0x1', - help=_('Iptables mangle mark used to mark metadata valid ' - 'requests. This mark will be masked with 0xffff so ' - 'that only the lower 16 bits will be used.')), - cfg.StrOpt('external_ingress_mark', - default='0x2', - help=_('Iptables mangle mark used to mark ingress from ' - 'external network. This mark will be masked with ' - '0xffff so that only the lower 16 bits will be used.')), -] - -OPTS += config.EXT_NET_BRIDGE_OPTS +OPTS = neutron.conf.agent.l3.config.OPTS diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/dvr_edge_ha_router.py neutron-9.0.0~b3~dev557/neutron/agent/l3/dvr_edge_ha_router.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/dvr_edge_ha_router.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/dvr_edge_ha_router.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,14 +15,14 @@ from neutron_lib import constants -from neutron.agent.l3.dvr_edge_router import DvrEdgeRouter +from neutron.agent.l3 import dvr_edge_router from neutron.agent.l3 import dvr_snat_ns -from neutron.agent.l3.ha_router import HaRouter -from neutron.agent.l3.router_info import RouterInfo -from neutron.common import constants as l3_constants +from neutron.agent.l3 import ha_router +from neutron.agent.l3 import router_info -class DvrEdgeHaRouter(DvrEdgeRouter, HaRouter): +class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter, + ha_router.HaRouter): """Router class which represents a centralized SNAT DVR router with HA capabilities. """ @@ -31,7 +31,6 @@ super(DvrEdgeHaRouter, self).__init__(agent, host, *args, **kwargs) self.enable_snat = None - self.snat_ports = None @property def ha_namespace(self): @@ -41,7 +40,7 @@ def internal_network_added(self, port): # Call RouterInfo's internal_network_added (Plugs the port, adds IP) - RouterInfo.internal_network_added(self, port) + router_info.RouterInfo.internal_network_added(self, port) for subnet in port['subnets']: self._set_subnet_arp_info(subnet['id']) @@ -84,7 +83,8 @@ self._clear_vips(interface_name) def external_gateway_updated(self, ex_gw_port, interface_name): - HaRouter.external_gateway_updated(self, ex_gw_port, interface_name) + ha_router.HaRouter.external_gateway_updated(self, ex_gw_port, + interface_name) def initialize(self, process_monitor): self._create_snat_namespace() @@ -96,7 +96,7 @@ self.enable_keepalived() def get_router_cidrs(self, device): - return RouterInfo.get_router_cidrs(self, device) + return router_info.RouterInfo.get_router_cidrs(self, device) def _external_gateway_added(self, ex_gw_port, interface_name, ns_name, preserve_ips): @@ -104,7 +104,7 @@ def _is_this_snat_host(self): return (self.agent_conf.agent_mode - == l3_constants.L3_AGENT_MODE_DVR_SNAT) + == constants.L3_AGENT_MODE_DVR_SNAT) def _dvr_internal_network_removed(self, port): super(DvrEdgeHaRouter, self)._dvr_internal_network_removed(port) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/dvr_edge_router.py neutron-9.0.0~b3~dev557/neutron/agent/l3/dvr_edge_router.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/dvr_edge_router.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/dvr_edge_router.py 2016-08-29 20:05:49.000000000 +0000 @@ -12,7 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. 
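For context: neutron/agent/l3/config.py a few hunks above is now only a compatibility shim (kept, per the TODO, until neutron-fwaas stops importing it), while the actual options live in neutron.conf.agent.l3.config. A sketch of how a consumer registers the relocated options, mirroring the l3_agent.py hunk later in this diff (the register_l3_agent_config_opts call is taken from that hunk):

    from oslo_config import cfg

    from neutron.conf.agent.l3 import config as l3_config

    # Register the relocated L3 agent options on the global config object.
    l3_config.register_l3_agent_config_opts(l3_config.OPTS, cfg.CONF)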
-from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants from oslo_log import log as logging from neutron._i18n import _LE @@ -29,7 +29,8 @@ def __init__(self, agent, host, *args, **kwargs): super(DvrEdgeRouter, self).__init__(agent, host, *args, **kwargs) - self.snat_namespace = None + self.snat_namespace = dvr_snat_ns.SnatNamespace( + self.router_id, self.agent_conf, self.driver, self.use_ipv6) self.snat_iptables_manager = None def external_gateway_added(self, ex_gw_port, interface_name): @@ -42,19 +43,27 @@ # the same routes to the snat namespace after the gateway port # is added, we need to call routes_updated here. self.routes_updated([], self.router['routes']) + elif self.snat_namespace.exists(): + # This is the case where the snat was moved manually or + # rescheduled to a different agent when the agent was dead. + LOG.debug("SNAT was moved or rescheduled to a different host " + "and does not match with the current host. This is " + "a stale namespace %s and will be cleared from the " + "current dvr_snat host.", self.snat_namespace.name) + self.external_gateway_removed(ex_gw_port, interface_name) def external_gateway_updated(self, ex_gw_port, interface_name): if not self._is_this_snat_host(): # no centralized SNAT gateway for this node/agent LOG.debug("not hosting snat for router: %s", self.router['id']) - if self.snat_namespace: + if self.snat_namespace.exists(): LOG.debug("SNAT was rescheduled to host %s. Clearing snat " "namespace.", self.router.get('gw_port_host')) return self.external_gateway_removed( ex_gw_port, interface_name) return - if not self.snat_namespace: + if not self.snat_namespace.exists(): # SNAT might be rescheduled to this agent; need to process like # newly created gateway return self.external_gateway_added(ex_gw_port, interface_name) @@ -67,7 +76,7 @@ def _external_gateway_removed(self, ex_gw_port, interface_name): super(DvrEdgeRouter, self).external_gateway_removed(ex_gw_port, interface_name) - if not self._is_this_snat_host() and not self.snat_namespace: + if not self._is_this_snat_host() and not self.snat_namespace.exists(): # no centralized SNAT gateway for this node/agent LOG.debug("not hosting snat for router: %s", self.router['id']) return @@ -79,9 +88,8 @@ def external_gateway_removed(self, ex_gw_port, interface_name): self._external_gateway_removed(ex_gw_port, interface_name) - if self.snat_namespace: + if self.snat_namespace.exists(): self.snat_namespace.delete() - self.snat_namespace = None def internal_network_added(self, port): super(DvrEdgeRouter, self).internal_network_added(port) @@ -155,10 +163,6 @@ # TODO(mlavalle): in the near future, this method should contain the # code in the L3 agent that creates a gateway for a dvr. The first step # is to move the creation of the snat namespace here - self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'], - self.agent_conf, - self.driver, - self.use_ipv6) self.snat_namespace.create() return self.snat_namespace @@ -197,12 +201,10 @@ def update_routing_table(self, operation, route): if self.get_ex_gw_port() and self._is_this_snat_host(): - ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name( - self.router['id']) + ns_name = self.snat_namespace.name # NOTE: For now let us apply the static routes both in SNAT # namespace and Router Namespace, to reduce the complexity. 
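The DvrEdgeRouter hunks in this file all apply one pattern: the SnatNamespace wrapper is now constructed unconditionally in __init__, and the former `if self.snat_namespace:` None-checks become kernel-backed `self.snat_namespace.exists()` queries. A minimal sketch of the pattern (illustrative class, not neutron's):

    class Namespace(object):
        def __init__(self, name):
            self.name = name
            self._created = False

        def create(self):
            self._created = True    # stand-in for `ip netns add`

        def exists(self):
            return self._created    # stand-in for an `ip netns` lookup

        def delete(self):
            self._created = False   # stand-in for `ip netns delete`

    ns = Namespace('snat-<router-id>')  # always constructed...
    if ns.exists():                     # ...but only acted on when present
        ns.delete()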
- ip_wrapper = ip_lib.IPWrapper(namespace=ns_name) - if ip_wrapper.netns.exists(ns_name): + if self.snat_namespace.exists(): super(DvrEdgeRouter, self)._update_routing_table( operation, route, namespace=ns_name) else: @@ -212,7 +214,7 @@ def delete(self, agent): super(DvrEdgeRouter, self).delete(agent) - if self.snat_namespace: + if self.snat_namespace.exists(): self.snat_namespace.delete() def process_address_scope(self): @@ -233,8 +235,8 @@ if external_port: external_port_scopemark = self._get_port_devicename_scopemark( [external_port], self.get_external_device_name) - for ip_version in (l3_constants.IP_VERSION_4, - l3_constants.IP_VERSION_6): + for ip_version in (lib_constants.IP_VERSION_4, + lib_constants.IP_VERSION_6): ports_scopemark[ip_version].update( external_port_scopemark[ip_version]) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/dvr_fip_ns.py neutron-9.0.0~b3~dev557/neutron/agent/l3/dvr_fip_ns.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/dvr_fip_ns.py 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/dvr_fip_ns.py 2016-08-29 20:05:49.000000000 +0000 @@ -110,6 +110,18 @@ prefix=FIP_EXT_DEV_PREFIX, mtu=ex_gw_port.get('mtu')) + # Remove stale fg devices + ip_wrapper = ip_lib.IPWrapper(namespace=ns_name) + devices = ip_wrapper.get_devices() + for device in devices: + name = device.name + if name.startswith(FIP_EXT_DEV_PREFIX) and name != interface_name: + ext_net_bridge = self.agent_conf.external_network_bridge + self.driver.unplug(name, + bridge=ext_net_bridge, + namespace=ns_name, + prefix=FIP_EXT_DEV_PREFIX) + ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips']) self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name, clean_connections=True) @@ -117,18 +129,16 @@ self.update_gateway_port(ex_gw_port) cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name] - # TODO(Carl) mlavelle's work has self.ip_wrapper - ip_wrapper = ip_lib.IPWrapper(namespace=ns_name) ip_wrapper.netns.execute(cmd, check_exit_code=False) def create(self): - # TODO(Carl) Get this functionality from mlavelle's namespace baseclass LOG.debug("DVR: add fip namespace: %s", self.name) - ip_wrapper_root = ip_lib.IPWrapper() - ip_wrapper = ip_wrapper_root.ensure_namespace(self.get_name()) + # parent class will ensure the namespace exists and turn-on forwarding + super(FipNamespace, self).create() # Somewhere in the 3.19 kernel timeframe ip_nonlocal_bind was # changed to be a per-namespace attribute. To be backwards # compatible we need to try both if at first we fail. 
+ ip_wrapper = ip_lib.IPWrapper(namespace=self.name) try: ip_wrapper.netns.execute(['sysctl', '-w', @@ -139,15 +149,10 @@ LOG.debug('DVR: fip namespace (%s) does not support setting ' 'net.ipv4.ip_nonlocal_bind, trying in root namespace', self.name) - ip_wrapper_root.netns.execute(['sysctl', - '-w', - 'net.ipv4.ip_nonlocal_bind=1'], - run_as_root=True) - - ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1']) - if self.use_ipv6: - ip_wrapper.netns.execute(['sysctl', '-w', - 'net.ipv6.conf.all.forwarding=1']) + self.ip_wrapper_root.netns.execute(['sysctl', + '-w', + 'net.ipv4.ip_nonlocal_bind=1'], + run_as_root=True) # no connection tracking needed in fip namespace self._iptables_manager.ipv4['raw'].add_rule('PREROUTING', @@ -156,6 +161,11 @@ def delete(self): self.destroyed = True + self._delete() + self.agent_gateway_port = None + + @namespaces.check_ns_existence + def _delete(self): ip_wrapper = ip_lib.IPWrapper(namespace=self.name) for d in ip_wrapper.get_devices(exclude_loopback=True): if d.name.startswith(FIP_2_ROUTER_DEV_PREFIX): @@ -170,7 +180,6 @@ bridge=ext_net_bridge, namespace=self.name, prefix=FIP_EXT_DEV_PREFIX) - self.agent_gateway_port = None # TODO(mrsmith): add LOG warn if fip count != 0 LOG.debug('DVR: destroy fip namespace: %s', self.name) @@ -228,6 +237,10 @@ if is_gateway_not_in_subnet: ipd.route.add_route(gw_ip, scope='link') ipd.route.add_gateway(gw_ip) + else: + current_gateway = ipd.route.get_gateway() + if current_gateway and current_gateway.get('gateway'): + ipd.route.delete_gateway(current_gateway.get('gateway')) def _add_cidr_to_device(self, device, ip_cidr): if not device.addr.list(to=ip_cidr): @@ -252,8 +265,7 @@ rtr_2_fip_dev, fip_2_rtr_dev = ip_wrapper.add_veth(rtr_2_fip_name, fip_2_rtr_name, fip_ns_name) - mtu = (self.agent_conf.network_device_mtu or - ri.get_ex_gw_port().get('mtu')) + mtu = ri.get_ex_gw_port().get('mtu') if mtu: rtr_2_fip_dev.link.set_mtu(mtu) fip_2_rtr_dev.link.set_mtu(mtu) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/dvr_local_router.py neutron-9.0.0~b3~dev557/neutron/agent/l3/dvr_local_router.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/dvr_local_router.py 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/dvr_local_router.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,7 +16,8 @@ import collections import netaddr -from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants +from neutron_lib import exceptions from oslo_log import log as logging from oslo_utils import excutils import six @@ -26,7 +27,6 @@ from neutron.agent.l3 import dvr_router_base from neutron.agent.linux import ip_lib from neutron.common import constants as n_const -from neutron.common import exceptions from neutron.common import utils as common_utils LOG = logging.getLogger(__name__) @@ -77,23 +77,18 @@ rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) mark_traffic_to_floating_ip = ( - 'floatingip', '-d %s -i %s -j MARK --set-xmark %s' % ( + 'floatingip', '-d %s/32 -i %s -j MARK --set-xmark %s' % ( floating_ip, rtr_2_fip_name, internal_mark)) mark_traffic_from_fixed_ip = ( - 'FORWARD', '-s %s -j $float-snat' % fixed_ip) + 'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip) return [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip] def floating_ip_added_dist(self, fip, fip_cidr): """Add floating IP to FIP namespace.""" floating_ip = fip['floating_ip_address'] fixed_ip = fip['fixed_ip_address'] - rule_pr = self.fip_ns.allocate_rule_priority(floating_ip) - 
self.floating_ips_dict[floating_ip] = rule_pr + self._add_floating_ip_rule(floating_ip, fixed_ip) fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) - ip_rule = ip_lib.IPRule(namespace=self.ns_name) - ip_rule.rule.add(ip=fixed_ip, - table=dvr_fip_ns.FIP_RT_TBL, - priority=rule_pr) #Add routing rule in fip namespace fip_ns_name = self.fip_ns.get_name() if self.rtr_fip_subnet is None: @@ -112,6 +107,24 @@ # update internal structures self.dist_fip_count = self.dist_fip_count + 1 + def _add_floating_ip_rule(self, floating_ip, fixed_ip): + rule_pr = self.fip_ns.allocate_rule_priority(floating_ip) + self.floating_ips_dict[floating_ip] = rule_pr + ip_rule = ip_lib.IPRule(namespace=self.ns_name) + ip_rule.rule.add(ip=fixed_ip, + table=dvr_fip_ns.FIP_RT_TBL, + priority=rule_pr) + + def _remove_floating_ip_rule(self, floating_ip): + if floating_ip in self.floating_ips_dict: + rule_pr = self.floating_ips_dict[floating_ip] + ip_rule = ip_lib.IPRule(namespace=self.ns_name) + ip_rule.rule.delete(ip=floating_ip, + table=dvr_fip_ns.FIP_RT_TBL, + priority=rule_pr) + self.fip_ns.deallocate_rule_priority(floating_ip) + #TODO(rajeev): Handle else case - exception/log? + def floating_ip_removed_dist(self, fip_cidr): """Remove floating IP from FIP namespace.""" floating_ip = fip_cidr.split('/')[0] @@ -123,14 +136,7 @@ rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair() fip_ns_name = self.fip_ns.get_name() - if floating_ip in self.floating_ips_dict: - rule_pr = self.floating_ips_dict[floating_ip] - ip_rule = ip_lib.IPRule(namespace=self.ns_name) - ip_rule.rule.delete(ip=floating_ip, - table=dvr_fip_ns.FIP_RT_TBL, - priority=rule_pr) - self.fip_ns.deallocate_rule_priority(floating_ip) - #TODO(rajeev): Handle else case - exception/log? + self._remove_floating_ip_rule(floating_ip) device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name) @@ -147,18 +153,28 @@ self.rtr_fip_subnet = None ns_ip.del_veth(fip_2_rtr_name) + def floating_ip_moved_dist(self, fip): + """Handle floating IP move between fixed IPs.""" + floating_ip = fip['floating_ip_address'] + self._remove_floating_ip_rule(floating_ip) + self._add_floating_ip_rule(floating_ip, fip['fixed_ip_address']) + def add_floating_ip(self, fip, interface_name, device): # Special Handling for DVR - update FIP namespace ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) self.floating_ip_added_dist(fip, ip_cidr) - return l3_constants.FLOATINGIP_STATUS_ACTIVE + return lib_constants.FLOATINGIP_STATUS_ACTIVE def remove_floating_ip(self, device, ip_cidr): self.floating_ip_removed_dist(ip_cidr) + def move_floating_ip(self, fip): + self.floating_ip_moved_dist(fip) + return lib_constants.FLOATINGIP_STATUS_ACTIVE + def _get_internal_port(self, subnet_id): """Return internal router port based on subnet_id.""" - router_ports = self.router.get(l3_constants.INTERFACE_KEY, []) + router_ports = self.router.get(lib_constants.INTERFACE_KEY, []) for port in router_ports: fips = port['fixed_ips'] for f in fips: @@ -236,7 +252,7 @@ subnet_ports = self.agent.get_ports_by_subnet(subnet_id) for p in subnet_ports: - if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS: + if p['device_owner'] not in lib_constants.ROUTER_INTERFACE_OWNERS: for fixed_ip in p['fixed_ips']: self._update_arp_entry(fixed_ip['ip_address'], p['mac_address'], @@ -444,7 +460,7 @@ def _get_address_scope_mark(self): # Prepare address scope iptables rule for internal ports - internal_ports = self.router.get(l3_constants.INTERFACE_KEY, []) + internal_ports = 
self.router.get(lib_constants.INTERFACE_KEY, []) ports_scopemark = self._get_port_devicename_scopemark( internal_ports, self.get_internal_device_name) # DVR local router will use rfp port as external port @@ -458,7 +474,7 @@ ext_scope = self._get_external_address_scope() ext_scope_mark = self.get_address_scope_mark_mask(ext_scope) - ports_scopemark[l3_constants.IP_VERSION_4][ext_device_name] = ( + ports_scopemark[lib_constants.IP_VERSION_4][ext_device_name] = ( ext_scope_mark) return ports_scopemark @@ -520,7 +536,7 @@ self.router_id) rtr_2_fip, _fip_2_rtr = self.rtr_fip_subnet.get_pair() exist_routes = device.route.list_routes( - l3_constants.IP_VERSION_4, via=str(rtr_2_fip.ip)) + lib_constants.IP_VERSION_4, via=str(rtr_2_fip.ip)) return {common_utils.ip_to_cidr(route['cidr']) for route in exist_routes} diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/dvr_router_base.py neutron-9.0.0~b3~dev557/neutron/agent/l3/dvr_router_base.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/dvr_router_base.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/dvr_router_base.py 2016-08-29 20:05:49.000000000 +0000 @@ -25,6 +25,7 @@ self.agent = agent self.host = host + self.snat_ports = None def process(self, agent): super(DvrRouterBase, self).process(agent) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/dvr_snat_ns.py neutron-9.0.0~b3~dev557/neutron/agent/l3/dvr_snat_ns.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/dvr_snat_ns.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/dvr_snat_ns.py 2016-08-29 20:05:49.000000000 +0000 @@ -10,11 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. +from neutron_lib import constants from oslo_log import log as logging from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib -from neutron.common import constants LOG = logging.getLogger(__name__) SNAT_NS_PREFIX = 'snat-' @@ -33,6 +33,7 @@ def get_snat_ns_name(cls, router_id): return namespaces.build_ns_name(SNAT_NS_PREFIX, router_id) + @namespaces.check_ns_existence def delete(self): ns_ip = ip_lib.IPWrapper(namespace=self.name) for d in ns_ip.get_devices(exclude_loopback=True): diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/ha.py neutron-9.0.0~b3~dev557/neutron/agent/l3/ha.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/ha.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/ha.py 2016-08-29 20:05:49.000000000 +0000 @@ -134,15 +134,13 @@ # include any IPv6 subnet, enable the gateway interface to accept # Router Advts from upstream router for default route. 
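This ha.py hunk stops duplicating the gateway-IP inspection and delegates to RouterInfo._enable_ra_on_gw, a helper added by the router_info.py hunks later in this diff, so the 'no IPv6 gateway address, fall back to router advertisements' rule lives in one place. Condensed, the helper reads:

    def _enable_ra_on_gw(self, ex_gw_port, ns_name, interface_name):
        gateway_ips = self._get_external_gw_ips(ex_gw_port)
        if not self.use_ipv6 or self.is_v6_gateway_set(gateway_ips):
            return
        # No IPv6 gw_ip configured: accept RAs on the gateway device.
        self.driver.configure_ipv6_ra(ns_name, interface_name)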
ex_gw_port_id = ri.ex_gw_port and ri.ex_gw_port['id'] - if state == 'master' and ex_gw_port_id and ri.use_ipv6: - gateway_ips = ri._get_external_gw_ips(ri.ex_gw_port) - if not ri.is_v6_gateway_set(gateway_ips): - interface_name = ri.get_external_device_name(ex_gw_port_id) - if ri.router.get('distributed', False): - namespace = ri.ha_namespace - else: - namespace = ri.ns_name - ri.driver.configure_ipv6_ra(namespace, interface_name) + if state == 'master' and ex_gw_port_id: + interface_name = ri.get_external_device_name(ex_gw_port_id) + if ri.router.get('distributed', False): + namespace = ri.ha_namespace + else: + namespace = ri.ns_name + ri._enable_ra_on_gw(ri.ex_gw_port, namespace, interface_name) def _update_metadata_proxy(self, ri, router_id, state): if state == 'master': diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/ha_router.py neutron-9.0.0~b3~dev557/neutron/agent/l3/ha_router.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/ha_router.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/ha_router.py 2016-08-29 20:05:49.000000000 +0000 @@ -189,17 +189,15 @@ def _add_default_gw_virtual_route(self, ex_gw_port, interface_name): gateway_ips = self._get_external_gw_ips(ex_gw_port) - if not gateway_ips: - return default_gw_rts = [] instance = self._get_keepalived_instance() for gw_ip in gateway_ips: - # TODO(Carl) This is repeated everywhere. A method would - # be nice. - default_gw = n_consts.IP_ANY[netaddr.IPAddress(gw_ip).version] - default_gw_rts.append(keepalived.KeepalivedVirtualRoute( - default_gw, gw_ip, interface_name)) + # TODO(Carl) This is repeated everywhere. A method would + # be nice. + default_gw = n_consts.IP_ANY[netaddr.IPAddress(gw_ip).version] + default_gw_rts.append(keepalived.KeepalivedVirtualRoute( + default_gw, gw_ip, interface_name)) instance.virtual_routes.gateway_routes = default_gw_rts def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name): @@ -354,6 +352,8 @@ self._plug_external_gateway(ex_gw_port, interface_name, self.ns_name) self._add_gateway_vip(ex_gw_port, interface_name) self._disable_ipv6_addressing_on_interface(interface_name) + if self.ha_state == 'master': + self._enable_ra_on_gw(ex_gw_port, self.ns_name, interface_name) def external_gateway_updated(self, ex_gw_port, interface_name): self._plug_external_gateway( diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/item_allocator.py neutron-9.0.0~b3~dev557/neutron/agent/l3/item_allocator.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/item_allocator.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/item_allocator.py 2016-08-03 20:10:33.000000000 +0000 @@ -14,6 +14,12 @@ import os +from oslo_log import log as logging + +from neutron._i18n import _LW + +LOG = logging.getLogger(__name__) + class ItemAllocator(object): """Manages allocation of items from a pool @@ -42,11 +48,21 @@ self.remembered = {} self.pool = item_pool + read_error = False for line in self._read(): - key, saved_value = line.strip().split(delimiter) - self.remembered[key] = self.ItemClass(saved_value) + try: + key, saved_value = line.strip().split(delimiter) + self.remembered[key] = self.ItemClass(saved_value) + except ValueError: + read_error = True + LOG.warning(_LW("Invalid line in %(file)s, " + "ignoring: %(line)s"), + {'file': state_file, 'line': line}) self.pool.difference_update(self.remembered.values()) + if read_error: + LOG.debug("Re-writing file %s due to read error", state_file) + self._write_allocations() def allocate(self, key): """Try to 
allocate an item of ItemClass type. diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/keepalived_state_change.py neutron-9.0.0~b3~dev557/neutron/agent/l3/keepalived_state_change.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/keepalived_state_change.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/keepalived_state_change.py 2016-08-29 20:05:49.000000000 +0000 @@ -26,6 +26,7 @@ from neutron.agent.linux import ip_monitor from neutron.agent.linux import utils as agent_utils from neutron.common import config +from neutron.conf.agent.l3 import keepalived LOG = logging.getLogger(__name__) @@ -97,32 +98,6 @@ LOG.debug('Notified agent router %s, state %s', self.router_id, state) -def register_opts(conf): - conf.register_cli_opt( - cfg.StrOpt('router_id', help=_('ID of the router'))) - conf.register_cli_opt( - cfg.StrOpt('namespace', help=_('Namespace of the router'))) - conf.register_cli_opt( - cfg.StrOpt('conf_dir', help=_('Path to the router directory'))) - conf.register_cli_opt( - cfg.StrOpt('monitor_interface', help=_('Interface to monitor'))) - conf.register_cli_opt( - cfg.StrOpt('monitor_cidr', help=_('CIDR to monitor'))) - conf.register_cli_opt( - cfg.StrOpt('pid_file', help=_('Path to PID file for this process'))) - conf.register_cli_opt( - cfg.StrOpt('user', help=_('User (uid or name) running this process ' - 'after its initialization'))) - conf.register_cli_opt( - cfg.StrOpt('group', help=_('Group (gid or name) running this process ' - 'after its initialization'))) - conf.register_opt( - cfg.StrOpt('metadata_proxy_socket', - default='$state_path/metadata_proxy', - help=_('Location of Metadata Proxy UNIX domain ' - 'socket'))) - - def configure(conf): config.init(sys.argv[1:]) conf.set_override('log_dir', cfg.CONF.conf_dir) @@ -131,7 +106,8 @@ def main(): - register_opts(cfg.CONF) + keepalived.register_cli_l3_agent_keepalived_opts() + keepalived.register_l3_agent_keepalived_opts() configure(cfg.CONF) MonitorDaemon(cfg.CONF.pid_file, cfg.CONF.router_id, diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/l3_agent_extension.py neutron-9.0.0~b3~dev557/neutron/agent/l3/l3_agent_extension.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/l3_agent_extension.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/l3_agent_extension.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,55 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +import six + +from neutron.agent import agent_extension + + +@six.add_metaclass(abc.ABCMeta) +class L3AgentCoreResourceExtension(agent_extension.AgentCoreResourceExtension): + """Define stable abstract interface for l3 agent extensions. + + An agent extension extends the agent core functionality. + """ + + @abc.abstractmethod + def add_router(self, context, data): + """add agent extension for router. + + Called on router create. 
+ + :param context: rpc context + :param data: router data + """ + + @abc.abstractmethod + def update_router(self, context, data): + """Handle agent extension for update. + + Called on router update. + + :param context: rpc context + :param data: router data + """ + + @abc.abstractmethod + def delete_router(self, context, data): + """Delete router from agent extension. + + :param context: rpc context + :param data: router data + """ diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/l3_agent_extensions_manager.py neutron-9.0.0~b3~dev557/neutron/agent/l3/l3_agent_extensions_manager.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/l3_agent_extensions_manager.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/l3_agent_extensions_manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,72 @@ +# Copyright (c) 2015 Mellanox Technologies, Ltd +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log + +from neutron._i18n import _LE +from neutron.agent import agent_extensions_manager as agent_ext_manager + +LOG = log.getLogger(__name__) + + +L3_AGENT_EXT_MANAGER_NAMESPACE = 'neutron.agent.l3.extensions' + + +def register_opts(conf): + agent_ext_manager.register_opts(conf) + + +class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager): + """Manage l3 agent extensions.""" + + def __init__(self, conf): + super(L3AgentExtensionsManager, self).__init__(conf, + L3_AGENT_EXT_MANAGER_NAMESPACE) + + def add_router(self, context, data): + """Notify all agent extensions to add router.""" + for extension in self: + if hasattr(extension.obj, 'add_router'): + extension.obj.add_router(context, data) + else: + LOG.error( + _LE("Agent Extension '%(name)s' does not " + "implement method add_router"), + {'name': extension.name} + ) + + def update_router(self, context, data): + """Notify all agent extensions to update router.""" + for extension in self: + if hasattr(extension.obj, 'update_router'): + extension.obj.update_router(context, data) + else: + LOG.error( + _LE("Agent Extension '%(name)s' does not " + "implement method update_router"), + {'name': extension.name} + ) + + def delete_router(self, context, data): + """Notify all agent extensions to delete router.""" + for extension in self: + if hasattr(extension.obj, 'delete_router'): + extension.obj.delete_router(context, data) + else: + LOG.error( + _LE("Agent Extension '%(name)s' does not " + "implement method delete_router"), + {'name': extension.name} + ) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/legacy_router.py neutron-9.0.0~b3~dev557/neutron/agent/l3/legacy_router.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/legacy_router.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/legacy_router.py 2016-08-29 20:05:49.000000000 +0000 @@ -12,7 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. 
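Together, l3_agent_extension.py and this manager fix the contract for out-of-tree L3 extensions: implement initialize() plus the three abstract router hooks, and register the class under the 'neutron.agent.l3.extensions' stevedore namespace (e.g. through a setuptools entry point). A minimal do-nothing extension as a sketch; the class and its behavior are hypothetical:

    from neutron.agent.l3 import l3_agent_extension


    class RouterEventRecorder(l3_agent_extension.L3AgentCoreResourceExtension):
        """Hypothetical extension recording router lifecycle events."""

        def initialize(self, connection, driver_type):
            self.events = []

        def add_router(self, context, data):
            self.events.append(('add', data['id']))

        def update_router(self, context, data):
            self.events.append(('update', data['id']))

        def delete_router(self, context, data):
            # Per the l3 agent hunk earlier in this diff, delete_router
            # receives the router id rather than the full router dict.
            self.events.append(('delete', data))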
-from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants from neutron.agent.l3 import router_info as router from neutron.agent.linux import ip_lib @@ -21,7 +21,7 @@ class LegacyRouter(router.RouterInfo): def add_floating_ip(self, fip, interface_name, device): if not self._add_fip_addr_to_device(fip, device): - return l3_constants.FLOATINGIP_STATUS_ERROR + return lib_constants.FLOATINGIP_STATUS_ERROR # As GARP is processed in a distinct thread the call below # won't raise an exception to be handled. @@ -29,4 +29,4 @@ interface_name, fip['floating_ip_address'], self.agent_conf) - return l3_constants.FLOATINGIP_STATUS_ACTIVE + return lib_constants.FLOATINGIP_STATUS_ACTIVE diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/link_local_allocator.py neutron-9.0.0~b3~dev557/neutron/agent/l3/link_local_allocator.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/link_local_allocator.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/link_local_allocator.py 2016-08-03 20:10:33.000000000 +0000 @@ -35,7 +35,7 @@ These link local addresses are used for routing inside the fip namespaces. The associations need to persist across agent restarts to maintain consistency. Without this, there is disruption in network connectivity - as the agent rewires the connections with the new IP address assocations. + as the agent rewires the connections with the new IP address associations. Persisting these in the database is unnecessary and would degrade performance. diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/namespace_manager.py neutron-9.0.0~b3~dev557/neutron/agent/l3/namespace_manager.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/namespace_manager.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/namespace_manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -130,6 +130,10 @@ ns_prefix, ns_id = self.get_prefix_and_id(ns) self._cleanup(ns_prefix, ns_id) + def ensure_snat_cleanup(self, router_id): + prefix = dvr_snat_ns.SNAT_NS_PREFIX + self._cleanup(prefix, router_id) + def _cleanup(self, ns_prefix, ns_id): ns_class = self.ns_prefix_to_class_map[ns_prefix] ns = ns_class(ns_id, self.agent_conf, self.driver, use_ipv6=False) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/namespaces.py neutron-9.0.0~b3~dev557/neutron/agent/l3/namespaces.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/namespaces.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/namespaces.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,9 +13,12 @@ # under the License. # +import functools + from oslo_log import log as logging +from oslo_utils import excutils -from neutron._i18n import _LE +from neutron._i18n import _LE, _LW from neutron.agent.linux import ip_lib LOG = logging.getLogger(__name__) @@ -58,6 +61,25 @@ return ns_name[dash_index + 1:] +def check_ns_existence(f): + @functools.wraps(f) + def wrapped(self, *args, **kwargs): + if not self.exists(): + LOG.warning(_LW('Namespace %(name)s does not exist. 
Skipping ' + '%(func)s'), + {'name': self.name, 'func': f.__name__}) + return + try: + return f(self, *args, **kwargs) + except RuntimeError: + with excutils.save_and_reraise_exception() as ctx: + if not self.exists(): + LOG.debug('Namespace %(name)s was concurrently deleted', + {'name': self.name}) + ctx.reraise = False + return wrapped + + class Namespace(object): def __init__(self, name, agent_conf, driver, use_ipv6): @@ -82,6 +104,9 @@ msg = _LE('Failed trying to delete namespace: %s') LOG.exception(msg, self.name) + def exists(self): + return self.ip_wrapper_root.netns.exists(self.name) + class RouterNamespace(Namespace): @@ -95,6 +120,7 @@ def _get_ns_name(cls, router_id): return build_ns_name(NS_PREFIX, router_id) + @check_ns_existence def delete(self): ns_ip = ip_lib.IPWrapper(namespace=self.name) for d in ns_ip.get_devices(exclude_loopback=True): diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3/router_info.py neutron-9.0.0~b3~dev557/neutron/agent/l3/router_info.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3/router_info.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3/router_info.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,7 +14,7 @@ import collections import netaddr -from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants from oslo_log import log as logging from neutron._i18n import _, _LE, _LW @@ -50,6 +50,7 @@ self.router_id = router_id self.ex_gw_port = None self._snat_enabled = None + self.fip_map = {} self.internal_ports = [] self.floating_ips = set() # Invoke the setter for establishing initial SNAT action @@ -142,7 +143,7 @@ def get_floating_ips(self): """Filter Floating IPs to be hosted on this agent.""" - return self.router.get(l3_constants.FLOATINGIP_KEY, []) + return self.router.get(lib_constants.FLOATINGIP_KEY, []) def floating_forward_rules(self, floating_ip, fixed_ip): return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' % @@ -154,10 +155,10 @@ def floating_mangle_rules(self, floating_ip, fixed_ip, internal_mark): mark_traffic_to_floating_ip = ( - 'floatingip', '-d %s -j MARK --set-xmark %s' % ( + 'floatingip', '-d %s/32 -j MARK --set-xmark %s' % ( floating_ip, internal_mark)) mark_traffic_from_fixed_ip = ( - 'FORWARD', '-s %s -j $float-snat' % fixed_ip) + 'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip) return [mark_traffic_to_floating_ip, mark_traffic_from_fixed_ip] def get_address_scope_mark_mask(self, address_scope=None): @@ -225,7 +226,7 @@ ports_scopemark = self._get_address_scope_mark() devices_in_ext_scope = { device for device, mark - in ports_scopemark[l3_constants.IP_VERSION_4].items() + in ports_scopemark[lib_constants.IP_VERSION_4].items() if mark == ext_scope_mark} # Add address scope for floatingip egress for device in devices_in_ext_scope: @@ -277,6 +278,9 @@ def remove_floating_ip(self, device, ip_cidr): device.delete_addr_and_conntrack_state(ip_cidr) + def move_floating_ip(self, fip): + return lib_constants.FLOATINGIP_STATUS_ACTIVE + def remove_external_gateway_ip(self, device, ip_cidr): device.delete_addr_and_conntrack_state(ip_cidr) @@ -299,6 +303,7 @@ device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) existing_cidrs = self.get_router_cidrs(device) new_cidrs = set() + gw_cidrs = self._get_gw_ips_cidr() floating_ips = self.get_floating_ips() # Loop once to ensure that floating ips are configured.
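The hunks below add detection of a floating IP that was re-associated to a different fixed IP between two processing passes: process() snapshots fip_map = {floating_ip: fixed_ip}, and process_floating_ip_addresses() compares fresh server data against that snapshot, calling move_floating_ip() on a mismatch. Boiled down to its decision logic (values are illustrative):

    def classify(fip, existing_cidrs, fip_map):
        ip_cidr = fip['floating_ip_address'] + '/32'
        if ip_cidr not in existing_cidrs:
            return 'add'            # FIP not configured on the device yet
        old_fixed_ip = fip_map.get(fip['floating_ip_address'])
        if old_fixed_ip is not None and old_fixed_ip != fip['fixed_ip_address']:
            return 'move'           # same FIP, re-associated fixed IP
        return 'nochange'

    fip_map = {'203.0.113.5': '10.0.0.4'}  # snapshot from the previous pass
    fip = {'floating_ip_address': '203.0.113.5', 'fixed_ip_address': '10.0.0.7'}
    assert classify(fip, {'203.0.113.5/32'}, fip_map) == 'move'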
@@ -306,19 +311,26 @@ fip_ip = fip['floating_ip_address'] ip_cidr = common_utils.ip_to_cidr(fip_ip) new_cidrs.add(ip_cidr) - fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE + fip_statuses[fip['id']] = lib_constants.FLOATINGIP_STATUS_ACTIVE if ip_cidr not in existing_cidrs: fip_statuses[fip['id']] = self.add_floating_ip( fip, interface_name, device) LOG.debug('Floating ip %(id)s added, status %(status)s', {'id': fip['id'], 'status': fip_statuses.get(fip['id'])}) + elif (fip_ip in self.fip_map and + self.fip_map[fip_ip] != fip['fixed_ip_address']): + LOG.debug("Floating IP was moved from fixed IP " + "%(old)s to %(new)s", + {'old': self.fip_map[fip_ip], + 'new': fip['fixed_ip_address']}) + fip_statuses[fip['id']] = self.move_floating_ip(fip) elif fip_statuses[fip['id']] == fip['status']: # mark the status as not changed. we can't remove it because # that's how the caller determines that it was removed fip_statuses[fip['id']] = FLOATINGIP_STATUS_NOCHANGE fips_to_remove = ( - ip_cidr for ip_cidr in existing_cidrs - new_cidrs + ip_cidr for ip_cidr in existing_cidrs - new_cidrs - gw_cidrs if common_utils.is_cidr_host(ip_cidr)) for ip_cidr in fips_to_remove: LOG.debug("Removing floating ip %s from interface %s in " @@ -327,6 +339,17 @@ return fip_statuses + def _get_gw_ips_cidr(self): + gw_cidrs = set() + ex_gw_port = self.get_ex_gw_port() + if ex_gw_port: + for ip_addr in ex_gw_port['fixed_ips']: + ex_gw_ip = ip_addr['ip_address'] + addr = netaddr.IPAddress(ex_gw_ip) + if addr.version == lib_constants.IP_VERSION_4: + gw_cidrs.add(common_utils.ip_to_cidr(ex_gw_ip)) + return gw_cidrs + def configure_fip_addresses(self, interface_name): try: return self.process_floating_ip_addresses(interface_name) @@ -338,14 +361,14 @@ def put_fips_in_error_state(self): fip_statuses = {} - for fip in self.router.get(l3_constants.FLOATINGIP_KEY, []): - fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR + for fip in self.router.get(lib_constants.FLOATINGIP_KEY, []): + fip_statuses[fip['id']] = lib_constants.FLOATINGIP_STATUS_ERROR return fip_statuses def delete(self, agent): self.router['gw_port'] = None - self.router[l3_constants.INTERFACE_KEY] = [] - self.router[l3_constants.FLOATINGIP_KEY] = [] + self.router[lib_constants.INTERFACE_KEY] = [] + self.router[lib_constants.FLOATINGIP_KEY] = [] self.process_delete(agent) self.disable_radvd() self.router_namespace.delete() @@ -464,7 +487,7 @@ def _process_internal_ports(self, pd): existing_port_ids = set(p['id'] for p in self.internal_ports) - internal_ports = self.router.get(l3_constants.INTERFACE_KEY, []) + internal_ports = self.router.get(lib_constants.INTERFACE_KEY, []) current_port_ids = set(p['id'] for p in internal_ports if p['admin_state_up']) @@ -588,6 +611,14 @@ device = ip_lib.IPDevice(device_name, namespace=namespace) device.route.add_route(subnet['gateway_ip'], scope='link') + def _enable_ra_on_gw(self, ex_gw_port, ns_name, interface_name): + gateway_ips = self._get_external_gw_ips(ex_gw_port) + if not self.use_ipv6 or self.is_v6_gateway_set(gateway_ips): + return + + # There is no IPv6 gw_ip, use RouterAdvt for default route. 
+ self.driver.configure_ipv6_ra(ns_name, interface_name) + def _external_gateway_added(self, ex_gw_port, interface_name, ns_name, preserve_ips): LOG.debug("External gateway added: port(%s), interface(%s), ns(%s)", @@ -599,10 +630,6 @@ ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips']) gateway_ips = self._get_external_gw_ips(ex_gw_port) - enable_ra_on_gw = False - if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips): - # There is no IPv6 gw_ip, use RouterAdvt for default route. - enable_ra_on_gw = True self._add_route_to_gw(ex_gw_port, device_name=interface_name, namespace=ns_name, preserve_ips=preserve_ips) @@ -615,11 +642,18 @@ clean_connections=True) device = ip_lib.IPDevice(interface_name, namespace=ns_name) - for ip in gateway_ips or []: + current_gateways = set() + for ip_version in (lib_constants.IP_VERSION_4, + lib_constants.IP_VERSION_6): + gateway = device.route.get_gateway(ip_version=ip_version) + if gateway and gateway.get('gateway'): + current_gateways.add(gateway.get('gateway')) + for ip in current_gateways - set(gateway_ips): + device.route.delete_gateway(ip) + for ip in gateway_ips: device.route.add_gateway(ip) - if enable_ra_on_gw: - self.driver.configure_ipv6_ra(ns_name, interface_name) + self._enable_ra_on_gw(ex_gw_port, ns_name, interface_name) for fixed_ip in ex_gw_port['fixed_ips']: ip_lib.send_ip_addr_adv_notif(ns_name, @@ -650,10 +684,11 @@ ex_gw_port, interface_name) device = ip_lib.IPDevice(interface_name, namespace=self.ns_name) for ip_addr in ex_gw_port['fixed_ips']: + prefixlen = ip_addr.get('prefixlen') self.remove_external_gateway_ip(device, common_utils.ip_to_cidr( ip_addr['ip_address'], - ip_addr['prefixlen'])) + prefixlen)) self.driver.unplug(interface_name, bridge=self.agent_conf.external_network_bridge, namespace=self.ns_name, @@ -821,7 +856,7 @@ existing_floating_ips = self.floating_ips self.floating_ips = set(fip_statuses.keys()) for fip_id in existing_floating_ips - self.floating_ips: - fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN + fip_statuses[fip_id] = lib_constants.FLOATINGIP_STATUS_DOWN # filter out statuses that didn't change fip_statuses = {f: stat for f, stat in fip_statuses.items() if stat != FLOATINGIP_STATUS_NOCHANGE} @@ -880,8 +915,8 @@ 'PREROUTING', copy_address_scope_for_existing) def _get_port_devicename_scopemark(self, ports, name_generator): - devicename_scopemark = {l3_constants.IP_VERSION_4: dict(), - l3_constants.IP_VERSION_6: dict()} + devicename_scopemark = {lib_constants.IP_VERSION_4: dict(), + lib_constants.IP_VERSION_6: dict()} for p in ports: device_name = name_generator(p['id']) ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips']) @@ -895,7 +930,7 @@ def _get_address_scope_mark(self): # Prepare address scope iptables rule for internal ports - internal_ports = self.router.get(l3_constants.INTERFACE_KEY, []) + internal_ports = self.router.get(lib_constants.INTERFACE_KEY, []) ports_scopemark = self._get_port_devicename_scopemark( internal_ports, self.get_internal_device_name) @@ -904,8 +939,8 @@ if external_port: external_port_scopemark = self._get_port_devicename_scopemark( [external_port], self.get_external_device_name) - for ip_version in (l3_constants.IP_VERSION_4, - l3_constants.IP_VERSION_6): + for ip_version in (lib_constants.IP_VERSION_4, + lib_constants.IP_VERSION_6): ports_scopemark[ip_version].update( external_port_scopemark[ip_version]) return ports_scopemark @@ -918,13 +953,13 @@ external_port['id']) # Process address scope iptables rules - for ip_version in 
(l3_constants.IP_VERSION_4, - l3_constants.IP_VERSION_6): + for ip_version in (lib_constants.IP_VERSION_4, + lib_constants.IP_VERSION_6): scopemarks = ports_scopemark[ip_version] iptables = iptables_manager.get_tables(ip_version) iptables['mangle'].empty_chain('scope') iptables['filter'].empty_chain('scope') - dont_block_external = (ip_version == l3_constants.IP_VERSION_4 + dont_block_external = (ip_version == lib_constants.IP_VERSION_4 and self._snat_enabled and external_port) for device_name, mark in scopemarks.items(): # Add address scope iptables rule @@ -947,7 +982,7 @@ return scopes = external_port.get('address_scopes', {}) - return scopes.get(str(l3_constants.IP_VERSION_4)) + return scopes.get(str(lib_constants.IP_VERSION_4)) def process_external_port_address_scope_routing(self, iptables_manager): if not self._snat_enabled: @@ -997,9 +1032,13 @@ :param agent: Passes the agent in order to send RPC messages. """ LOG.debug("process router delete") - self._process_internal_ports(agent.pd) - agent.pd.sync_router(self.router['id']) - self._process_external_on_delete(agent) + if self.router_namespace.exists(): + self._process_internal_ports(agent.pd) + agent.pd.sync_router(self.router['id']) + self._process_external_on_delete(agent) + else: + LOG.warning(_LW("Can't gracefully delete the router %s: " + "no router namespace found."), self.router['id']) @common_utils.exception_logger() def process(self, agent): @@ -1021,5 +1060,8 @@ # Update ex_gw_port and enable_snat on the router info cache self.ex_gw_port = self.get_ex_gw_port() + self.fip_map = dict([(fip['floating_ip_address'], + fip['fixed_ip_address']) + for fip in self.get_floating_ips()]) # TODO(Carl) FWaaS uses this. Why is it set after processing is done? self.enable_snat = self.router.get('enable_snat') diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/l3_agent.py neutron-9.0.0~b3~dev557/neutron/agent/l3_agent.py --- neutron-9.0.0~b2~dev280/neutron/agent/l3_agent.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/l3_agent.py 2016-08-03 20:10:33.000000000 +0000 @@ -20,7 +20,6 @@ from oslo_service import service from neutron.agent.common import config -from neutron.agent.l3 import config as l3_config from neutron.agent.l3 import ha from neutron.agent.linux import external_process from neutron.agent.linux import interface @@ -29,11 +28,12 @@ from neutron.agent.metadata import config as metadata_config from neutron.common import config as common_config from neutron.common import topics +from neutron.conf.agent.l3 import config as l3_config from neutron import service as neutron_service def register_opts(conf): - conf.register_opts(l3_config.OPTS) + l3_config.register_l3_agent_config_opts(l3_config.OPTS, conf) conf.register_opts(metadata_config.DRIVER_OPTS) conf.register_opts(metadata_config.SHARED_OPTS) conf.register_opts(ha.OPTS) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/async_process.py neutron-9.0.0~b3~dev557/neutron/agent/linux/async_process.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/async_process.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/async_process.py 2016-08-03 20:10:33.000000000 +0000 @@ -113,7 +113,7 @@ self._spawn() if block: - utils.wait_until_true(self.is_active) + common_utils.wait_until_true(self.is_active) def stop(self, block=False, kill_signal=signal.SIGKILL): """Halt the process and watcher threads. 
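The fip_map snapshot taken at the end of process() above is what feeds the move_floating_ip path added earlier in this file: each pass compares the incoming floating-to-fixed associations against the previous pass. A toy illustration of the comparison (made-up addresses, not the patch's code):

    # Mapping recorded on the previous pass: floating IP -> fixed IP.
    fip_map = {'203.0.113.5': '10.0.0.4'}

    # Floating IP data received on the current pass.
    fip = {'floating_ip_address': '203.0.113.5',
           'fixed_ip_address': '10.0.0.9'}

    fip_ip = fip['floating_ip_address']
    moved = (fip_ip in fip_map and
             fip_map[fip_ip] != fip['fixed_ip_address'])
    assert moved  # the floating IP now points at a different fixed IP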
@@ -131,7 +131,7 @@ raise AsyncProcessException(_('Process is not running.')) if block: - utils.wait_until_true(lambda: not self.is_active()) + common_utils.wait_until_true(lambda: not self.is_active()) def _spawn(self): """Spawn a process and its watchers.""" diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/bridge_lib.py neutron-9.0.0~b3~dev557/neutron/agent/linux/bridge_lib.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/bridge_lib.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/bridge_lib.py 2016-08-29 20:05:49.000000000 +0000 @@ -18,11 +18,10 @@ import os -from oslo_log import log as logging +from oslo_utils import excutils from neutron.agent.linux import ip_lib - -LOG = logging.getLogger(__name__) +from neutron.agent.linux import utils # NOTE(toabctl): Don't use /sys/devices/virtual/net here because not all tap # devices are listed here (i.e. when using Xen) @@ -60,7 +59,11 @@ @classmethod def addbr(cls, name, namespace=None): bridge = cls(name, namespace) - bridge._brctl(['addbr', bridge.name]) + try: + bridge._brctl(['addbr', bridge.name]) + except RuntimeError: + with excutils.save_and_reraise_exception() as ectx: + ectx.reraise = not bridge.exists() return bridge @classmethod @@ -98,3 +101,24 @@ return os.listdir(BRIDGE_INTERFACES_FS % self.name) except OSError: return [] + + +class FdbInterface(object): + """provide basic functionality to edit the FDB table""" + + @classmethod + def add(cls, mac, dev): + return utils.execute(['bridge', 'fdb', 'add', mac, 'dev', dev], + run_as_root=True) + + @classmethod + def delete(cls, mac, dev): + return utils.execute(['bridge', 'fdb', 'delete', mac, 'dev', dev], + run_as_root=True) + + @classmethod + def show(cls, dev=None): + cmd = ['bridge', 'fdb', 'show'] + if dev: + cmd += ['dev', dev] + return utils.execute(cmd, run_as_root=True) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/dhcp.py neutron-9.0.0~b3~dev557/neutron/agent/linux/dhcp.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/dhcp.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/dhcp.py 2016-08-29 20:05:49.000000000 +0000 @@ -353,8 +353,8 @@ # static to preserve previous behavior addr_mode = getattr(subnet, 'ipv6_address_mode', None) ra_mode = getattr(subnet, 'ipv6_ra_mode', None) - if (addr_mode in [n_const.DHCPV6_STATEFUL, - n_const.DHCPV6_STATELESS] or + if (addr_mode in [constants.DHCPV6_STATEFUL, + constants.DHCPV6_STATELESS] or not addr_mode and not ra_mode): mode = 'static' @@ -510,7 +510,7 @@ fixed_ips, key=lambda fip: ((fip.subnet_id in v6_nets) and ( v6_nets[fip.subnet_id].ipv6_address_mode == ( - n_const.DHCPV6_STATELESS))), + constants.DHCPV6_STATELESS))), reverse=True) def _iter_hosts(self): @@ -545,11 +545,11 @@ no_opts = False if alloc.subnet_id in v6_nets: addr_mode = v6_nets[alloc.subnet_id].ipv6_address_mode - no_dhcp = addr_mode in (n_const.IPV6_SLAAC, - n_const.DHCPV6_STATELESS) + no_dhcp = addr_mode in (constants.IPV6_SLAAC, + constants.DHCPV6_STATELESS) # we don't setup anything for SLAAC. It doesn't make sense # to provide options for a client that won't use DHCP - no_opts = addr_mode == n_const.IPV6_SLAAC + no_opts = addr_mode == constants.IPV6_SLAAC # If dns_name attribute is supported by ports API, return the # dns_assignment generated by the Neutron server. 
Otherwise, @@ -770,7 +770,7 @@ addr_mode = getattr(subnet, 'ipv6_address_mode', None) if (not subnet.enable_dhcp or (subnet.ip_version == 6 and - addr_mode == n_const.IPV6_SLAAC)): + addr_mode == constants.IPV6_SLAAC)): continue if subnet.dns_nameservers: options.append( @@ -1259,7 +1259,7 @@ net = netaddr.IPNetwork(subnet.cidr) ip_cidrs.append('%s/%s' % (gateway, net.prefixlen)) - if self.conf.enable_isolated_metadata: + if self.conf.force_metadata or self.conf.enable_isolated_metadata: ip_cidrs.append(METADATA_DEFAULT_CIDR) self.driver.init_l3(interface_name, ip_cidrs, diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/external_process.py neutron-9.0.0~b3~dev557/neutron/agent/linux/external_process.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/external_process.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/external_process.py 2016-08-29 20:05:49.000000000 +0000 @@ -61,7 +61,8 @@ """ def __init__(self, conf, uuid, namespace=None, service=None, pids_path=None, default_cmd_callback=None, - cmd_addl_env=None, pid_file=None, run_as_root=False): + cmd_addl_env=None, pid_file=None, run_as_root=False, + custom_reload_callback=None): self.conf = conf self.uuid = uuid @@ -71,6 +72,7 @@ self.pids_path = pids_path or self.conf.external_pids self.pid_file = pid_file self.run_as_root = run_as_root + self.custom_reload_callback = custom_reload_callback if service: self.service_pid_fname = 'pid.' + service @@ -94,7 +96,10 @@ self.reload_cfg() def reload_cfg(self): - self.disable('HUP') + if self.custom_reload_callback: + self.disable(get_stop_command=self.custom_reload_callback) + else: + self.disable('HUP') def disable(self, sig='9', get_stop_command=None): pid = self.pid diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/interface.py neutron-9.0.0~b3~dev557/neutron/agent/linux/interface.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/interface.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/interface.py 2016-08-03 20:10:33.000000000 +0000 @@ -26,9 +26,7 @@ from neutron.agent.common import ovs_lib from neutron.agent.linux import ip_lib from neutron.agent.linux import utils -from neutron.common import constants as n_const from neutron.common import exceptions -from neutron.common import ipv6_utils LOG = logging.getLogger(__name__) @@ -43,12 +41,6 @@ 'Support kernels with limited namespace support ' '(e.g. RHEL 6.5) so long as ovs_use_veth is set to ' 'True.')), - cfg.IntOpt('network_device_mtu', - deprecated_for_removal=True, - help=_('MTU setting for device. This option will be removed in ' - 'Newton. 
Please use the system-wide segment_mtu setting ' - 'which the agents will take into account when wiring ' - 'VIFs.')), ] @@ -61,17 +53,6 @@ def __init__(self, conf): self.conf = conf - if self.conf.network_device_mtu: - self._validate_network_device_mtu() - - def _validate_network_device_mtu(self): - if (ipv6_utils.is_enabled() and - self.conf.network_device_mtu < n_const.IPV6_MIN_MTU): - LOG.error(_LE("IPv6 protocol requires a minimum MTU of " - "%(min_mtu)s, while the configured value is " - "%(current_mtu)s"), {'min_mtu': n_const.IPV6_MIN_MTU, - 'current_mtu': self.conf.network_device_mtu}) - raise SystemExit(1) @property def use_gateway_ips(self): @@ -352,7 +333,6 @@ # the device is moved into a namespace, otherwise OVS bridge does not # allow to set MTU that is higher than the least of all device MTUs on # the bridge - mtu = self.conf.network_device_mtu or mtu if mtu: ns_dev.link.set_mtu(mtu) if self.conf.ovs_use_veth: @@ -416,7 +396,6 @@ ns_dev = ip.device(device_name) ns_dev.link.set_address(mac_address) - mtu = self.conf.network_device_mtu or mtu if mtu: ns_dev.link.set_mtu(mtu) root_dev.link.set_mtu(mtu) @@ -463,7 +442,6 @@ root_veth.disable_ipv6() ns_veth.link.set_address(mac_address) - mtu = self.conf.network_device_mtu or mtu if mtu: root_veth.link.set_mtu(mtu) ns_veth.link.set_mtu(mtu) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/ip_conntrack.py neutron-9.0.0~b3~dev557/neutron/agent/linux/ip_conntrack.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/ip_conntrack.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/ip_conntrack.py 2016-08-29 20:05:49.000000000 +0000 @@ -58,7 +58,11 @@ ip_cmd = [str(net.ip), '-w', zone_id] if remote_ip and str( netaddr.IPNetwork(remote_ip).version) in ethertype: - ip_cmd.extend(['-s', str(remote_ip)]) + if rule.get('direction') == 'ingress': + direction = '-s' + else: + direction = '-d' + ip_cmd.extend([direction, str(remote_ip)]) conntrack_cmds.add(tuple(cmd + ip_cmd)) return conntrack_cmds diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/ip_lib.py neutron-9.0.0~b3~dev557/neutron/agent/linux/ip_lib.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/ip_lib.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/ip_lib.py 2016-08-29 20:05:49.000000000 +0000 @@ -28,6 +28,7 @@ from neutron._i18n import _, _LE from neutron.agent.common import utils from neutron.common import exceptions as n_exc +from neutron.common import utils as common_utils LOG = logging.getLogger(__name__) @@ -245,7 +246,9 @@ @classmethod def get_namespaces(cls): - output = cls._execute([], 'netns', ('list',)) + output = cls._execute( + [], 'netns', ('list',), + run_as_root=cfg.CONF.AGENT.use_helper_for_ns_read) return [l.split()[0] for l in output.splitlines()] @@ -656,7 +659,7 @@ address=address, reason=_('Duplicate address detected')) errmsg = _("Exceeded %s second limit waiting for " "address to leave the tentative state.") % wait_time - utils.utils.wait_until_true( + common_utils.wait_until_true( is_address_ready, timeout=wait_time, sleep=0.20, exception=AddressNotReady(address=address, reason=errmsg)) @@ -696,7 +699,7 @@ with excutils.save_and_reraise_exception() as ctx: if "Cannot find device" in str(rte): ctx.reraise = False - raise n_exc.DeviceNotFoundError(device_name=self.name) + raise exceptions.DeviceNotFoundError(device_name=self.name) def delete_gateway(self, gateway, table=None): ip_version = get_ip_version(gateway) @@ -777,6 +780,13 @@ return retval + def flush(self, ip_version, 
table=None, **kwargs): + args = ['flush'] + args += self._table_args(table) + for k, v in kwargs.items(): + args += [k, v] + self._as_root([ip_version], tuple(args)) + def add_route(self, cidr, via=None, table=None, **kwargs): ip_version = get_ip_version(cidr) args = ['replace', cidr] diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/iptables_firewall.py neutron-9.0.0~b3~dev557/neutron/agent/linux/iptables_firewall.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/iptables_firewall.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/iptables_firewall.py 2016-08-03 20:10:33.000000000 +0000 @@ -49,6 +49,10 @@ comment_rule = iptables_manager.comment_rule +def get_hybrid_port_name(port_name): + return (constants.TAP_DEVICE_PREFIX + port_name)[:LINUX_DEV_LEN] + + class mac_iptables(netaddr.mac_eui48): """mac format class for netaddr to match iptables representation.""" word_sep = ':' @@ -142,6 +146,9 @@ def update_security_group_members(self, sg_id, sg_members): LOG.debug("Update members of security group (%s)", sg_id) self.sg_members[sg_id] = collections.defaultdict(list, sg_members) + if self.enable_ipset: + for ip_version, current_ips in sg_members.items(): + self.ipset.set_members(sg_id, ip_version, current_ips) def _set_ports(self, port): if not firewall.port_sec_enabled(port): @@ -501,10 +508,6 @@ # select rules for current port and direction security_group_rules = self._select_sgr_by_direction(port, direction) security_group_rules += self._select_sg_rules_for_port(port, direction) - # make sure ipset members are updated for remote security groups - if self.enable_ipset: - remote_sg_ids = self._get_remote_sg_ids(port, direction) - self._update_ipset_members(remote_sg_ids) # split groups by ip version # for ipv4, iptables command is used # for ipv6, iptables6 command is used @@ -536,12 +539,6 @@ ipv6_iptables_rules) self._drop_dhcp_rule(ipv4_iptables_rules, ipv6_iptables_rules) - def _update_ipset_members(self, security_group_ids): - for ip_version, sg_ids in security_group_ids.items(): - for sg_id in sg_ids: - current_ips = self.sg_members[sg_id][ip_version] - self.ipset.set_members(sg_id, ip_version, current_ips) - def _generate_ipset_rule_args(self, sg_rule, remote_gid): ethertype = sg_rule.get('ethertype') ipset_name = self.ipset.get_name(remote_gid, ethertype) @@ -916,12 +913,12 @@ return iptables_manager.get_chain_name( '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'])) - def _get_device_name(self, port): - return (self.OVS_HYBRID_TAP_PREFIX + port['device'])[:LINUX_DEV_LEN] - def _get_br_device_name(self, port): return ('qvb' + port['device'])[:LINUX_DEV_LEN] + def _get_device_name(self, port): + return get_hybrid_port_name(port['device']) + def _get_jump_rule(self, port, direction): if direction == firewall.INGRESS_DIRECTION: device = self._get_br_device_name(port) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/iptables_manager.py neutron-9.0.0~b3~dev557/neutron/agent/linux/iptables_manager.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/iptables_manager.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/iptables_manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -412,6 +412,9 @@ finally: try: self.defer_apply_off() + except n_exc.IpTablesApplyException: + # already in the format we want, just reraise + raise except Exception: msg = _('Failure applying iptables rules') LOG.exception(msg) @@ -436,7 +439,16 @@ lock_name += '-' + self.namespace with lockutils.lock(lock_name, 
utils.SYNCHRONIZED_PREFIX, True): - return self._apply_synchronized() + first = self._apply_synchronized() + if not cfg.CONF.AGENT.debug_iptables_rules: + return first + second = self._apply_synchronized() + if second: + msg = (_("IPTables Rules did not converge. Diff: %s") % + '\n'.join(second)) + LOG.error(msg) + raise n_exc.IpTablesApplyException(msg) + return first def get_rules_for_table(self, table): """Runs iptables-save on a table and returns the results.""" diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/openvswitch_firewall/firewall.py neutron-9.0.0~b3~dev557/neutron/agent/linux/openvswitch_firewall/firewall.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/openvswitch_firewall/firewall.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/openvswitch_firewall/firewall.py 2016-08-03 20:10:33.000000000 +0000 @@ -164,13 +164,13 @@ class OVSFirewallDriver(firewall.FirewallDriver): - REQUIRED_PROTOCOLS = ",".join([ + REQUIRED_PROTOCOLS = [ ovs_consts.OPENFLOW10, ovs_consts.OPENFLOW11, ovs_consts.OPENFLOW12, ovs_consts.OPENFLOW13, ovs_consts.OPENFLOW14, - ]) + ] provides_arp_spoofing_protection = True diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/pd.py neutron-9.0.0~b3~dev557/neutron/agent/linux/pd.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/pd.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/pd.py 2016-08-03 20:10:33.000000000 +0000 @@ -24,7 +24,6 @@ from stevedore import driver from neutron._i18n import _ -from neutron.agent.linux import utils as linux_utils from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources @@ -221,17 +220,17 @@ def _ensure_lla_task(self, gw_ifname, ns_name, lla_with_mask): # It would be insane for taking so long unless DAD test failed # In that case, the subnet would never be assigned a prefix. 
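The replacement below swaps pd.py's use of the agent-local wait_until_true for the common implementation (the agent-local copy is turned into a deprecation shim later in this patch). For context, the helper polls a zero-argument predicate under an eventlet timeout, which is why callers bind arguments with functools.partial first; a sketch of those semantics (illustrative only):

    import functools

    import eventlet

    def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
        # Raise eventlet.Timeout (or the given exception) if the
        # predicate never turns True within `timeout` seconds.
        with eventlet.timeout.Timeout(timeout, exception):
            while not predicate():
                eventlet.sleep(sleep)

    flags = {'ready': False}
    eventlet.spawn_after(0.1, flags.update, {'ready': True})
    wait_until_true(functools.partial(dict.get, flags, 'ready'),
                    timeout=5, sleep=0.05)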
- linux_utils.wait_until_true(functools.partial(self._lla_available, - gw_ifname, - ns_name, - lla_with_mask), - timeout=l3_constants.LLA_TASK_TIMEOUT, - sleep=2) + utils.wait_until_true(functools.partial(self._lla_available, + gw_ifname, + ns_name, + lla_with_mask), + timeout=l3_constants.LLA_TASK_TIMEOUT, + sleep=2) def _lla_available(self, gw_ifname, ns_name, lla_with_mask): llas = self._get_llas(gw_ifname, ns_name) if self._is_lla_active(lla_with_mask, llas): - LOG.debug("LLA %s is active now" % lla_with_mask) + LOG.debug("LLA %s is active now", lla_with_mask) self.pd_update_cb() return True diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/polling.py neutron-9.0.0~b3~dev557/neutron/agent/linux/polling.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/polling.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/polling.py 2016-08-29 20:05:49.000000000 +0000 @@ -54,7 +54,7 @@ respawn_interval=ovsdb_monitor_respawn_interval) def start(self): - self._monitor.start() + self._monitor.start(block=True) def stop(self): try: diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/ra.py neutron-9.0.0~b3~dev557/neutron/agent/linux/ra.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/ra.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/ra.py 2016-08-29 20:05:49.000000000 +0000 @@ -17,6 +17,7 @@ import jinja2 import netaddr +from neutron_lib import constants from oslo_config import cfg from oslo_log import log as logging import six @@ -24,7 +25,7 @@ from neutron._i18n import _ from neutron.agent.linux import external_process from neutron.agent.linux import utils -from neutron.common import constants +from neutron.common import constants as n_const from neutron.common import utils as common_utils @@ -53,7 +54,7 @@ MinRtrAdvInterval {{ min_rtr_adv_interval }}; MaxRtrAdvInterval {{ max_rtr_adv_interval }}; - {% if network_mtu >= constants.IPV6_MIN_MTU %} + {% if network_mtu >= n_const.IPV6_MIN_MTU %} AdvLinkMTU {{network_mtu}}; {% endif %} @@ -132,6 +133,7 @@ auto_config_prefixes=auto_config_prefixes, stateful_config_prefixes=stateful_config_prefixes, dns_servers=dns_servers[0:MAX_RDNSS_ENTRIES], + n_const=n_const, constants=constants, min_rtr_adv_interval=self._agent_conf.min_rtr_adv_interval, max_rtr_adv_interval=self._agent_conf.max_rtr_adv_interval, diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/linux/utils.py neutron-9.0.0~b3~dev557/neutron/agent/linux/utils.py --- neutron-9.0.0~b2~dev280/neutron/agent/linux/utils.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/linux/utils.py 2016-08-03 20:10:33.000000000 +0000 @@ -23,6 +23,7 @@ import struct import threading +import debtcollector import eventlet from eventlet.green import subprocess from eventlet import greenthread @@ -61,7 +62,7 @@ def addl_env_args(addl_env): - """Build arugments for adding additional environment vars with env""" + """Build arguments for adding additional environment vars with env""" # NOTE (twilson) If using rootwrap, an EnvFilter should be set up for the # command instead of a CommandFilter. 
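Relatedly, a few hunks below the module-level wait_until_true in agent/linux/utils.py is retired without breaking importers: debtcollector re-exports the relocated function and emits a deprecation warning on use. A minimal illustration of that mechanism (old_name and new_home are hypothetical):

    import debtcollector.moves

    def new_home():
        # The relocated implementation.
        return 42

    # Calls to old_name() keep working but warn that the function has
    # moved, until removal in the stated release.
    old_name = debtcollector.moves.moved_function(
        new_home, 'old_name', __name__,
        version='Newton', removal_version='Ocata')

    assert old_name() == 42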
@@ -203,12 +204,6 @@ LOG.debug('Unable to access %s', filename) -def get_value_from_conf_file(cfg_root, uuid, cfg_file, converter=None): - """A helper function to read a value from one of a config file.""" - file_name = get_conf_file_name(cfg_root, uuid, cfg_file) - return get_value_from_file(file_name, converter) - - def remove_conf_files(cfg_root, uuid): conf_base = _get_conf_base(cfg_root, uuid, False) for file_path in glob.iglob("%s.*" % conf_base): @@ -288,20 +283,9 @@ return cmd_matches_expected(cmd, expected_cmd) -def wait_until_true(predicate, timeout=60, sleep=1, exception=None): - """ - Wait until callable predicate is evaluated as True - - :param predicate: Callable deciding whether waiting should continue. - Best practice is to instantiate predicate with functools.partial() - :param timeout: Timeout in seconds how long should function wait. - :param sleep: Polling interval for results in seconds. - :param exception: Exception class for eventlet.Timeout. - (see doc for eventlet.Timeout for more information) - """ - with eventlet.timeout.Timeout(timeout, exception): - while not predicate(): - eventlet.sleep(sleep) +wait_until_true = debtcollector.moves.moved_function( + utils.wait_until_true, 'wait_until_true', __name__, + version='Newton', removal_version='Ocata') def ensure_directory_exists_without_file(path): diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/metadata/config.py neutron-9.0.0~b3~dev557/neutron/agent/metadata/config.py --- neutron-9.0.0~b2~dev280/neutron/agent/metadata/config.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/metadata/config.py 2016-08-29 20:05:49.000000000 +0000 @@ -50,36 +50,36 @@ METADATA_PROXY_HANDLER_OPTS = [ - cfg.StrOpt('auth_ca_cert', - help=_("Certificate Authority public key (CA cert) " - "file for ssl")), - cfg.StrOpt('nova_metadata_ip', default='127.0.0.1', - help=_("IP address used by Nova metadata server.")), - cfg.PortOpt('nova_metadata_port', - default=8775, - help=_("TCP Port used by Nova metadata server.")), - cfg.StrOpt('metadata_proxy_shared_secret', - default='', - help=_('When proxying metadata requests, Neutron signs the ' - 'Instance-ID header with a shared secret to prevent ' - 'spoofing. You may select any string for a secret, ' - 'but it must match here and in the configuration used ' - 'by the Nova Metadata Server. NOTE: Nova uses the same ' - 'config key, but in [neutron] section.'), - secret=True), - cfg.StrOpt('nova_metadata_protocol', - default='http', - choices=['http', 'https'], - help=_("Protocol to access nova metadata, http or https")), - cfg.BoolOpt('nova_metadata_insecure', default=False, - help=_("Allow to perform insecure SSL (https) requests to " - "nova metadata")), - cfg.StrOpt('nova_client_cert', - default='', - help=_("Client certificate for nova metadata api server.")), - cfg.StrOpt('nova_client_priv_key', - default='', - help=_("Private key of client certificate.")) + cfg.StrOpt('auth_ca_cert', + help=_("Certificate Authority public key (CA cert) " + "file for ssl")), + cfg.StrOpt('nova_metadata_ip', default='127.0.0.1', + help=_("IP address used by Nova metadata server.")), + cfg.PortOpt('nova_metadata_port', + default=8775, + help=_("TCP Port used by Nova metadata server.")), + cfg.StrOpt('metadata_proxy_shared_secret', + default='', + help=_('When proxying metadata requests, Neutron signs the ' + 'Instance-ID header with a shared secret to prevent ' + 'spoofing. 
You may select any string for a secret, ' + 'but it must match here and in the configuration used ' + 'by the Nova Metadata Server. NOTE: Nova uses the same ' + 'config key, but in [neutron] section.'), + secret=True), + cfg.StrOpt('nova_metadata_protocol', + default='http', + choices=['http', 'https'], + help=_("Protocol to access nova metadata, http or https")), + cfg.BoolOpt('nova_metadata_insecure', default=False, + help=_("Allow to perform insecure SSL (https) requests to " + "nova metadata")), + cfg.StrOpt('nova_client_cert', + default='', + help=_("Client certificate for nova metadata api server.")), + cfg.StrOpt('nova_client_priv_key', + default='', + help=_("Private key of client certificate.")) ] DEDUCE_MODE = 'deduce' diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/ovsdb/api.py neutron-9.0.0~b3~dev557/neutron/agent/ovsdb/api.py --- neutron-9.0.0~b2~dev280/neutron/agent/ovsdb/api.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/ovsdb/api.py 2016-08-03 20:10:33.000000000 +0000 @@ -30,7 +30,7 @@ OPTS = [ cfg.StrOpt('ovsdb_interface', choices=interface_map.keys(), - default='vsctl', + default='native', help=_('The interface for interacting with the OVSDB')), cfg.StrOpt('ovsdb_connection', default='tcp:127.0.0.1:6640', diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/ovsdb/impl_idl.py neutron-9.0.0~b3~dev557/neutron/agent/ovsdb/impl_idl.py --- neutron-9.0.0~b2~dev280/neutron/agent/ovsdb/impl_idl.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/ovsdb/impl_idl.py 2016-08-03 20:10:33.000000000 +0000 @@ -14,13 +14,14 @@ import time +from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from ovs.db import idl from six.moves import queue as Queue -from neutron._i18n import _ +from neutron._i18n import _, _LE from neutron.agent.ovsdb import api from neutron.agent.ovsdb.native import commands as cmd from neutron.agent.ovsdb.native import connection @@ -32,9 +33,13 @@ LOG = logging.getLogger(__name__) +class VswitchdInterfaceAddException(exceptions.NeutronException): + message = _("Failed to add interfaces: %(ifaces)s") + + class Transaction(api.Transaction): def __init__(self, api, ovsdb_connection, timeout, - check_error=False, log_errors=False): + check_error=False, log_errors=True): self.api = api self.check_error = check_error self.log_errors = log_errors @@ -42,6 +47,10 @@ self.results = Queue.Queue(1) self.ovsdb_connection = ovsdb_connection self.timeout = timeout + self.expected_ifaces = set() + + def __str__(self): + return ", ".join(str(cmd) for cmd in self.commands) def add(self, command): """Add a command to the transaction @@ -61,23 +70,29 @@ _("Commands %(commands)s exceeded timeout %(timeout)d " "seconds") % {'commands': self.commands, 'timeout': self.timeout}) - if self.check_error: - if isinstance(result, idlutils.ExceptionResult): - if self.log_errors: - LOG.error(result.tb) + if isinstance(result, idlutils.ExceptionResult): + if self.log_errors: + LOG.error(result.tb) + if self.check_error: raise result.ex return result + def pre_commit(self, txn): + pass + + def post_commit(self, txn): + pass + def do_commit(self): - start_time = time.time() + self.start_time = time.time() attempts = 0 while True: - elapsed_time = time.time() - start_time - if attempts > 0 and elapsed_time > self.timeout: - raise RuntimeError("OVS transaction timed out") + if attempts > 0 and self.timeout_exceeded(): + raise RuntimeError(_("OVS transaction timed out")) 
attempts += 1 # TODO(twilson) Make sure we don't loop longer than vsctl_timeout txn = idl.Transaction(self.api.idl) + self.pre_commit(txn) for i, command in enumerate(self.commands): LOG.debug("Running txn command(idx=%(idx)s): %(cmd)s", {'idx': i, 'cmd': command}) @@ -92,9 +107,8 @@ status = txn.commit_block() if status == txn.TRY_AGAIN: LOG.debug("OVSDB transaction returned TRY_AGAIN, retrying") - idlutils.wait_for_change( - self.api.idl, self.timeout - elapsed_time, - seqno) + idlutils.wait_for_change(self.api.idl, self.time_remaining(), + seqno) continue elif status == txn.ERROR: msg = _("OVSDB Error: %s") % txn.get_error() @@ -109,9 +123,67 @@ return elif status == txn.UNCHANGED: LOG.debug("Transaction caused no change") + elif status == txn.SUCCESS: + self.post_commit(txn) return [cmd.result for cmd in self.commands] + def elapsed_time(self): + return time.time() - self.start_time + + def time_remaining(self): + return self.timeout - self.elapsed_time() + + def timeout_exceeded(self): + return self.elapsed_time() > self.timeout + + +class NeutronOVSDBTransaction(Transaction): + def pre_commit(self, txn): + self.api._ovs.increment('next_cfg') + txn.expected_ifaces = set() + + def post_commit(self, txn): + # ovs-vsctl only logs these failures and does not return nonzero + try: + self.do_post_commit(txn) + except Exception: + LOG.exception(_LE("Post-commit checks failed")) + + def do_post_commit(self, txn): + next_cfg = txn.get_increment_new_value() + while not self.timeout_exceeded(): + self.api.idl.run() + if self.vswitchd_has_completed(next_cfg): + failed = self.post_commit_failed_interfaces(txn) + if failed: + raise VswitchdInterfaceAddException( + ifaces=", ".join(failed)) + break + self.ovsdb_connection.poller.timer_wait( + self.time_remaining() * 1000) + self.api.idl.wait(self.ovsdb_connection.poller) + self.ovsdb_connection.poller.block() + else: + raise api.TimeoutException( + _("Commands %(commands)s exceeded timeout %(timeout)d " + "seconds post-commit") % {'commands': self.commands, + 'timeout': self.timeout}) + + def post_commit_failed_interfaces(self, txn): + failed = [] + for iface_uuid in txn.expected_ifaces: + uuid = txn.get_insert_uuid(iface_uuid) + if uuid: + ifaces = self.api.idl.tables['Interface'] + iface = ifaces.rows.get(uuid) + if iface and (not iface.ofport or iface.ofport == -1): + failed.append(iface.name) + return failed + + def vswitchd_has_completed(self, next_cfg): + return self.api._ovs.cur_cfg >= next_cfg + class OvsdbIdl(api.API): @@ -133,9 +205,9 @@ return list(self._tables['Open_vSwitch'].rows.values())[0] def transaction(self, check_error=False, log_errors=True, **kwargs): - return Transaction(self, OvsdbIdl.ovsdb_connection, - self.context.vsctl_timeout, - check_error, log_errors) + return NeutronOVSDBTransaction(self, OvsdbIdl.ovsdb_connection, + self.context.vsctl_timeout, + check_error, log_errors) def add_br(self, name, may_exist=True, datapath_type=None): return cmd.AddBridgeCommand(self, name, may_exist, datapath_type) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/ovsdb/native/commands.py neutron-9.0.0~b3~dev557/neutron/agent/ovsdb/native/commands.py --- neutron-9.0.0~b2~dev280/neutron/agent/ovsdb/native/commands.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/ovsdb/native/commands.py 2016-08-29 20:05:49.000000000 +0000 @@ -303,6 +303,7 @@ br.ports = ports iface = txn.insert(self.api._tables['Interface']) + txn.expected_ifaces.add(iface.uuid) iface.name = self.port port.verify('interfaces') ifaces = 
getattr(port, 'interfaces', []) diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/ovsdb/native/connection.py neutron-9.0.0~b3~dev557/neutron/agent/ovsdb/native/connection.py --- neutron-9.0.0~b2~dev280/neutron/agent/ovsdb/native/connection.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/ovsdb/native/connection.py 2016-08-03 20:10:33.000000000 +0000 @@ -106,7 +106,7 @@ self.poller.fd_wait(self.txns.alert_fileno, poller.POLLIN) #TODO(jlibosva): Remove next line once losing connection to ovsdb # is solved. - self.poller.timer_wait(self.timeout) + self.poller.timer_wait(self.timeout * 1000) self.poller.block() self.idl.run() txn = self.txns.get_nowait() diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/ovsdb/native/idlutils.py neutron-9.0.0~b3~dev557/neutron/agent/ovsdb/native/idlutils.py --- neutron-9.0.0~b2~dev280/neutron/agent/ovsdb/native/idlutils.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/ovsdb/native/idlutils.py 2016-08-29 20:05:49.000000000 +0000 @@ -194,7 +194,7 @@ elif isinstance(match, list): # According to rfc7047, lists support '=' and '!=' # (both strict and relaxed). Will follow twilson's dict comparison - # and implement relaxed version (excludes/includes as per standart) + # and implement relaxed version (excludes/includes as per standard) if op == "=": if not all([val, match]): return val == match diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/rpc.py neutron-9.0.0~b3~dev557/neutron/agent/rpc.py --- neutron-9.0.0~b2~dev280/neutron/agent/rpc.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/rpc.py 2016-08-03 20:10:33.000000000 +0000 @@ -73,7 +73,8 @@ self.client = n_rpc.get_client(target) def report_state(self, context, agent_state, use_call=False): - cctxt = self.client.prepare() + cctxt = self.client.prepare( + timeout=n_rpc.TRANSPORT.conf.rpc_response_timeout) # add unique identifier to a report # that can be logged on server side. # This create visible correspondence between events on diff -Nru neutron-9.0.0~b2~dev280/neutron/agent/securitygroups_rpc.py neutron-9.0.0~b3~dev557/neutron/agent/securitygroups_rpc.py --- neutron-9.0.0~b2~dev280/neutron/agent/securitygroups_rpc.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/agent/securitygroups_rpc.py 2016-08-29 20:05:49.000000000 +0000 @@ -20,32 +20,16 @@ from oslo_log import log as logging import oslo_messaging -from neutron._i18n import _, _LI, _LW +from neutron._i18n import _LI, _LW from neutron.agent import firewall from neutron.api.rpc.handlers import securitygroups_rpc +from neutron.conf.agent import securitygroups_rpc as sc_cfg + LOG = logging.getLogger(__name__) -security_group_opts = [ - cfg.StrOpt( - 'firewall_driver', - help=_('Driver for security groups firewall in the L2 agent')), - cfg.BoolOpt( - 'enable_security_group', - default=True, - help=_( - 'Controls whether the neutron security group API is enabled ' - 'in the server. It should be false when using no security ' - 'groups or using the nova security group API.')), - cfg.BoolOpt( - 'enable_ipset', - default=True, - help=_('Use ipset to speed-up the iptables based security groups. 
' - 'Enabling ipset support requires that ipset is installed on L2 ' - 'agent node.')) -] -cfg.CONF.register_opts(security_group_opts, 'SECURITYGROUP') +sc_cfg.register_securitygroups_opts() #This is backward compatibility check for Havana @@ -90,7 +74,6 @@ self.context = context self.plugin_rpc = plugin_rpc self.init_firewall(defer_refresh_firewall, integration_bridge) - self.local_vlan_map = local_vlan_map def init_firewall(self, defer_refresh_firewall=False, integration_bridge=None): @@ -152,6 +135,9 @@ if not device_ids: return LOG.info(_LI("Preparing filters for devices %s"), device_ids) + self._apply_port_filter(device_ids) + + def _apply_port_filter(self, device_ids, update_filter=False): if self.use_enhanced_rpc: devices_info = self.plugin_rpc.security_group_info_for_devices( self.context, list(device_ids)) @@ -169,7 +155,12 @@ self._update_security_group_info( security_groups, security_group_member_ips) for device in devices.values(): - self.firewall.prepare_port_filter(device) + if update_filter: + LOG.debug("Update port filter for %s", device['device']) + self.firewall.update_port_filter(device) + else: + LOG.debug("Prepare port filter for %s", device['device']) + self.firewall.prepare_port_filter(device) def _update_security_group_info(self, security_groups, security_group_member_ips): @@ -242,25 +233,7 @@ if not device_ids: LOG.info(_LI("No ports here to refresh firewall")) return - if self.use_enhanced_rpc: - devices_info = self.plugin_rpc.security_group_info_for_devices( - self.context, device_ids) - devices = devices_info['devices'] - security_groups = devices_info['security_groups'] - security_group_member_ips = devices_info['sg_member_ips'] - else: - devices = self.plugin_rpc.security_group_rules_for_devices( - self.context, device_ids) - - with self.firewall.defer_apply(): - if self.use_enhanced_rpc: - LOG.debug("Update security group information for ports %s", - devices.keys()) - self._update_security_group_info( - security_groups, security_group_member_ips) - for device in devices.values(): - LOG.debug("Update port filter for %s", device['device']) - self.firewall.update_port_filter(device) + self._apply_port_filter(device_ids, update_filter=True) def firewall_refresh_needed(self): return self.global_refresh_firewall or self.devices_to_refilter diff -Nru neutron-9.0.0~b2~dev280/neutron/api/api_common.py neutron-9.0.0~b3~dev557/neutron/api/api_common.py --- neutron-9.0.0~b2~dev280/neutron/api/api_common.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/api_common.py 2016-08-29 20:05:49.000000000 +0000 @@ -28,7 +28,6 @@ from neutron._i18n import _, _LW from neutron.common import constants -from neutron.common import exceptions as n_exc from neutron import wsgi @@ -186,6 +185,18 @@ return links +def is_native_pagination_supported(plugin): + native_pagination_attr_name = ("_%s__native_pagination_support" + % plugin.__class__.__name__) + return getattr(plugin, native_pagination_attr_name, False) + + +def is_native_sorting_supported(plugin): + native_sorting_attr_name = ("_%s__native_sorting_support" + % plugin.__class__.__name__) + return getattr(plugin, native_sorting_attr_name, False) + + class PaginationHelper(object): def __init__(self, request, primary_key='id'): @@ -374,7 +385,7 @@ def convert_exception_to_http_exc(e, faults, language): serializer = wsgi.JSONDictSerializer() - if isinstance(e, n_exc.MultipleExceptions): + if isinstance(e, exceptions.MultipleExceptions): converted_exceptions = [ convert_exception_to_http_exc(inner, faults, 
language) for inner in e.inner_exceptions] diff -Nru neutron-9.0.0~b2~dev280/neutron/api/extensions.py neutron-9.0.0~b3~dev557/neutron/api/extensions.py --- neutron-9.0.0~b2~dev280/neutron/api/extensions.py 2016-05-23 21:19:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/extensions.py 2016-08-29 20:05:49.000000000 +0000 @@ -38,6 +38,27 @@ LOG = logging.getLogger(__name__) +EXTENSION_SUPPORTED_CHECK_MAP = {} +_PLUGIN_AGNOSTIC_EXTENSIONS = set() + + +def register_custom_supported_check(alias, f, plugin_agnostic=False): + '''Register a custom function to determine if extension is supported. + + Subsequent calls for the same alias replace the registered function. + + :param alias: API extension alias name + :param f: custom check function that returns True if the extension is supported + :param plugin_agnostic: if True, don't require a plugin to claim support + with supported_extension_aliases. If False, a plugin must claim the + extension is supported. + ''' + + EXTENSION_SUPPORTED_CHECK_MAP[alias] = f + if plugin_agnostic: + _PLUGIN_AGNOSTIC_EXTENSIONS.add(alias) + + @six.add_metaclass(abc.ABCMeta) class PluginInterface(object): @@ -506,17 +527,30 @@ "their requirements. Some features will not " "work as expected."), ', '.join(unloadable_extensions)) - # Fail gracefully for default extensions, just in case some out - # of tree plugins are not entirely up to speed - default_extensions = set(const.DEFAULT_SERVICE_PLUGINS.values()) - if not unloadable_extensions <= default_extensions: - raise exceptions.ExtensionsNotFound( - extensions=list(unloadable_extensions)) - + self._check_faulty_extensions(unloadable_extensions) # Extending extensions' attributes map. for ext in processed_exts.values(): ext.update_attributes_map(attr_map) + def _check_faulty_extensions(self, faulty_extensions): + """Raise for non-default faulty extensions. + + Gracefully fail for defective default extensions, which will be + removed from the list of loaded extensions.
+ """ + default_extensions = set(const.DEFAULT_SERVICE_PLUGINS.values()) + if not faulty_extensions <= default_extensions: + raise exceptions.ExtensionsNotFound( + extensions=list(faulty_extensions)) + else: + # Remove the faulty extensions so that they do not show during + # ext-list + for ext in faulty_extensions: + try: + del self.extensions[ext] + except KeyError: + pass + def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: @@ -598,8 +632,14 @@ """Check if an extension is supported by any plugin.""" extension_is_valid = super(PluginAwareExtensionManager, self)._check_extension(extension) - return (extension_is_valid and - self._plugins_support(extension) and + if not extension_is_valid: + return False + + alias = extension.get_alias() + if alias in EXTENSION_SUPPORTED_CHECK_MAP: + return EXTENSION_SUPPORTED_CHECK_MAP[alias]() + + return (self._plugins_support(extension) and self._plugins_implement_interface(extension)) def _plugins_support(self, extension): @@ -650,6 +690,11 @@ aliases = set() for plugin in self.plugins.values(): aliases |= self.get_plugin_supported_extension_aliases(plugin) + aliases |= { + alias + for alias, func in EXTENSION_SUPPORTED_CHECK_MAP.items() + if func() + } return aliases @classmethod @@ -660,6 +705,7 @@ """Check if an extension supported by a plugin has been loaded.""" plugin_extensions = self.get_supported_extension_aliases() missing_aliases = plugin_extensions - set(self.extensions) + missing_aliases -= _PLUGIN_AGNOSTIC_EXTENSIONS if missing_aliases: raise exceptions.ExtensionsNotFound( extensions=list(missing_aliases)) diff -Nru neutron-9.0.0~b2~dev280/neutron/api/__init__.py neutron-9.0.0~b3~dev557/neutron/api/__init__.py --- neutron-9.0.0~b2~dev280/neutron/api/__init__.py 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/__init__.py 2016-08-29 20:05:49.000000000 +0000 @@ -1,15 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -# Default values for advanced API features -DEFAULT_ALLOW_SORTING = False -DEFAULT_ALLOW_PAGINATION = False diff -Nru neutron-9.0.0~b2~dev280/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py neutron-9.0.0~b3~dev557/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py --- neutron-9.0.0~b2~dev280/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py 2016-08-03 20:10:33.000000000 +0000 @@ -149,9 +149,17 @@ elif cast_required: admin_ctx = (context if context.is_admin else context.elevated()) network = self.plugin.get_network(admin_ctx, network_id) - agents = self.plugin.get_dhcp_agents_hosting_networks( - context, [network_id]) + if 'subnet' in payload and payload['subnet'].get('segment_id'): + # if segment_id exists then the segment service plugin + # must be loaded + nm = manager.NeutronManager + segment_plugin = nm.get_service_plugins()['segments'] + segment = segment_plugin.get_segment( + context, payload['subnet']['segment_id']) + network['candidate_hosts'] = segment['hosts'] + agents = self.plugin.get_dhcp_agents_hosting_networks( + context, [network_id], hosts=network.get('candidate_hosts')) # schedule the network first, if needed schedule_required = ( method == 'subnet_create_end' or diff -Nru neutron-9.0.0~b2~dev280/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py neutron-9.0.0~b3~dev557/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py --- neutron-9.0.0~b2~dev280/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py 2016-08-29 20:05:49.000000000 +0000 @@ -79,23 +79,10 @@ """Notify arp details to l3 agents hosting router.""" if not router_id: return - adminContext = (context.is_admin and - context or context.elevated()) - plugin = manager.NeutronManager.get_service_plugins().get( - service_constants.L3_ROUTER_NAT) - hosts = plugin.get_hosts_to_notify(adminContext, router_id) - # TODO(murali): replace cast with fanout to avoid performance - # issues at greater scale. - for host in hosts: - log_topic = '%s.%s' % (topics.L3_AGENT, host) - LOG.debug('Casting message %(method)s with topic %(topic)s', - {'topic': log_topic, 'method': method}) - dvr_arptable = {'router_id': router_id, - 'arp_table': data} - cctxt = self.client.prepare(topic=topics.L3_AGENT, - server=host, - version='1.2') - cctxt.cast(context, method, payload=dvr_arptable) + dvr_arptable = {'router_id': router_id, 'arp_table': data} + LOG.debug('Fanout dvr_arptable update: %s', dvr_arptable) + cctxt = self.client.prepare(fanout=True, version='1.2') + cctxt.cast(context, method, payload=dvr_arptable) def _notification(self, context, method, router_ids, operation, shuffle_agents, schedule_routers=True): diff -Nru neutron-9.0.0~b2~dev280/neutron/api/rpc/agentnotifiers/utils.py neutron-9.0.0~b3~dev557/neutron/api/rpc/agentnotifiers/utils.py --- neutron-9.0.0~b2~dev280/neutron/api/rpc/agentnotifiers/utils.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/rpc/agentnotifiers/utils.py 2016-08-03 20:10:33.000000000 +0000 @@ -23,12 +23,12 @@ def _call_with_retry(max_attempts): - """A wrapper to retry function using rpc call in case of + """A wrapper to retry a function using rpc call in case of MessagingException. Retries the decorated function in case of MessagingException of some kind (a timeout, client send error etc). 
- If maximum attempts exceeded, the exception which occured during last + If maximum attempts are exceeded, the exception which occurred during last attempt is reraised. """ def wrapper(f): diff -Nru neutron-9.0.0~b2~dev280/neutron/api/rpc/callbacks/consumer/registry.py neutron-9.0.0~b3~dev557/neutron/api/rpc/callbacks/consumer/registry.py --- neutron-9.0.0~b2~dev280/neutron/api/rpc/callbacks/consumer/registry.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/rpc/callbacks/consumer/registry.py 2016-08-03 20:10:33.000000000 +0000 @@ -27,12 +27,12 @@ _get_manager().unregister(callback, resource_type) -def push(resource_type, resource, event_type): - """Push resource events into all registered callbacks for the type.""" +def push(resource_type, resource_list, event_type): + """Push resource list into all registered callbacks for the event type.""" callbacks = _get_manager().get_callbacks(resource_type) for callback in callbacks: - callback(resource, event_type) + callback(resource_list, event_type) def clear(): diff -Nru neutron-9.0.0~b2~dev280/neutron/api/rpc/callbacks/resources.py neutron-9.0.0~b3~dev557/neutron/api/rpc/callbacks/resources.py --- neutron-9.0.0~b2~dev280/neutron/api/rpc/callbacks/resources.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/rpc/callbacks/resources.py 2016-08-29 20:05:49.000000000 +0000 @@ -11,23 +11,32 @@ # under the License. from neutron.objects.qos import policy +from neutron.objects import trunk +_TRUNK_CLS = trunk.Trunk _QOS_POLICY_CLS = policy.QosPolicy +_SUBPORT_CLS = trunk.SubPort _VALID_CLS = ( + _TRUNK_CLS, _QOS_POLICY_CLS, + _SUBPORT_CLS, ) _VALID_TYPES = [cls.obj_name() for cls in _VALID_CLS] # Supported types +TRUNK = _TRUNK_CLS.obj_name() QOS_POLICY = _QOS_POLICY_CLS.obj_name() +SUBPORT = _SUBPORT_CLS.obj_name() _TYPE_TO_CLS_MAP = { + TRUNK: _TRUNK_CLS, QOS_POLICY: _QOS_POLICY_CLS, + SUBPORT: _SUBPORT_CLS, } LOCAL_RESOURCE_VERSIONS = { diff -Nru neutron-9.0.0~b2~dev280/neutron/api/rpc/handlers/dhcp_rpc.py neutron-9.0.0~b3~dev557/neutron/api/rpc/handlers/dhcp_rpc.py --- neutron-9.0.0~b2~dev280/neutron/api/rpc/handlers/dhcp_rpc.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/rpc/handlers/dhcp_rpc.py 2016-08-03 20:10:33.000000000 +0000 @@ -33,6 +33,7 @@ from neutron.db import api as db_api from neutron.db import provisioning_blocks from neutron.extensions import portbindings +from neutron.extensions import segment as segment_ext from neutron import manager from neutron.plugins.common import utils as p_utils from neutron.quota import resource_registry @@ -143,6 +144,30 @@ # the order changes. subnets = sorted(plugin.get_subnets(context, filters=filters), key=operator.itemgetter('id')) + # Handle the possibility that the dhcp agent(s) only has connectivity + # inside a segment. If the segment service plugin is loaded and + # there are active dhcp enabled subnets, then filter out the subnets + # that are not on the host's segment. 
+ seg_plug = manager.NeutronManager.get_service_plugins().get( + segment_ext.SegmentPluginBase.get_plugin_type()) + seg_subnets = [subnet for subnet in subnets + if subnet.get('segment_id')] + if seg_plug and seg_subnets: + host_segment_ids = seg_plug.get_segments_by_hosts(context, [host]) + # Gather the ids of all the subnets that are on a segment that + # this host touches + seg_subnet_ids = {subnet['id'] for subnet in seg_subnets + if subnet['segment_id'] in host_segment_ids} + # Gather the ids of all the networks that are routed + routed_net_ids = {seg_subnet['network_id'] + for seg_subnet in seg_subnets} + # Remove the subnets with segments that are not in the same + # segments as the host. Do this only for the networks that are + # routed because we want non-routed networks to work as + # before. + subnets = [subnet for subnet in subnets + if subnet['network_id'] not in routed_net_ids or + subnet['id'] in seg_subnet_ids] grouped_subnets = self._group_by_network_id(subnets) grouped_ports = self._group_by_network_id(ports) @@ -167,12 +192,27 @@ "been deleted concurrently.", network_id) return filters = dict(network_id=[network_id]) + subnets = plugin.get_subnets(context, filters=filters) + seg_plug = manager.NeutronManager.get_service_plugins().get( + segment_ext.SegmentPluginBase.get_plugin_type()) + if seg_plug and subnets: + seg_subnets = [subnet for subnet in subnets + if subnet.get('segment_id')] + # If there are no subnets with segments, then this is not a routed + # network and no filtering should take place. + if seg_subnets: + segment_ids = seg_plug.get_segments_by_hosts(context, [host]) + # There might be something to do if no segment_ids exist that + # are mapped to this host. However, it seems that if this + # host is not mapped to any segments and this is a routed + # network, then this host shouldn't have even been scheduled + # to. + subnets = [subnet for subnet in seg_subnets + if subnet['segment_id'] in segment_ids] # NOTE(kevinbenton): we sort these because the agent builds tags # based on position in the list and has to restart the process if # the order changes. 
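Both DHCP RPC filters above implement one rule: for routed networks, offer a host only the subnets whose segment is mapped to that host, while non-routed networks pass through unchanged. A condensed, self-contained restatement (illustrative only, not the patch's code):

    def filter_subnets_for_host(subnets, host_segment_ids):
        # Networks with at least one segment-aware subnet are routed.
        routed_net_ids = {s['network_id'] for s in subnets
                          if s.get('segment_id')}
        return [s for s in subnets
                if s['network_id'] not in routed_net_ids or
                s.get('segment_id') in host_segment_ids]

    subnets = [{'id': 'a', 'network_id': 'n1', 'segment_id': 's1'},
               {'id': 'b', 'network_id': 'n1', 'segment_id': 's2'},
               {'id': 'c', 'network_id': 'n2'}]  # no segment: not routed
    assert [s['id'] for s in
            filter_subnets_for_host(subnets, {'s1'})] == ['a', 'c']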
- network['subnets'] = sorted( - plugin.get_subnets(context, filters=filters), - key=operator.itemgetter('id')) + network['subnets'] = sorted(subnets, key=operator.itemgetter('id')) network['ports'] = plugin.get_ports(context, filters=filters) return network @@ -232,6 +272,7 @@ 'host': host}) return self._port_action(plugin, context, port, 'update_port') + @db_api.retry_db_errors def dhcp_ready_on_ports(self, context, port_ids): for port_id in port_ids: provisioning_blocks.provisioning_complete( diff -Nru neutron-9.0.0~b2~dev280/neutron/api/rpc/handlers/l3_rpc.py neutron-9.0.0~b3~dev557/neutron/api/rpc/handlers/l3_rpc.py --- neutron-9.0.0~b2~dev280/neutron/api/rpc/handlers/l3_rpc.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/rpc/handlers/l3_rpc.py 2016-08-03 20:10:33.000000000 +0000 @@ -90,11 +90,6 @@ context = neutron_context.get_admin_context() if utils.is_extension_supported( self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS): - # only auto schedule routers that were specifically requested; - # on agent full sync routers will be auto scheduled in - # get_router_ids() - if cfg.CONF.router_auto_schedule and router_ids: - self.l3plugin.auto_schedule_routers(context, host, router_ids) routers = ( self.l3plugin.list_active_sync_routers_on_active_l3_agent( context, host, router_ids)) @@ -198,8 +193,8 @@ # Ports that are DVR interfaces have multiple bindings (based on # of hosts on which DVR router interfaces are spawned). Such # bindings are created/updated here by invoking - # update_dvr_port_binding - self.plugin.update_dvr_port_binding(context, port['id'], + # update_distributed_port_binding + self.plugin.update_distributed_port_binding(context, port['id'], {'port': {portbindings.HOST_ID: host, 'device_id': router_id} diff -Nru neutron-9.0.0~b2~dev280/neutron/api/rpc/handlers/resources_rpc.py neutron-9.0.0~b3~dev557/neutron/api/rpc/handlers/resources_rpc.py --- neutron-9.0.0~b2~dev280/neutron/api/rpc/handlers/resources_rpc.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/rpc/handlers/resources_rpc.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,9 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. 
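The resources_rpc rewrite below batches pushed resources per type: a mixed list is first grouped by resource type, then each group is cast on its own versioned fanout topic (with a single-element compatibility cast kept for Newton-era QoS agents). A sketch of the grouping step (illustrative; the toy type function stands in for resources.get_resource_type):

    import collections

    def classify_by_type(resource_list, get_type):
        by_type = collections.defaultdict(list)
        for resource in resource_list:
            by_type[get_type(resource)].append(resource)
        return by_type

    grouped = classify_by_type(['p1', 'q1', 'p2'], lambda r: r[0])
    assert dict(grouped) == {'p': ['p1', 'p2'], 'q': ['q1']}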
+import collections + from neutron_lib import exceptions from oslo_log import helpers as log_helpers -from oslo_log import log as logging import oslo_messaging from neutron._i18n import _ @@ -29,9 +30,6 @@ from neutron.objects import base as obj_base -LOG = logging.getLogger(__name__) - - class ResourcesRpcError(exceptions.NeutronException): pass @@ -179,27 +177,63 @@ def __init__(self): target = oslo_messaging.Target( - version='1.0', namespace=constants.RPC_NAMESPACE_RESOURCES) self.client = n_rpc.get_client(target) - def _prepare_object_fanout_context(self, obj, version): + def _prepare_object_fanout_context(self, obj, resource_version, + rpc_version): """Prepare fanout context, one topic per object type.""" - obj_topic = resource_type_versioned_topic(obj.obj_name(), version) - return self.client.prepare(fanout=True, topic=obj_topic) + obj_topic = resource_type_versioned_topic(obj.obj_name(), + resource_version) + return self.client.prepare(fanout=True, topic=obj_topic, + version=rpc_version) + + @staticmethod + def _classify_resources_by_type(resource_list): + resources_by_type = collections.defaultdict(list) + for resource in resource_list: + resource_type = resources.get_resource_type(resource) + resources_by_type[resource_type].append(resource) + return resources_by_type @log_helpers.log_method_call - def push(self, context, resource, event_type): - resource_type = resources.get_resource_type(resource) + def push(self, context, resource_list, event_type): + """Push an event and list of resources to agents, batched per type. + When a list of different resource types is passed to this method, + the push will be sent as separate pushes, one per + resource type. + """ + + resources_by_type = self._classify_resources_by_type(resource_list) + for resource_type, type_resources in resources_by_type.items(): + self._push(context, resource_type, type_resources, event_type) + + def _push(self, context, resource_type, resource_list, event_type): + """Push an event and list of resources of the same type to agents.""" _validate_resource_type(resource_type) - versions = version_manager.get_resource_versions(resource_type) - for version in versions: - cctxt = self._prepare_object_fanout_context(resource, version) - dehydrated_resource = resource.obj_to_primitive( - target_version=version) - cctxt.cast(context, 'push', - resource=dehydrated_resource, - event_type=event_type) + compat_call = len(resource_list) == 1 + + for version in version_manager.get_resource_versions(resource_type): + cctxt = self._prepare_object_fanout_context( + resource_list[0], version, + rpc_version='1.0' if compat_call else '1.1') + + dehydrated_resources = [ + resource.obj_to_primitive(target_version=version) + for resource in resource_list] + + if compat_call: + # TODO(mangelajo): remove in Ocata; backwards compatibility + # for agents expecting a single element + # instead of a list, this is only relevant + # to the QoSPolicy topic queue + cctxt.cast(context, 'push', + resource=dehydrated_resources[0], + event_type=event_type) + else: + cctxt.cast(context, 'push', + resource_list=dehydrated_resources, + event_type=event_type) class ResourcesPushRpcCallback(object): @@ -211,14 +245,22 @@ """ # History # 1.0 Initial version + # 1.1 push method introduces resource_list support - target = oslo_messaging.Target(version='1.0', + target = oslo_messaging.Target(version='1.1', namespace=constants.RPC_NAMESPACE_RESOURCES) - def push(self, context, resource, event_type): - resource_obj =
obj_base.NeutronObject.clean_obj_from_primitive( - resource) - LOG.debug("Resources notification (%(event_type)s): %(resource)s", - {'event_type': event_type, 'resource': repr(resource_obj)}) - resource_type = resources.get_resource_type(resource_obj) - cons_registry.push(resource_type, resource_obj, event_type) + def push(self, context, **kwargs): + """Push receiver, will always receive resources of the same type.""" + # TODO(mangelajo): accept single 'resource' parameter for backwards + # compatibility during Newton, remove in Ocata + resource_list = ([kwargs['resource']] if 'resource' in kwargs else + kwargs['resource_list']) + event_type = kwargs['event_type'] + + resource_objs = [ + obj_base.NeutronObject.clean_obj_from_primitive(resource) + for resource in resource_list] + + resource_type = resources.get_resource_type(resource_objs[0]) + cons_registry.push(resource_type, resource_objs, event_type) diff -Nru neutron-9.0.0~b2~dev280/neutron/api/v2/attributes.py neutron-9.0.0~b3~dev557/neutron/api/v2/attributes.py --- neutron-9.0.0~b2~dev280/neutron/api/v2/attributes.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/v2/attributes.py 2016-08-29 20:05:49.000000000 +0000 @@ -24,7 +24,6 @@ from neutron._i18n import _ from neutron.common import _deprecate -from neutron.common import constants as n_const # Defining a constant to avoid repeating string literal in several modules @@ -280,12 +279,12 @@ 'is_visible': True}, 'ipv6_ra_mode': {'allow_post': True, 'allow_put': False, 'default': constants.ATTR_NOT_SPECIFIED, - 'validate': {'type:values': n_const.IPV6_MODES}, + 'validate': {'type:values': constants.IPV6_MODES}, 'is_visible': True}, 'ipv6_address_mode': {'allow_post': True, 'allow_put': False, 'default': constants.ATTR_NOT_SPECIFIED, 'validate': {'type:values': - n_const.IPV6_MODES}, + constants.IPV6_MODES}, 'is_visible': True}, SHARED: {'allow_post': False, 'allow_put': False, @@ -430,8 +429,9 @@ if 'validate' not in attr_vals: continue for rule in attr_vals['validate']: - res = lib_validators.validators[rule](res_dict[attr], - attr_vals['validate'][rule]) + validator = lib_validators.get_validator(rule) + res = validator(res_dict[attr], attr_vals['validate'][rule]) + if res: msg_dict = dict(attr=attr, reason=res) msg = _("Invalid input for %(attr)s. 
" diff -Nru neutron-9.0.0~b2~dev280/neutron/api/v2/base.py neutron-9.0.0~b3~dev557/neutron/api/v2/base.py --- neutron-9.0.0~b2~dev280/neutron/api/v2/base.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/v2/base.py 2016-08-29 20:05:49.000000000 +0000 @@ -18,7 +18,6 @@ import netaddr from neutron_lib import exceptions -from oslo_config import cfg from oslo_log import log as logging from oslo_policy import policy as oslo_policy from oslo_utils import excutils @@ -90,9 +89,6 @@ self._policy_attrs = [name for (name, info) in self._attr_info.items() if info.get('required_by_policy')] self._notifier = n_rpc.get_notifier('network') - if cfg.CONF.notify_nova_on_port_data_changes: - from neutron.notifiers import nova - self._nova_notifier = nova.Notifier() self._member_actions = member_actions self._primary_key = self._get_primary_key() if self._allow_pagination and self._native_pagination: @@ -105,7 +101,7 @@ LOG.info(_LI("Allow sorting is enabled because native " "pagination requires native sorting")) self._allow_sorting = True - + self.parent = parent if parent: self._parent_id_name = '%s_id' % parent['member_name'] parent_part = '_%s' % parent['member_name'] @@ -132,14 +128,10 @@ return getattr(self._plugin, native_bulk_attr_name, False) def _is_native_pagination_supported(self): - native_pagination_attr_name = ("_%s__native_pagination_support" - % self._plugin.__class__.__name__) - return getattr(self._plugin, native_pagination_attr_name, False) + return api_common.is_native_pagination_supported(self._plugin) def _is_native_sorting_supported(self): - native_sorting_attr_name = ("_%s__native_sorting_support" - % self._plugin.__class__.__name__) - return getattr(self._plugin, native_sorting_attr_name, False) + return api_common.is_native_sorting_supported(self._plugin) def _exclude_attributes_by_policy(self, context, data): """Identifies attributes to exclude according to authZ policies. 
@@ -327,10 +319,6 @@ pluralized=self._collection) return obj - def _send_nova_notification(self, action, orig, returned): - if hasattr(self, '_nova_notifier'): - self._nova_notifier.send_network_change(action, orig, returned) - @db_api.retry_db_errors def index(self, request, **kwargs): """Returns a list of the requested entity.""" @@ -473,7 +461,8 @@ registry.notify(self._resource, events.BEFORE_RESPONSE, self, context=request.context, data=create_result, method_name=notifier_method, - collection=self._collection) + collection=self._collection, + action=action, original={}) return create_result def do_create(body, bulk=False, emulated=False): @@ -519,8 +508,6 @@ return notify({self._collection: objs}) else: obj = do_create(body) - self._send_nova_notification(action, {}, - {self._resource: obj}) return notify({self._resource: self._view(request.context, obj)}) @@ -559,14 +546,16 @@ # usage trackers as dirty resource_registry.set_resources_dirty(request.context) notifier_method = self._resource + '.delete.end' + result = {self._resource: self._view(request.context, obj)} + notifier_payload = {self._resource + '_id': id} + notifier_payload.update(result) self._notifier.info(request.context, notifier_method, - {self._resource + '_id': id}) - result = {self._resource: self._view(request.context, obj)} - self._send_nova_notification(action, {}, result) + notifier_payload) registry.notify(self._resource, events.BEFORE_RESPONSE, self, context=request.context, data=result, - method_name=notifier_method) + method_name=notifier_method, action=action, + original={}) def update(self, request, id, body=None, **kwargs): """Updates the specified entity's attributes.""" @@ -613,10 +602,12 @@ pluralized=self._collection) except oslo_policy.PolicyNotAuthorized: with excutils.save_and_reraise_exception() as ctxt: - # If a tenant is modifying it's own object, it's safe to return + # If a tenant is modifying its own object, it's safe to return # a 403. Otherwise, pretend that it doesn't exist to avoid # giving away information. 
- if request.context.tenant_id != orig_obj['tenant_id']: + orig_obj_tenant_id = orig_obj.get("tenant_id") + if (request.context.tenant_id != orig_obj_tenant_id or + orig_obj_tenant_id is None): ctxt.reraise = False msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) @@ -637,8 +628,8 @@ self._notifier.info(request.context, notifier_method, result) registry.notify(self._resource, events.BEFORE_RESPONSE, self, context=request.context, data=result, - method_name=notifier_method) - self._send_nova_notification(action, orig_object_copy, result) + method_name=notifier_method, action=action, + original=orig_object_copy) return result @staticmethod diff -Nru neutron-9.0.0~b2~dev280/neutron/api/v2/resource_helper.py neutron-9.0.0~b3~dev557/neutron/api/v2/resource_helper.py --- neutron-9.0.0~b2~dev280/neutron/api/v2/resource_helper.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/v2/resource_helper.py 2016-08-03 20:10:33.000000000 +0000 @@ -78,8 +78,8 @@ else: plugin = manager.NeutronManager.get_plugin() path_prefix = getattr(plugin, "path_prefix", "") - LOG.debug('Service %(service)s assigned prefix: %(prefix)s' - % {'service': which_service, 'prefix': path_prefix}) + LOG.debug('Service %(service)s assigned prefix: %(prefix)s', + {'service': which_service, 'prefix': path_prefix}) for collection_name in resource_map: resource_name = plural_mappings[collection_name] params = resource_map.get(collection_name, {}) diff -Nru neutron-9.0.0~b2~dev280/neutron/api/v2/resource.py neutron-9.0.0~b3~dev557/neutron/api/v2/resource.py --- neutron-9.0.0~b2~dev280/neutron/api/v2/resource.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/v2/resource.py 2016-08-03 20:10:33.000000000 +0000 @@ -23,6 +23,7 @@ from neutron._i18n import _LE, _LI from neutron.api import api_common +from neutron.common import utils from neutron import wsgi @@ -83,7 +84,13 @@ LOG.info(_LI('%(action)s failed (client error): %(exc)s'), {'action': action, 'exc': mapped_exc}) else: - LOG.exception(_LE('%s failed'), action) + LOG.exception( + _LE('%(action)s failed: %(details)s'), + { + 'action': action, + 'details': utils.extract_exc_details(e), + } + ) raise mapped_exc status = action_status.get(action, 200) diff -Nru neutron-9.0.0~b2~dev280/neutron/api/v2/router.py neutron-9.0.0~b3~dev557/neutron/api/v2/router.py --- neutron-9.0.0~b2~dev280/neutron/api/v2/router.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/api/v2/router.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from neutron_lib import constants from oslo_config import cfg from oslo_service import wsgi as base_wsgi import routes as routes_mapper @@ -38,7 +39,7 @@ SUB_RESOURCES = {} COLLECTION_ACTIONS = ['index', 'create'] MEMBER_ACTIONS = ['show', 'update', 'delete'] -REQUIREMENTS = {'id': attributes.UUID_PATTERN, 'format': 'json'} +REQUIREMENTS = {'id': constants.UUID_PATTERN, 'format': 'json'} class Index(wsgi.Application): diff -Nru neutron-9.0.0~b2~dev280/neutron/callbacks/events.py neutron-9.0.0~b3~dev557/neutron/callbacks/events.py --- neutron-9.0.0~b2~dev280/neutron/callbacks/events.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/callbacks/events.py 2016-08-29 20:05:49.000000000 +0000 @@ -29,6 +29,11 @@ BEFORE_RESPONSE = 'before_response' AFTER_REQUEST = 'after_request' +# String literals representing events associated to process operations +BEFORE_INIT = 'before_init' +BEFORE_SPAWN = 'before_spawn' # sent per process +AFTER_INIT = 'after_init' # sent per worker + # String literals representing events associated to error conditions ABORT_CREATE = 'abort_create' ABORT_READ = 'abort_read' diff -Nru neutron-9.0.0~b2~dev280/neutron/callbacks/exceptions.py neutron-9.0.0~b3~dev557/neutron/callbacks/exceptions.py --- neutron-9.0.0~b2~dev280/neutron/callbacks/exceptions.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/callbacks/exceptions.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,14 +13,13 @@ from neutron_lib import exceptions from neutron._i18n import _ -from neutron.common import exceptions as n_exc class Invalid(exceptions.NeutronException): message = _("The value '%(value)s' for %(element)s is not valid.") -class CallbackFailure(n_exc.MultipleExceptions): +class CallbackFailure(exceptions.MultipleExceptions): def __init__(self, errors): self.errors = errors diff -Nru neutron-9.0.0~b2~dev280/neutron/callbacks/manager.py neutron-9.0.0~b3~dev557/neutron/callbacks/manager.py --- neutron-9.0.0~b2~dev280/neutron/callbacks/manager.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/callbacks/manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -166,4 +166,6 @@ # TODO(armax): consider using something other than names # https://www.python.org/dev/peps/pep-3155/, but this # might be okay for now. 
- return reflection.get_callable_name(callback) + parts = (reflection.get_callable_name(callback), + str(hash(callback))) + return '-'.join(parts) diff -Nru neutron-9.0.0~b2~dev280/neutron/callbacks/resources.py neutron-9.0.0~b3~dev557/neutron/callbacks/resources.py --- neutron-9.0.0~b2~dev280/neutron/callbacks/resources.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/callbacks/resources.py 2016-08-03 20:10:33.000000000 +0000 @@ -18,12 +18,14 @@ NETWORKS = 'networks' PORT = 'port' PORTS = 'ports' +PORT_DEVICE = 'port_device' PROCESS = 'process' ROUTER = 'router' ROUTER_GATEWAY = 'router_gateway' ROUTER_INTERFACE = 'router_interface' SECURITY_GROUP = 'security_group' SECURITY_GROUP_RULE = 'security_group_rule' +SEGMENT = 'segment' SUBNET = 'subnet' SUBNETS = 'subnets' SUBNET_GATEWAY = 'subnet_gateway' diff -Nru neutron-9.0.0~b2~dev280/neutron/cmd/ipset_cleanup.py neutron-9.0.0~b3~dev557/neutron/cmd/ipset_cleanup.py --- neutron-9.0.0~b2~dev280/neutron/cmd/ipset_cleanup.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/cmd/ipset_cleanup.py 2016-08-03 20:10:33.000000000 +0000 @@ -16,10 +16,10 @@ from oslo_config import cfg from oslo_log import log as logging -from neutron._i18n import _, _LE, _LI -from neutron.agent.linux import ipset_manager +from neutron._i18n import _LE, _LI from neutron.agent.linux import utils from neutron.common import config +from neutron.conf.agent import cmd as command LOG = logging.getLogger(__name__) @@ -31,22 +31,8 @@ Use separate setup_conf for the utility because there are many options from the main config that do not apply during clean-up. """ - - cli_opts = [ - cfg.BoolOpt('allsets', - default=False, - help=_('Destroy all IPsets.')), - cfg.BoolOpt('force', - default=False, - help=_('Destroy IPsets even if there is an iptables ' - 'reference.')), - cfg.StrOpt('prefix', - default=ipset_manager.NET_PREFIX, - help=_('String prefix used to match IPset names.')), - ] - conf = cfg.CONF - conf.register_cli_opts(cli_opts) + command.register_cmd_opts(command.ip_opts, conf) return conf diff -Nru neutron-9.0.0~b2~dev280/neutron/cmd/netns_cleanup.py neutron-9.0.0~b3~dev557/neutron/cmd/netns_cleanup.py --- neutron-9.0.0~b2~dev280/neutron/cmd/netns_cleanup.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/cmd/netns_cleanup.py 2016-08-03 20:10:33.000000000 +0000 @@ -22,10 +22,9 @@ from oslo_log import log as logging from oslo_utils import importutils -from neutron._i18n import _, _LE +from neutron._i18n import _LE from neutron.agent.common import config as agent_config from neutron.agent.common import ovs_lib -from neutron.agent.dhcp import config as dhcp_config from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import dvr from neutron.agent.l3 import dvr_fip_ns @@ -34,6 +33,8 @@ from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.common import config +from neutron.conf.agent import cmd +from neutron.conf.agent import dhcp as dhcp_config LOG = logging.getLogger(__name__) @@ -60,21 +61,10 @@ from the main config that do not apply during clean-up. 
""" - cli_opts = [ - cfg.BoolOpt('force', - default=False, - help=_('Delete the namespace by removing all devices.')), - cfg.StrOpt('agent-type', - choices=['dhcp', 'l3', 'lbaas'], - help=_('Cleanup resources of a specific agent type only.')), - ] - conf = cfg.CONF - conf.register_cli_opts(cli_opts) + cmd.register_cmd_opts(cmd.netns_opts, conf) agent_config.register_interface_driver_opts_helper(conf) - conf.register_opts(dhcp_config.DHCP_AGENT_OPTS) - conf.register_opts(dhcp_config.DHCP_OPTS) - conf.register_opts(dhcp_config.DNSMASQ_OPTS) + dhcp_config.register_agent_dhcp_opts(conf) conf.register_opts(interface.OPTS) return conf diff -Nru neutron-9.0.0~b2~dev280/neutron/cmd/ovs_cleanup.py neutron-9.0.0~b3~dev557/neutron/cmd/ovs_cleanup.py --- neutron-9.0.0~b2~dev280/neutron/cmd/ovs_cleanup.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/cmd/ovs_cleanup.py 2016-08-03 20:10:33.000000000 +0000 @@ -16,13 +16,14 @@ from oslo_config import cfg from oslo_log import log as logging -from neutron._i18n import _, _LI +from neutron._i18n import _LI from neutron.agent.common import config as agent_config from neutron.agent.common import ovs_lib -from neutron.agent.l3 import config as l3_config from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.common import config +from neutron.conf.agent import cmd +from neutron.conf.agent.l3 import config as l3_config LOG = logging.getLogger(__name__) @@ -34,18 +35,10 @@ Use separate setup_conf for the utility because there are many options from the main config that do not apply during clean-up. """ - opts = [ - cfg.BoolOpt('ovs_all_ports', - default=False, - help=_('True to delete all ports on all the OpenvSwitch ' - 'bridges. False to delete ports created by ' - 'Neutron on integration and external network ' - 'bridges.')) - ] conf = cfg.CONF - conf.register_cli_opts(opts) - conf.register_opts(l3_config.OPTS) + cmd.register_cmd_opts(cmd.ovs_opts, conf) + l3_config.register_l3_agent_config_opts(l3_config.OPTS, conf) conf.register_opts(interface.OPTS) agent_config.register_interface_driver_opts_helper(conf) return conf diff -Nru neutron-9.0.0~b2~dev280/neutron/cmd/sanity/checks.py neutron-9.0.0~b3~dev557/neutron/cmd/sanity/checks.py --- neutron-9.0.0~b2~dev280/neutron/cmd/sanity/checks.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/cmd/sanity/checks.py 2016-08-03 20:10:33.000000000 +0000 @@ -33,6 +33,7 @@ from neutron.agent.linux import keepalived from neutron.agent.linux import utils as agent_utils from neutron.common import constants +from neutron.common import utils as common_utils from neutron.plugins.common import constants as const from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants as ovs_const @@ -244,14 +245,14 @@ def verify_ipv6_address_assignment(self, gw_dev): process = self.manager.get_process() - agent_utils.wait_until_true(lambda: process.active) + common_utils.wait_until_true(lambda: process.active) def _gw_vip_assigned(): iface_ip = gw_dev.addr.list(ip_version=6, scope='global') if iface_ip: return self.gw_vip == iface_ip[0]['cidr'] - agent_utils.wait_until_true(_gw_vip_assigned) + common_utils.wait_until_true(_gw_vip_assigned) def __enter__(self): ip_lib.IPWrapper().netns.add(self.nsname) @@ -334,8 +335,7 @@ with ovs_lib.OVSBridge(br_name) as br: try: - br.set_protocols( - "OpenFlow10,OpenFlow11,OpenFlow12,OpenFlow13,OpenFlow14") + br.set_protocols(["OpenFlow%d" % i for i in range(10, 15)]) except RuntimeError as e: 
LOG.debug("Exception while checking ovs conntrack support: %s", e) return False diff -Nru neutron-9.0.0~b2~dev280/neutron/cmd/sanity_check.py neutron-9.0.0~b3~dev557/neutron/cmd/sanity_check.py --- neutron-9.0.0~b2~dev280/neutron/cmd/sanity_check.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/cmd/sanity_check.py 2016-08-29 20:05:49.000000000 +0000 @@ -200,7 +200,7 @@ 'failed. OVS/CT firewall will not work. A newer ' 'version of OVS (2.5+) and linux kernel (4.3+) are ' 'required. See ' - 'https://github.com/openvswitch/ovs/blob/master/FAQ.md' + 'https://github.com/openvswitch/ovs/blob/master/FAQ.md ' 'for more information.')) return result diff -Nru neutron-9.0.0~b2~dev280/neutron/common/cache_utils.py neutron-9.0.0~b3~dev557/neutron/common/cache_utils.py --- neutron-9.0.0~b2~dev280/neutron/common/cache_utils.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/common/cache_utils.py 2016-08-03 20:10:33.000000000 +0000 @@ -65,7 +65,6 @@ backend = parsed.scheme if backend == 'memory': - backend = 'oslo_cache.dict' query = parsed.query # NOTE(fangzhen): The following NOTE and code is from legacy # oslo-incubator cache module. Previously reside in neutron at @@ -78,11 +77,17 @@ if not query and '?' in parsed.path: query = parsed.path.split('?', 1)[-1] parameters = parse.parse_qs(query) - expiration_time = int(parameters.get('default_ttl', [0])[0]) - region = cache.create_region() - region.configure(backend, expiration_time=expiration_time) - return region + conf = cfg.ConfigOpts() + register_oslo_configs(conf) + cache_conf_dict = { + 'enabled': True, + 'backend': 'oslo_cache.dict', + 'expiration_time': int(parameters.get('default_ttl', [0])[0]), + } + for k, v in cache_conf_dict.items(): + conf.set_override(k, v, group='cache') + return _get_cache_region(conf) else: raise RuntimeError(_('Old style configuration can use only memory ' '(dict) backend')) @@ -108,6 +113,8 @@ key = (func_name,) + args if kwargs: key += utils.dict2tuple(kwargs) + # oslo.cache expects a string or a buffer + key = str(key) try: item = target_self._cache.get(key) except TypeError: diff -Nru neutron-9.0.0~b2~dev280/neutron/common/config.py neutron-9.0.0~b3~dev557/neutron/common/config.py --- neutron-9.0.0~b2~dev280/neutron/common/config.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/common/config.py 2016-08-29 20:05:49.000000000 +0000 @@ -29,175 +29,15 @@ from oslo_service import wsgi from neutron._i18n import _, _LI -from neutron import api -from neutron.common import constants -from neutron.common import utils +from neutron.conf import common as common_config from neutron import policy from neutron import version LOG = logging.getLogger(__name__) -core_opts = [ - cfg.StrOpt('bind_host', default='0.0.0.0', - help=_("The host IP to bind to")), - cfg.PortOpt('bind_port', default=9696, - help=_("The port to bind to")), - cfg.StrOpt('api_extensions_path', default="", - help=_("The path for API extensions. " - "Note that this can be a colon-separated list of paths. " - "For example: api_extensions_path = " - "extensions:/path/to/more/exts:/even/more/exts. 
" - "The __path__ of neutron.extensions is appended to " - "this, so if your extensions are in there you don't " - "need to specify them here.")), - cfg.StrOpt('auth_strategy', default='keystone', - help=_("The type of authentication to use")), - cfg.StrOpt('core_plugin', - help=_("The core plugin Neutron will use")), - cfg.ListOpt('service_plugins', default=[], - help=_("The service plugins Neutron will use")), - cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00", - help=_("The base MAC address Neutron will use for VIFs. " - "The first 3 octets will remain unchanged. If the 4th " - "octet is not 00, it will also be used. The others " - "will be randomly generated.")), - cfg.IntOpt('mac_generation_retries', default=16, - deprecated_for_removal=True, - help=_("How many times Neutron will retry MAC generation. This " - "option is now obsolete and so is deprecated to be " - "removed in the Ocata release.")), - cfg.BoolOpt('allow_bulk', default=True, - help=_("Allow the usage of the bulk API")), - cfg.BoolOpt('allow_pagination', default=api.DEFAULT_ALLOW_PAGINATION, - help=_("Allow the usage of the pagination")), - cfg.BoolOpt('allow_sorting', default=api.DEFAULT_ALLOW_SORTING, - help=_("Allow the usage of the sorting")), - cfg.StrOpt('pagination_max_limit', default="-1", - help=_("The maximum number of items returned in a single " - "response, value was 'infinite' or negative integer " - "means no limit")), - cfg.ListOpt('default_availability_zones', default=[], - help=_("Default value of availability zone hints. The " - "availability zone aware schedulers use this when " - "the resources availability_zone_hints is empty. " - "Multiple availability zones can be specified by a " - "comma separated string. This value can be empty. " - "In this case, even if availability_zone_hints for " - "a resource is empty, availability zone is " - "considered for high availability while scheduling " - "the resource.")), - cfg.IntOpt('max_dns_nameservers', default=5, - help=_("Maximum number of DNS nameservers per subnet")), - cfg.IntOpt('max_subnet_host_routes', default=20, - help=_("Maximum number of host routes per subnet")), - cfg.IntOpt('max_fixed_ips_per_port', default=5, - deprecated_for_removal=True, - help=_("Maximum number of fixed ips per port. This option " - "is deprecated and will be removed in the N " - "release.")), - cfg.StrOpt('default_ipv4_subnet_pool', deprecated_for_removal=True, - help=_("Default IPv4 subnet pool to be used for automatic " - "subnet CIDR allocation. " - "Specifies by UUID the pool to be used in case where " - "creation of a subnet is being called without a " - "subnet pool ID. If not set then no pool " - "will be used unless passed explicitly to the subnet " - "create. If no pool is used, then a CIDR must be passed " - "to create a subnet and that subnet will not be " - "allocated from any pool; it will be considered part of " - "the tenant's private address space. This option is " - "deprecated for removal in the N release.")), - cfg.StrOpt('default_ipv6_subnet_pool', deprecated_for_removal=True, - help=_("Default IPv6 subnet pool to be used for automatic " - "subnet CIDR allocation. " - "Specifies by UUID the pool to be used in case where " - "creation of a subnet is being called without a " - "subnet pool ID. See the description for " - "default_ipv4_subnet_pool for more information. 
This " - "option is deprecated for removal in the N release.")), - cfg.BoolOpt('ipv6_pd_enabled', default=False, - help=_("Enables IPv6 Prefix Delegation for automatic subnet " - "CIDR allocation. " - "Set to True to enable IPv6 Prefix Delegation for " - "subnet allocation in a PD-capable environment. Users " - "making subnet creation requests for IPv6 subnets " - "without providing a CIDR or subnetpool ID will be " - "given a CIDR via the Prefix Delegation mechanism. " - "Note that enabling PD will override the behavior of " - "the default IPv6 subnetpool.")), - cfg.IntOpt('dhcp_lease_duration', default=86400, - deprecated_name='dhcp_lease_time', - help=_("DHCP lease duration (in seconds). Use -1 to tell " - "dnsmasq to use infinite lease times.")), - cfg.StrOpt('dns_domain', - default='openstacklocal', - help=_('Domain to use for building the hostnames')), - cfg.StrOpt('external_dns_driver', - help=_('Driver for external DNS integration.')), - cfg.BoolOpt('dhcp_agent_notification', default=True, - help=_("Allow sending resource operation" - " notification to DHCP agent")), - cfg.BoolOpt('allow_overlapping_ips', default=False, - help=_("Allow overlapping IP support in Neutron. " - "Attention: the following parameter MUST be set to " - "False if Neutron is being used in conjunction with " - "Nova security groups.")), - cfg.StrOpt('host', default=utils.get_hostname(), - sample_default='example.domain', - help=_("Hostname to be used by the Neutron server, agents and " - "services running on this machine. All the agents and " - "services running on this machine must use the same " - "host value.")), - cfg.BoolOpt('notify_nova_on_port_status_changes', default=True, - help=_("Send notification to nova when port status changes")), - cfg.BoolOpt('notify_nova_on_port_data_changes', default=True, - help=_("Send notification to nova when port data (fixed_ips/" - "floatingip) changes so nova can update its cache.")), - cfg.IntOpt('send_events_interval', default=2, - help=_('Number of seconds between sending events to nova if ' - 'there are any events to send.')), - cfg.BoolOpt('advertise_mtu', default=True, - deprecated_for_removal=True, - help=_('If True, advertise network MTU values if core plugin ' - 'calculates them. MTU is advertised to running ' - 'instances via DHCP and RA MTU options.')), - cfg.StrOpt('ipam_driver', - help=_("Neutron IPAM (IP address management) driver to use. " - "If ipam_driver is not set (default behavior), no IPAM " - "driver is used. In order to use the reference " - "implementation of Neutron IPAM driver, " - "use 'internal'.")), - cfg.BoolOpt('vlan_transparent', default=False, - help=_('If True, then allow plugins that support it to ' - 'create VLAN transparent networks.')), - cfg.StrOpt('web_framework', default='legacy', - choices=('legacy', 'pecan'), - help=_("This will choose the web framework in which to run " - "the Neutron API server. 'pecan' is a new experiemental " - "rewrite of the API server.")), - cfg.IntOpt('global_physnet_mtu', default=constants.DEFAULT_NETWORK_MTU, - deprecated_name='segment_mtu', deprecated_group='ml2', - help=_('MTU of the underlying physical network. Neutron uses ' - 'this value to calculate MTU for all virtual network ' - 'components. For flat and VLAN networks, neutron uses ' - 'this value without modification. For overlay networks ' - 'such as VXLAN, neutron automatically subtracts the ' - 'overlay protocol overhead from this value. 
Defaults ' - 'to 1500, the standard value for Ethernet.')) -] - -core_cli_opts = [ - cfg.StrOpt('state_path', - default='/var/lib/neutron', - help=_("Where to store Neutron state files. " - "This directory must be writable by the agent.")), -] - # Register the configuration options -cfg.CONF.register_opts(core_opts) -cfg.CONF.register_cli_opts(core_cli_opts) -wsgi.register_opts(cfg.CONF) +common_config.register_core_common_config_opts() # Ensure that the control exchange is set correctly oslo_messaging.set_transport_defaults(control_exchange='neutron') @@ -209,7 +49,7 @@ db_options.set_defaults( cfg.CONF, connection='sqlite://', - sqlite_db='', max_pool_size=10, + max_pool_size=10, max_overflow=20, pool_timeout=10) set_db_defaults() @@ -219,18 +59,9 @@ ks_loading.register_auth_conf_options(cfg.CONF, NOVA_CONF_SECTION) ks_loading.register_session_conf_options(cfg.CONF, NOVA_CONF_SECTION) -nova_opts = [ - cfg.StrOpt('region_name', - help=_('Name of nova region to use. Useful if keystone manages' - ' more than one region.')), - cfg.StrOpt('endpoint_type', - default='public', - choices=['public', 'admin', 'internal'], - help=_('Type of the nova endpoint to use. This endpoint will' - ' be looked up in the keystone catalog and should be' - ' one of public, internal or admin.')), -] -cfg.CONF.register_opts(nova_opts, group=NOVA_CONF_SECTION) + +# Register the nova configuration options +common_config.register_nova_opts() logging.register_options(cfg.CONF) diff -Nru neutron-9.0.0~b2~dev280/neutron/common/constants.py neutron-9.0.0~b3~dev557/neutron/common/constants.py --- neutron-9.0.0~b2~dev280/neutron/common/constants.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/common/constants.py 2016-08-29 20:05:49.000000000 +0000 @@ -28,10 +28,6 @@ # for agents to indicate when they are wiring up the ports. The following is # to indicate when the server is busy building sub-components of a router ROUTER_STATUS_ALLOCATING = 'ALLOCATING' -L3_AGENT_MODE_DVR = 'dvr' -L3_AGENT_MODE_DVR_SNAT = 'dvr_snat' -L3_AGENT_MODE_LEGACY = 'legacy' -L3_AGENT_MODE = 'agent_mode' DEVICE_ID_RESERVED_DHCP_PORT = "reserved_dhcp_port" @@ -43,11 +39,11 @@ HA_NETWORK_NAME = 'HA network tenant %s' HA_SUBNET_NAME = 'HA subnet tenant %s' HA_PORT_NAME = 'HA port tenant %s' -MINIMUM_AGENTS_FOR_HA = 2 +MINIMUM_MINIMUM_AGENTS_FOR_HA = 1 +DEFAULT_MINIMUM_AGENTS_FOR_HA = 2 HA_ROUTER_STATE_ACTIVE = 'active' HA_ROUTER_STATE_STANDBY = 'standby' -AGENT_TYPE_MACVTAP = 'Macvtap agent' PAGINATION_INFINITE = 'infinite' SORT_DIRECTION_ASC = 'asc' @@ -67,11 +63,6 @@ IP_PROTOCOL_NUM_TO_NAME_MAP = { str(v): k for k, v in lib_constants.IP_PROTOCOL_MAP.items()} -DHCPV6_STATEFUL = 'dhcpv6-stateful' -DHCPV6_STATELESS = 'dhcpv6-stateless' -IPV6_SLAAC = 'slaac' -IPV6_MODES = [DHCPV6_STATEFUL, DHCPV6_STATELESS, IPV6_SLAAC] - # Special provisional prefix for IPv6 Prefix Delegation PROVISIONAL_IPV6_PD_PREFIX = '::/64' @@ -131,6 +122,9 @@ IP_ALLOWED_VERSIONS = [lib_constants.IP_VERSION_4, lib_constants.IP_VERSION_6] +PORT_RANGE_MIN = 1 +PORT_RANGE_MAX = 65535 + # Some components communicate using private address ranges, define # them all here. 
These address ranges should not cause any issues # even if they overlap since they are used in disjoint namespaces, diff -Nru neutron-9.0.0~b2~dev280/neutron/common/_deprecate.py neutron-9.0.0~b3~dev557/neutron/common/_deprecate.py --- neutron-9.0.0~b2~dev280/neutron/common/_deprecate.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/common/_deprecate.py 2016-08-29 20:05:49.000000000 +0000 @@ -40,8 +40,6 @@ debtcollector.deprecate( name, message='moved to %s' % other_mod.__name__, - version='mitaka', - removal_version='newton', stacklevel=4) return vars(other_mod)[name] diff -Nru neutron-9.0.0~b2~dev280/neutron/common/exceptions.py neutron-9.0.0~b3~dev557/neutron/common/exceptions.py --- neutron-9.0.0~b2~dev280/neutron/common/exceptions.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/common/exceptions.py 2016-08-29 20:05:49.000000000 +0000 @@ -21,19 +21,6 @@ from neutron.common import _deprecate -class MultipleExceptions(Exception): - """Container for multiple exceptions encountered. - - The API layer of Neutron will automatically unpack, translate, - filter, and combine the inner exceptions in any exception derived - from this class. - """ - - def __init__(self, exceptions, *args, **kwargs): - super(MultipleExceptions, self).__init__(*args, **kwargs) - self.inner_exceptions = exceptions - - class SubnetPoolNotFound(e.NotFound): message = _("Subnet pool %(subnetpool_id)s could not be found.") @@ -57,14 +44,6 @@ "could not be found.") -class PolicyInitError(e.NeutronException): - message = _("Failed to init policy %(policy)s because %(reason)s.") - - -class PolicyCheckError(e.NeutronException): - message = _("Failed to check policy %(policy)s because %(reason)s.") - - class PolicyRemoveAuthorizationError(e.NotAuthorized): message = _("Failed to remove provided policy %(policy_id)s " "because you are not authorized.") @@ -313,10 +292,6 @@ message = _("Per-tenant subnet pool prefix quota exceeded.") -class DeviceNotFoundError(e.NeutronException): - message = _("Device '%(device_name)s' does not exist.") - - class NetworkSubnetPoolAffinityError(e.BadRequest): message = _("Subnets hosted on the same network must be allocated from " "the same subnet pool.") @@ -335,6 +310,10 @@ message = _("Quota for tenant %(tenant_id)s could not be found.") +class TenantIdProjectIdFilterConflict(e.BadRequest): + message = _("Both tenant_id and project_id passed as filters.") + + # Neutron-lib migration shim. This will wrap any exceptions that are moved # to that library in a deprecation warning, until they can be updated to # import directly from their new location. 
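Note: the migration shim mentioned in the comment above is driven by neutron/common/_deprecate.py, whose hunk earlier in this diff shows the forwarding step (`debtcollector.deprecate(...)` followed by `return vars(other_mod)[name]`). A minimal sketch of the pattern, assuming a proxy object swapped into sys.modules; the names and structure here are illustrative, not neutron's exact implementation:

    import debtcollector


    class _MovedGlobalsProxy(object):
        """Module stand-in that forwards moved attributes to a new home."""

        def __init__(self, old_mod, new_mod):
            self._old_mod = old_mod
            self._new_mod = new_mod

        def __getattr__(self, name):
            try:
                # Anything still defined in the shimmed module is
                # returned untouched.
                return getattr(self._old_mod, name)
            except AttributeError:
                # A moved name: emit a deprecation warning, then resolve
                # it against the new module, mirroring the _deprecate.py
                # hunk above.
                debtcollector.deprecate(
                    name,
                    message='moved to %s' % self._new_mod.__name__,
                    stacklevel=4)
                return getattr(self._new_mod, name)

    # At the bottom of the shimmed module one would do something like:
    # import sys
    # from neutron_lib import exceptions as lib_exc
    # sys.modules[__name__] = _MovedGlobalsProxy(sys.modules[__name__], lib_exc)

This keeps existing `from neutron.common import exceptions` imports working while steering callers toward neutron_lib.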
diff -Nru neutron-9.0.0~b2~dev280/neutron/common/ipv6_utils.py neutron-9.0.0~b3~dev557/neutron/common/ipv6_utils.py --- neutron-9.0.0~b2~dev280/neutron/common/ipv6_utils.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/common/ipv6_utils.py 2016-08-29 20:05:49.000000000 +0000 @@ -23,7 +23,6 @@ from oslo_log import log from neutron._i18n import _, _LI -from neutron.common import constants LOG = log.getLogger(__name__) @@ -67,7 +66,7 @@ def is_auto_address_subnet(subnet): """Check if subnet is an auto address subnet.""" - modes = [constants.IPV6_SLAAC, constants.DHCPV6_STATELESS] + modes = [const.IPV6_SLAAC, const.DHCPV6_STATELESS] return (subnet['ipv6_address_mode'] in modes or subnet['ipv6_ra_mode'] in modes) diff -Nru neutron-9.0.0~b2~dev280/neutron/common/rpc.py neutron-9.0.0~b3~dev557/neutron/common/rpc.py --- neutron-9.0.0~b2~dev280/neutron/common/rpc.py 2016-05-23 21:19:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/common/rpc.py 2016-08-29 20:05:49.000000000 +0000 @@ -229,13 +229,7 @@ trace_info = rpc_ctxt_dict.pop("trace_info", None) if trace_info: profiler.init(**trace_info) - user_id = rpc_ctxt_dict.pop('user_id', None) - if not user_id: - user_id = rpc_ctxt_dict.pop('user', None) - tenant_id = rpc_ctxt_dict.pop('tenant_id', None) - if not tenant_id: - tenant_id = rpc_ctxt_dict.pop('project_id', None) - return context.Context(user_id, tenant_id, **rpc_ctxt_dict) + return context.Context.from_dict(rpc_ctxt_dict) @profiler.trace_cls("rpc") diff -Nru neutron-9.0.0~b2~dev280/neutron/common/utils.py neutron-9.0.0~b3~dev557/neutron/common/utils.py --- neutron-9.0.0~b2~dev280/neutron/common/utils.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/common/utils.py 2016-08-29 20:05:49.000000000 +0000 @@ -22,9 +22,11 @@ import decimal import errno import functools +import importlib import math import multiprocessing import os +import os.path import random import signal import socket @@ -33,6 +35,7 @@ import time import uuid +import eventlet from eventlet.green import subprocess import netaddr from neutron_lib import constants as n_const @@ -45,6 +48,7 @@ import six from stevedore import driver +import neutron from neutron._i18n import _, _LE from neutron.db import api as db_api @@ -313,7 +317,7 @@ plain IP addresses specifically to avoid ambiguity. """ if '/' not in str(cidr): - raise ValueError("cidr doesn't contain a '/'") + raise ValueError(_("cidr doesn't contain a '/'")) net = netaddr.IPNetwork(cidr) if net.version == 4: return net.prefixlen == n_const.IPv4_BITS @@ -517,7 +521,7 @@ def create_object_with_dependency(creator, dep_getter, dep_creator, - dep_id_attr): + dep_id_attr, dep_deleter): """Creates an object that binds to a dependency while handling races. creator is a function that expected to take the result of either @@ -527,11 +531,14 @@ dep_id_attr be used to determine if the dependency changed during object creation. - dep_getter should return None if the dependency does not exist + dep_deleter will be called with the result of dep_creator if the creator + function fails due to a non-dependency reason or the retries are exceeded. + + dep_getter should return None if the dependency does not exist. dep_creator can raise a DBDuplicateEntry to indicate that a concurrent - create of the dependency occured and the process will restart to get the - concurrently created one + create of the dependency occurred and the process will restart to get the + concurrently created one.
This function will return both the created object and the dependency it used/created. @@ -541,17 +548,16 @@ process of creating the dependency if one no longer exists. It will give up after neutron.db.api.MAX_RETRIES and raise the exception it encounters after that. - - TODO(kevinbenton): currently this does not try to delete the dependency - it created. This matches the semantics of the HA network logic it is used - for but it should be modified to cleanup in the future. """ - result, dependency, dep_id = None, None, None + result, dependency, dep_id, made_locally = None, None, None, False for attempts in range(1, db_api.MAX_RETRIES + 1): # we go to max + 1 here so the exception handlers can raise their # errors at the end try: - dependency = dep_getter() or dep_creator() + dependency = dep_getter() + if not dependency: + dependency = dep_creator() + made_locally = True dep_id = getattr(dependency, dep_id_attr) except db_exc.DBDuplicateEntry: # dependency was concurrently created. @@ -575,6 +581,16 @@ if not dependency or dep_id != getattr(dependency, dep_id_attr): ctx.reraise = False + continue + # we have exceeded retries or have encountered a non-dependency + # related failure so we try to clean up the dependency if we + # created it before re-raising + if made_locally and dependency: + try: + dep_deleter(dependency) + except Exception: + LOG.exception(_LE("Failed cleaning up dependency %s"), + dep_id) return result, dependency @@ -594,13 +610,31 @@ """ @functools.wraps(f) def inner(self, context, *args, **kwargs): - if context.session.is_active: - raise RuntimeError(_("Method cannot be called within a " - "transaction.")) + # FIXME(kevinbenton): get rid of all uses of this flag + if (context.session.is_active and + getattr(context, 'GUARD_TRANSACTION', True)): + raise RuntimeError(_("Method %s cannot be called within a " + "transaction.") % f) return f(self, context, *args, **kwargs) return inner +def wait_until_true(predicate, timeout=60, sleep=1, exception=None): + """ + Wait until the callable predicate evaluates to True + + :param predicate: Callable deciding whether waiting should continue. + Best practice is to instantiate predicate with functools.partial() + :param timeout: Timeout in seconds; how long the function should wait. + :param sleep: Polling interval for results in seconds. + :param exception: Exception class for eventlet.Timeout.
+ (see doc for eventlet.Timeout for more information) + """ + with eventlet.timeout.Timeout(timeout, exception): + while not predicate(): + eventlet.sleep(sleep) + + class _AuthenticBase(object): def __init__(self, addr, **kwargs): super(_AuthenticBase, self).__init__(addr, **kwargs) @@ -643,3 +677,52 @@ def __get__(self, obj, owner): return self.func(owner) + + +_NO_ARGS_MARKER = object() + + +def attach_exc_details(e, msg, args=_NO_ARGS_MARKER): + e._error_context_msg = msg + e._error_context_args = args + + +def extract_exc_details(e): + for attr in ('_error_context_msg', '_error_context_args'): + if not hasattr(e, attr): + return _LE('No details.') + details = e._error_context_msg + args = e._error_context_args + if args is _NO_ARGS_MARKER: + return details + return details % args + + +def import_modules_recursively(topdir): + '''Import and return all modules below the topdir directory.''' + modules = [] + for root, dirs, files in os.walk(topdir): + for file_ in files: + if file_[-3:] != '.py': + continue + + module = file_[:-3] + if module == '__init__': + continue + + import_base = root.replace('/', '.') + + # NOTE(ihrachys): in Python3, or when we are not located in the + # directory containing neutron code, __file__ is absolute, so we + # should truncate it to exclude PYTHONPATH prefix + prefixlen = len(os.path.dirname(neutron.__file__)) + import_base = 'neutron' + import_base[prefixlen:] + + module = '.'.join([import_base, module]) + if module not in sys.modules: + importlib.import_module(module) + modules.append(module) + + for dir_ in dirs: + modules.extend(import_modules_recursively(dir_)) + return modules diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/agent/cmd.py neutron-9.0.0~b3~dev557/neutron/conf/agent/cmd.py --- neutron-9.0.0~b2~dev280/neutron/conf/agent/cmd.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/agent/cmd.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from neutron._i18n import _ +from neutron.agent.linux import ipset_manager + + +ip_opts = [ + cfg.BoolOpt('allsets', + default=False, + help=_('Destroy all IPsets.')), + cfg.BoolOpt('force', + default=False, + help=_('Destroy IPsets even if there is an iptables ' + 'reference.')), + cfg.StrOpt('prefix', + default=ipset_manager.NET_PREFIX, + help=_('String prefix used to match IPset names.')), +] + +netns_opts = [ + cfg.BoolOpt('force', + default=False, + help=_('Delete the namespace by removing all devices.')), + cfg.StrOpt('agent-type', + choices=['dhcp', 'l3', 'lbaas'], + help=_('Cleanup resources of a specific agent type only.')), +] + +ovs_opts = [ + cfg.BoolOpt('ovs_all_ports', + default=False, + help=_('True to delete all ports on all the OpenvSwitch ' + 'bridges. 
False to delete ports created by ' + 'Neutron on integration and external network ' + 'bridges.')) +] + + +def register_cmd_opts(opts, cfg=cfg.CONF): + cfg.register_cli_opts(opts) diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/agent/dhcp.py neutron-9.0.0~b3~dev557/neutron/conf/agent/dhcp.py --- neutron-9.0.0~b2~dev280/neutron/conf/agent/dhcp.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/agent/dhcp.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,110 @@ +# Copyright 2015 OpenStack Foundation +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from oslo_config import cfg + +from neutron._i18n import _ + + +DHCP_AGENT_OPTS = [ + cfg.IntOpt('resync_interval', default=5, + help=_("The DHCP agent will resync its state with Neutron to " + "recover from any transient notification or RPC errors. " + "The interval is number of seconds between attempts.")), + cfg.StrOpt('dhcp_driver', + default='neutron.agent.linux.dhcp.Dnsmasq', + help=_("The driver used to manage the DHCP server.")), + cfg.BoolOpt('enable_isolated_metadata', default=False, + help=_("The DHCP server can assist with providing metadata " + "support on isolated networks. Setting this value to " + "True will cause the DHCP server to append specific " + "host routes to the DHCP request. The metadata service " + "will only be activated when the subnet does not " + "contain any router port. The guest instance must be " + "configured to request host routes via DHCP (Option " + "121). This option doesn't have any effect when " + "force_metadata is set to True.")), + cfg.BoolOpt('force_metadata', default=False, + help=_("In some cases the Neutron router is not present to " + "provide the metadata IP but the DHCP server can be " + "used to provide this info. Setting this value will " + "force the DHCP server to append specific host routes " + "to the DHCP request. If this option is set, then the " + "metadata service will be activated for all the " + "networks.")), + cfg.BoolOpt('enable_metadata_network', default=False, + help=_("Allows for serving metadata requests coming from a " + "dedicated metadata access network whose CIDR is " + "169.254.169.254/16 (or larger prefix), and is " + "connected to a Neutron router from which the VMs send " + "metadata:1 request. In this case DHCP Option 121 will " + "not be injected in VMs, as they will be able to reach " + "169.254.169.254 through a router. This option " + "requires enable_isolated_metadata = True.")), + cfg.IntOpt('num_sync_threads', default=4, + help=_('Number of threads to use during sync process. ' + 'Should not exceed connection pool size configured on ' + 'server.')) +] + +DHCP_OPTS = [ + cfg.StrOpt('dhcp_confs', + default='$state_path/dhcp', + help=_('Location to store DHCP server config files.')), + cfg.StrOpt('dhcp_domain', + default='openstacklocal', + help=_('Domain to use for building the hostnames. ' + 'This option is deprecated. It has been moved to ' + 'neutron.conf as dns_domain. 
It will be removed ' + 'in a future release.'), + deprecated_for_removal=True), +] + +DNSMASQ_OPTS = [ + cfg.StrOpt('dnsmasq_config_file', + default='', + help=_('Override the default dnsmasq settings ' + 'with this file.')), + cfg.ListOpt('dnsmasq_dns_servers', + default=[], + help=_('Comma-separated list of the DNS servers which will be ' + 'used as forwarders.')), + cfg.StrOpt('dnsmasq_base_log_dir', + help=_("Base log dir for dnsmasq logging. " + "The log contains DHCP and DNS log information and " + "is useful for debugging issues with either DHCP or " + "DNS. If this section is null, disable dnsmasq log.")), + cfg.BoolOpt('dnsmasq_local_resolv', default=False, + help=_("Enables the dnsmasq service to provide name " + "resolution for instances via DNS resolvers on the " + "host running the DHCP agent. Effectively removes the " + "'--no-resolv' option from the dnsmasq process " + "arguments. Adding custom DNS resolvers to the " + "'dnsmasq_dns_servers' option disables this feature.")), + cfg.IntOpt( + 'dnsmasq_lease_max', + default=(2 ** 24), + help=_('Limit number of leases to prevent a denial-of-service.')), + cfg.BoolOpt('dhcp_broadcast_reply', default=False, + help=_("Use broadcast in DHCP replies.")), +] + + +def register_agent_dhcp_opts(cfg=cfg.CONF): + cfg.register_opts(DHCP_AGENT_OPTS) + cfg.register_opts(DHCP_OPTS) + cfg.register_opts(DNSMASQ_OPTS) diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/agent/l3/config.py neutron-9.0.0~b3~dev557/neutron/conf/agent/l3/config.py --- neutron-9.0.0~b2~dev280/neutron/conf/agent/l3/config.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/agent/l3/config.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,105 @@ +# Copyright (c) 2015 OpenStack Foundation. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lib import constants +from oslo_config import cfg + +from neutron._i18n import _ +from neutron.agent.common import config + + +OPTS = [ + cfg.StrOpt('agent_mode', default=constants.L3_AGENT_MODE_LEGACY, + choices=(constants.L3_AGENT_MODE_DVR, + constants.L3_AGENT_MODE_DVR_SNAT, + constants.L3_AGENT_MODE_LEGACY), + help=_("The working mode for the agent. Allowed modes are: " + "'legacy' - this preserves the existing behavior " + "where the L3 agent is deployed on a centralized " + "networking node to provide L3 services like DNAT, " + "and SNAT. Use this mode if you do not want to " + "adopt DVR. 'dvr' - this mode enables DVR " + "functionality and must be used for an L3 agent " + "that runs on a compute host. 'dvr_snat' - this " + "enables centralized SNAT support in conjunction " + "with DVR. This mode must be used for an L3 agent " + "running on a centralized node (or in single-host " + "deployments, e.g. 
devstack)")), + cfg.PortOpt('metadata_port', + default=9697, + help=_("TCP Port used by Neutron metadata namespace proxy.")), + cfg.IntOpt('send_arp_for_ha', + default=3, + help=_("Send this many gratuitous ARPs for HA setup, if " + "less than or equal to 0, the feature is disabled")), + cfg.BoolOpt('handle_internal_only_routers', + default=True, + help=_("Indicates that this L3 agent should also handle " + "routers that do not have an external network gateway " + "configured. This option should be True only for a " + "single agent in a Neutron deployment, and may be " + "False for all agents if all routers must have an " + "external network gateway.")), + cfg.StrOpt('gateway_external_network_id', default='', + help=_("When external_network_bridge is set, each L3 agent can " + "be associated with no more than one external network. " + "This value should be set to the UUID of that external " + "network. To allow L3 agent support multiple external " + "networks, both the external_network_bridge and " + "gateway_external_network_id must be left empty.")), + cfg.StrOpt('ipv6_gateway', default='', + help=_("With IPv6, the network used for the external gateway " + "does not need to have an associated subnet, since the " + "automatically assigned link-local address (LLA) can " + "be used. However, an IPv6 gateway address is needed " + "for use as the next-hop for the default route. " + "If no IPv6 gateway address is configured here, " + "(and only then) the neutron router will be configured " + "to get its default route from router advertisements " + "(RAs) from the upstream router; in which case the " + "upstream router must also be configured to send " + "these RAs. " + "The ipv6_gateway, when configured, should be the LLA " + "of the interface on the upstream router. If a " + "next-hop using a global unique address (GUA) is " + "desired, it needs to be done via a subnet allocated " + "to the network and not through this parameter. ")), + cfg.StrOpt('prefix_delegation_driver', + default='dibbler', + help=_('Driver used for ipv6 prefix delegation. This needs to ' + 'be an entry point defined in the ' + 'neutron.agent.linux.pd_drivers namespace. See ' + 'setup.cfg for entry points included with the neutron ' + 'source.')), + cfg.BoolOpt('enable_metadata_proxy', default=True, + help=_("Allow running metadata proxy.")), + cfg.StrOpt('metadata_access_mark', + default='0x1', + help=_('Iptables mangle mark used to mark metadata valid ' + 'requests. This mark will be masked with 0xffff so ' + 'that only the lower 16 bits will be used.')), + cfg.StrOpt('external_ingress_mark', + default='0x2', + help=_('Iptables mangle mark used to mark ingress from ' + 'external network. This mark will be masked with ' + '0xffff so that only the lower 16 bits will be used.')), +] + +OPTS += config.EXT_NET_BRIDGE_OPTS + + +def register_l3_agent_config_opts(opts, cfg=cfg.CONF): + cfg.register_opts(opts) diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/agent/l3/keepalived.py neutron-9.0.0~b3~dev557/neutron/conf/agent/l3/keepalived.py --- neutron-9.0.0~b2~dev280/neutron/conf/agent/l3/keepalived.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/agent/l3/keepalived.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,47 @@ +# Copyright (c) 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from oslo_config import cfg + +from neutron._i18n import _ + + +CLI_OPTS = [ + cfg.StrOpt('router_id', help=_('ID of the router')), + cfg.StrOpt('namespace', help=_('Namespace of the router')), + cfg.StrOpt('conf_dir', help=_('Path to the router directory')), + cfg.StrOpt('monitor_interface', help=_('Interface to monitor')), + cfg.StrOpt('monitor_cidr', help=_('CIDR to monitor')), + cfg.StrOpt('pid_file', help=_('Path to PID file for this process')), + cfg.StrOpt('user', help=_('User (uid or name) running this process ' + 'after its initialization')), + cfg.StrOpt('group', help=_('Group (gid or name) running this process ' + 'after its initialization')) +] + +OPTS = [ + cfg.StrOpt('metadata_proxy_socket', + default='$state_path/metadata_proxy', + help=_('Location of Metadata Proxy UNIX domain ' + 'socket')) +] + + +def register_cli_l3_agent_keepalived_opts(conf=cfg.CONF): + conf.register_cli_opts(CLI_OPTS) + + +def register_l3_agent_keepalived_opts(conf=cfg.CONF): + conf.register_opts(OPTS) diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/agent/ovs_conf.py neutron-9.0.0~b3~dev557/neutron/conf/agent/ovs_conf.py --- neutron-9.0.0~b2~dev280/neutron/conf/agent/ovs_conf.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/agent/ovs_conf.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,33 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from neutron._i18n import _ + +# Default timeout for ovs-vsctl command +DEFAULT_OVS_VSCTL_TIMEOUT = 10 + +OPTS = [ + cfg.IntOpt('ovs_vsctl_timeout', + default=DEFAULT_OVS_VSCTL_TIMEOUT, + help=_('Timeout in seconds for ovs-vsctl commands. ' + 'If the timeout expires, ovs commands will fail with ' + 'ALARMCLOCK error.')), +] + + +def register_ovs_agent_opts(cfg=cfg.CONF): + cfg.register_opts(OPTS) diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/agent/securitygroups_rpc.py neutron-9.0.0~b3~dev557/neutron/conf/agent/securitygroups_rpc.py --- neutron-9.0.0~b2~dev280/neutron/conf/agent/securitygroups_rpc.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/agent/securitygroups_rpc.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,44 @@ +# Copyright 2012, Nachi Ueno, NTT MCL, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + + +from oslo_config import cfg + +from neutron._i18n import _ + + +security_group_opts = [ + cfg.StrOpt( + 'firewall_driver', + help=_('Driver for security groups firewall in the L2 agent')), + cfg.BoolOpt( + 'enable_security_group', + default=True, + help=_( + 'Controls whether the neutron security group API is enabled ' + 'in the server. It should be false when using no security ' + 'groups or using the nova security group API.')), + cfg.BoolOpt( + 'enable_ipset', + default=True, + help=_('Use ipset to speed up the iptables-based security groups. ' + 'Enabling ipset support requires that ipset is installed on ' + 'the L2 agent node.')) +] + + +def register_securitygroups_opts(cfg=cfg.CONF): + cfg.register_opts(security_group_opts, 'SECURITYGROUP') diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/common.py neutron-9.0.0~b3~dev557/neutron/conf/common.py --- neutron-9.0.0~b2~dev280/neutron/conf/common.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/common.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,189 @@ +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from oslo_config import cfg +from oslo_service import wsgi + +from neutron._i18n import _ +from neutron.common import constants +from neutron.common import utils + + +core_opts = [ + cfg.StrOpt('bind_host', default='0.0.0.0', + help=_("The host IP to bind to")), + cfg.PortOpt('bind_port', default=9696, + help=_("The port to bind to")), + cfg.StrOpt('api_extensions_path', default="", + help=_("The path for API extensions. " + "Note that this can be a colon-separated list of paths. " + "For example: api_extensions_path = " + "extensions:/path/to/more/exts:/even/more/exts. " + "The __path__ of neutron.extensions is appended to " + "this, so if your extensions are in there you don't " + "need to specify them here.")), + cfg.StrOpt('auth_strategy', default='keystone', + help=_("The type of authentication to use")), + cfg.StrOpt('core_plugin', + help=_("The core plugin Neutron will use")), + cfg.ListOpt('service_plugins', default=[], + help=_("The service plugins Neutron will use")), + cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00", + help=_("The base MAC address Neutron will use for VIFs. " + "The first 3 octets will remain unchanged. If the 4th " + "octet is not 00, it will also be used. The others " + "will be randomly generated.")), + cfg.IntOpt('mac_generation_retries', default=16, + deprecated_for_removal=True, + help=_("How many times Neutron will retry MAC generation.
This " + "option is now obsolete and so is deprecated to be " + "removed in the Ocata release.")), + cfg.BoolOpt('allow_bulk', default=True, + help=_("Allow the usage of the bulk API")), + cfg.BoolOpt('allow_pagination', default=True, + deprecated_for_removal=True, + help=_("Allow the usage of the pagination. This option has " + "been deprecated and will now be enabled " + "unconditionally.")), + cfg.BoolOpt('allow_sorting', default=True, + deprecated_for_removal=True, + help=_("Allow the usage of the sorting. This option has been " + "deprecated and will now be enabled unconditionally.")), + cfg.StrOpt('pagination_max_limit', default="-1", + help=_("The maximum number of items returned in a single " + "response, value was 'infinite' or negative integer " + "means no limit")), + cfg.ListOpt('default_availability_zones', default=[], + help=_("Default value of availability zone hints. The " + "availability zone aware schedulers use this when " + "the resources availability_zone_hints is empty. " + "Multiple availability zones can be specified by a " + "comma separated string. This value can be empty. " + "In this case, even if availability_zone_hints for " + "a resource is empty, availability zone is " + "considered for high availability while scheduling " + "the resource.")), + cfg.IntOpt('max_dns_nameservers', default=5, + help=_("Maximum number of DNS nameservers per subnet")), + cfg.IntOpt('max_subnet_host_routes', default=20, + help=_("Maximum number of host routes per subnet")), + cfg.IntOpt('max_fixed_ips_per_port', default=5, + deprecated_for_removal=True, + help=_("Maximum number of fixed ips per port. This option " + "is deprecated and will be removed in the N " + "release.")), + cfg.BoolOpt('ipv6_pd_enabled', default=False, + help=_("Enables IPv6 Prefix Delegation for automatic subnet " + "CIDR allocation. " + "Set to True to enable IPv6 Prefix Delegation for " + "subnet allocation in a PD-capable environment. Users " + "making subnet creation requests for IPv6 subnets " + "without providing a CIDR or subnetpool ID will be " + "given a CIDR via the Prefix Delegation mechanism. " + "Note that enabling PD will override the behavior of " + "the default IPv6 subnetpool.")), + cfg.IntOpt('dhcp_lease_duration', default=86400, + deprecated_name='dhcp_lease_time', + help=_("DHCP lease duration (in seconds). Use -1 to tell " + "dnsmasq to use infinite lease times.")), + cfg.StrOpt('dns_domain', + default='openstacklocal', + help=_('Domain to use for building the hostnames')), + cfg.StrOpt('external_dns_driver', + help=_('Driver for external DNS integration.')), + cfg.BoolOpt('dhcp_agent_notification', default=True, + help=_("Allow sending resource operation" + " notification to DHCP agent")), + cfg.BoolOpt('allow_overlapping_ips', default=False, + help=_("Allow overlapping IP support in Neutron. " + "Attention: the following parameter MUST be set to " + "False if Neutron is being used in conjunction with " + "Nova security groups.")), + cfg.StrOpt('host', default=utils.get_hostname(), + sample_default='example.domain', + help=_("Hostname to be used by the Neutron server, agents and " + "services running on this machine. 
All the agents and " + "services running on this machine must use the same " + "host value.")), + cfg.BoolOpt('notify_nova_on_port_status_changes', default=True, + help=_("Send notification to nova when port status changes")), + cfg.BoolOpt('notify_nova_on_port_data_changes', default=True, + help=_("Send notification to nova when port data (fixed_ips/" + "floatingip) changes so nova can update its cache.")), + cfg.IntOpt('send_events_interval', default=2, + help=_('Number of seconds between sending events to nova if ' + 'there are any events to send.')), + cfg.BoolOpt('advertise_mtu', default=True, + deprecated_for_removal=True, + help=_('If True, advertise network MTU values if core plugin ' + 'calculates them. MTU is advertised to running ' + 'instances via DHCP and RA MTU options.')), + cfg.StrOpt('ipam_driver', default='internal', + help=_("Neutron IPAM (IP address management) driver to use. " + "If ipam_driver is not set (default behavior), no IPAM " + "driver is used. In order to use the reference " + "implementation of Neutron IPAM driver, " + "use 'internal'.")), + cfg.BoolOpt('vlan_transparent', default=False, + help=_('If True, then allow plugins that support it to ' + 'create VLAN transparent networks.')), + cfg.StrOpt('web_framework', default='legacy', + choices=('legacy', 'pecan'), + help=_("This will choose the web framework in which to run " + "the Neutron API server. 'pecan' is a new experimental " + "rewrite of the API server.")), + cfg.IntOpt('global_physnet_mtu', default=constants.DEFAULT_NETWORK_MTU, + deprecated_name='segment_mtu', deprecated_group='ml2', + help=_('MTU of the underlying physical network. Neutron uses ' + 'this value to calculate MTU for all virtual network ' + 'components. For flat and VLAN networks, neutron uses ' + 'this value without modification. For overlay networks ' + 'such as VXLAN, neutron automatically subtracts the ' + 'overlay protocol overhead from this value. Defaults ' + 'to 1500, the standard value for Ethernet.')) +] + +core_cli_opts = [ + cfg.StrOpt('state_path', + default='/var/lib/neutron', + help=_("Where to store Neutron state files. " + "This directory must be writable by the agent.")), +] + + +def register_core_common_config_opts(cfg=cfg.CONF): + cfg.register_opts(core_opts) + cfg.register_cli_opts(core_cli_opts) + wsgi.register_opts(cfg) + + +NOVA_CONF_SECTION = 'nova' + +nova_opts = [ + cfg.StrOpt('region_name', + help=_('Name of nova region to use. Useful if keystone manages' + ' more than one region.')), + cfg.StrOpt('endpoint_type', + default='public', + choices=['public', 'admin', 'internal'], + help=_('Type of the nova endpoint to use. This endpoint will' + ' be looked up in the keystone catalog and should be' + ' one of public, internal or admin.')), +] + + +def register_nova_opts(cfg=cfg.CONF): + cfg.register_opts(nova_opts, group=NOVA_CONF_SECTION) diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/extensions/allowedaddresspairs.py neutron-9.0.0~b3~dev557/neutron/conf/extensions/allowedaddresspairs.py --- neutron-9.0.0~b2~dev280/neutron/conf/extensions/allowedaddresspairs.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/extensions/allowedaddresspairs.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,28 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from neutron._i18n import _ + + +allowed_address_pair_opts = [ + # TODO(limao): use the quota framework when it supports quota for attributes + cfg.IntOpt('max_allowed_address_pair', default=10, + help=_("Maximum number of allowed address pairs")), +] + + +def register_allowed_address_pair_opts(cfg=cfg.CONF): + cfg.register_opts(allowed_address_pair_opts) diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/plugins/ml2/drivers/agent.py neutron-9.0.0~b3~dev557/neutron/conf/plugins/ml2/drivers/agent.py --- neutron-9.0.0~b2~dev280/neutron/conf/plugins/ml2/drivers/agent.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/plugins/ml2/drivers/agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,52 @@ +# Copyright (c) 2016 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from neutron._i18n import _ + +agent_opts = [ + cfg.IntOpt('polling_interval', default=2, + help=_("The number of seconds the agent will wait between " + "polling for local device changes.")), + cfg.IntOpt('quitting_rpc_timeout', default=10, + help=_("Set a new timeout in seconds for new rpc calls after " + "the agent receives SIGTERM. If the value is set to 0, " + "the rpc timeout won't be changed")), + # TODO(kevinbenton): The following opt is duplicated between the OVS agent + # and the Linuxbridge agent to make it easy to back-port. These shared opts + # should be moved into a common agent config options location as part of + # the deduplication work. + cfg.BoolOpt('prevent_arp_spoofing', default=True, + deprecated_for_removal=True, + help=_("Enable suppression of ARP responses that don't match " + "an IP address that belongs to the port from which " + "they originate. Note: This prevents the VMs attached " + "to this agent from spoofing; it doesn't protect them " + "from other devices which have the capability to spoof " + "(e.g. bare metal or VMs attached to agents without " + "this flag set to True). Spoofing rules will not be " + "added to any ports that have port security disabled. " + "For LinuxBridge, this requires ebtables. For OVS, it " + "requires a version that supports matching ARP " + "headers.
This option will be removed in Ocata so " + "the only way to disable protection will be via the " + "port security extension.")) +] + + +def register_agent_opts(cfg=cfg.CONF): + cfg.register_opts(agent_opts, "AGENT") diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/plugins/ml2/drivers/linuxbridge.py neutron-9.0.0~b3~dev557/neutron/conf/plugins/ml2/drivers/linuxbridge.py --- neutron-9.0.0~b2~dev280/neutron/conf/plugins/ml2/drivers/linuxbridge.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/plugins/ml2/drivers/linuxbridge.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,95 @@ +# Copyright 2012 Cisco Systems, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from neutron._i18n import _ + +DEFAULT_BRIDGE_MAPPINGS = [] +DEFAULT_INTERFACE_MAPPINGS = [] +DEFAULT_VXLAN_GROUP = '224.0.0.1' +DEFAULT_KERNEL_HZ_VALUE = 250 # [Hz] +DEFAULT_TC_TBF_LATENCY = 50 # [ms] + +vxlan_opts = [ + cfg.BoolOpt('enable_vxlan', default=True, + help=_("Enable VXLAN on the agent. Can be enabled when the " + "agent is managed by the ml2 plugin using the " + "linuxbridge mechanism driver")), + cfg.IntOpt('ttl', + help=_("TTL for vxlan interface protocol packets.")), + cfg.IntOpt('tos', + help=_("TOS for vxlan interface protocol packets.")), + cfg.StrOpt('vxlan_group', default=DEFAULT_VXLAN_GROUP, + help=_("Multicast group(s) for vxlan interface. A range of " + "group addresses may be specified by using CIDR " + "notation. Specifying a range allows different VNIs to " + "use different group addresses, reducing or eliminating " + "spurious broadcast traffic to the tunnel endpoints. " + "To reserve a unique group for each possible " + "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This " + "setting must be the same on all the agents.")), + cfg.IPOpt('local_ip', + help=_("IP address of local overlay (tunnel) network endpoint. " + "Use either an IPv4 or IPv6 address that resides on one " + "of the host network interfaces. The IP version of this " + "value must match the value of the 'overlay_ip_version' " + "option in the ML2 plug-in configuration file on the " + "neutron server node(s).")), + cfg.BoolOpt('l2_population', default=False, + help=_("Extension to use alongside ml2 plugin's l2population " + "mechanism driver. It enables the plugin to populate " + "the VXLAN forwarding table.")), + cfg.BoolOpt('arp_responder', default=False, + help=_("Enable local ARP responder which provides local " + "responses instead of performing ARP broadcast into " + "the overlay. Enabling local ARP responder is not " + "fully compatible with the allowed-address-pairs " + "extension.") + ), +] + +bridge_opts = [ + cfg.ListOpt('physical_interface_mappings', + default=DEFAULT_INTERFACE_MAPPINGS, + help=_("Comma-separated list of " + "<physical_network>:<physical_interface> tuples " + "mapping physical network names to the agent's " + "node-specific physical network interfaces to be used " + "for flat and VLAN networks.
All physical networks " + "listed in network_vlan_ranges on the server should " + "have mappings to appropriate interfaces on each " + "agent.")), + cfg.ListOpt('bridge_mappings', + default=DEFAULT_BRIDGE_MAPPINGS, + help=_("List of <physical_network>:<physical_bridge>")), +] + +qos_options = [ + cfg.IntOpt('kernel_hz', default=DEFAULT_KERNEL_HZ_VALUE, + help=_("Value of host kernel tick rate (hz) for calculating " + "minimum burst value in bandwidth limit rules for " + "a port with QoS. See kernel configuration file for " + "HZ value and tc-tbf manual for more information.")), + cfg.IntOpt('tbf_latency', default=DEFAULT_TC_TBF_LATENCY, + help=_("Value of latency (ms) for calculating size of queue " + "for a port with QoS. See tc-tbf manual for more " + "information.")) +] + + +def register_linuxbridge_opts(cfg=cfg.CONF): + cfg.register_opts(vxlan_opts, "VXLAN") + cfg.register_opts(bridge_opts, "LINUX_BRIDGE") + cfg.register_opts(qos_options, "QOS") diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/quota.py neutron-9.0.0~b3~dev557/neutron/conf/quota.py --- neutron-9.0.0~b2~dev280/neutron/conf/quota.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/quota.py 2016-08-03 20:10:33.000000000 +0000 @@ -21,19 +21,12 @@ QUOTA_DB_MODULE = 'neutron.db.quota.driver' QUOTA_DB_DRIVER = '%s.DbQuotaDriver' % QUOTA_DB_MODULE QUOTA_CONF_DRIVER = 'neutron.quota.ConfDriver' -default_quota_items = ['network', 'subnet', 'port'] QUOTAS_CFG_GROUP = 'QUOTAS' # quota_opts from neutron/quota/__init__.py # renamed quota_opts to core_quota_opts core_quota_opts = [ - cfg.ListOpt('quota_items', - default=default_quota_items, - deprecated_for_removal=True, - help=_('Resource name(s) that are supported in quota ' - 'features. This option is now deprecated for ' - 'removal.')), cfg.IntOpt('default_quota', default=-1, help=_('Default number of resource allowed per tenant. ' diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/services/extdns_designate_driver.py neutron-9.0.0~b3~dev557/neutron/conf/services/extdns_designate_driver.py --- neutron-9.0.0~b2~dev280/neutron/conf/services/extdns_designate_driver.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/services/extdns_designate_driver.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,64 @@ +# Copyright (c) 2016 IBM +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
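# [Editor's note: illustrative sketch, not part of the diff.] The new
# neutron.conf.* modules in this diff all follow the same oslo.config
# pattern: a module-level list of Opt objects plus a small
# register_*_opts() helper, sometimes registering under a named group
# (register_linuxbridge_opts() above uses the "VXLAN", "LINUX_BRIDGE"
# and "QOS" groups). A minimal self-contained sketch of that pattern,
# with hypothetical option names that are not neutron's:

from oslo_config import cfg

_sketch_opts = [
    cfg.IntOpt('poll_interval', default=2,
               help='Hypothetical polling interval in seconds.'),
    cfg.BoolOpt('enable_feature', default=True,
                help='Hypothetical feature toggle.'),
]


def register_sketch_opts(conf=cfg.CONF):
    # Registering under a group makes the values available as
    # conf.SKETCH.<opt_name> instead of conf.<opt_name>.
    conf.register_opts(_sketch_opts, 'SKETCH')

# Usage:
#   register_sketch_opts()
#   assert cfg.CONF.SKETCH.poll_interval == 2
# [End editor's note.]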
+ +from oslo_config import cfg + +from neutron._i18n import _ + +designate_opts = [ + cfg.StrOpt('url', + help=_('URL for connecting to designate')), + cfg.StrOpt('admin_username', + help=_('Username for connecting to designate in admin ' + 'context')), + cfg.StrOpt('admin_password', + help=_('Password for connecting to designate in admin ' + 'context'), + secret=True), + cfg.StrOpt('admin_tenant_id', + help=_('Tenant id for connecting to designate in admin ' + 'context')), + cfg.StrOpt('admin_tenant_name', + help=_('Tenant name for connecting to designate in admin ' + 'context')), + cfg.StrOpt('admin_auth_url', + help=_('Authorization URL for connecting to designate in admin ' + 'context')), + cfg.BoolOpt('insecure', default=False, + help=_('Skip cert validation for SSL-based admin_auth_url')), + cfg.StrOpt('ca_cert', + help=_('CA certificate file to use to verify ' + 'connecting clients')), + cfg.BoolOpt('allow_reverse_dns_lookup', default=True, + help=_('Allow the creation of PTR records')), + cfg.IntOpt('ipv4_ptr_zone_prefix_size', default=24, + help=_('Number of bits in an ipv4 PTR zone that will be considered ' + 'network prefix. It has to align to a byte boundary. Minimum ' + 'value is 8. Maximum value is 24. As a consequence, the range ' + 'of values is 8, 16 and 24')), + cfg.IntOpt('ipv6_ptr_zone_prefix_size', default=120, + help=_('Number of bits in an ipv6 PTR zone that will be considered ' + 'network prefix. It has to align to a nybble boundary. Minimum ' + 'value is 4. Maximum value is 124. As a consequence, the range ' + 'of values is 4, 8, 12, 16,..., 124')), + cfg.StrOpt('ptr_zone_email', default='', + help=_('The email address to be used when creating PTR zones. ' + 'If not specified, the email address will be ' + 'admin@<dns_domain>')), +] + + +def register_designate_opts(cfg=cfg.CONF): + cfg.register_opts(designate_opts, 'designate') diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/services/metering_agent.py neutron-9.0.0~b3~dev557/neutron/conf/services/metering_agent.py --- neutron-9.0.0~b2~dev280/neutron/conf/services/metering_agent.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/services/metering_agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,32 @@ +# Copyright (C) 2013 eNovance SAS +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from neutron._i18n import _ + +metering_agent_opts = [ + cfg.StrOpt('driver', + default='neutron.services.metering.drivers.noop.'
+ 'noop_driver.NoopMeteringDriver', + help=_("Metering driver")), + cfg.IntOpt('measure_interval', default=30, + help=_("Interval between two metering measures")), + cfg.IntOpt('report_interval', default=300, + help=_("Interval between two metering reports")), +] + + +def register_metering_agent_opts(cfg=cfg.CONF): + cfg.register_opts(metering_agent_opts) diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/services/provider_configuration.py neutron-9.0.0~b3~dev557/neutron/conf/services/provider_configuration.py --- neutron-9.0.0~b2~dev280/neutron/conf/services/provider_configuration.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/services/provider_configuration.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,29 @@ +# Copyright 2013 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from neutron._i18n import _ + +serviceprovider_opts = [ + cfg.MultiStrOpt('service_provider', default=[], + help=_('Defines providers for advanced services ' + 'using the format: ' + '<service_type>:<name>:<driver>[:default]')) +] + + +def register_service_provider_opts(cfg=cfg.CONF): + cfg.register_opts(serviceprovider_opts, 'service_providers') diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/services/qos_driver_manager.py neutron-9.0.0~b3~dev557/neutron/conf/services/qos_driver_manager.py --- neutron-9.0.0~b2~dev280/neutron/conf/services/qos_driver_manager.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/services/qos_driver_manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,24 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from oslo_config import cfg + +from neutron._i18n import _ + +QOS_PLUGIN_OPTS = [ + cfg.ListOpt('notification_drivers', + default=['message_queue'], + help=_('List of drivers to use to send the update notification')), +] + + +def register_qos_plugin_opts(cfg=cfg.CONF): + cfg.register_opts(QOS_PLUGIN_OPTS, "qos") diff -Nru neutron-9.0.0~b2~dev280/neutron/conf/wsgi.py neutron-9.0.0~b3~dev557/neutron/conf/wsgi.py --- neutron-9.0.0~b2~dev280/neutron/conf/wsgi.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/conf/wsgi.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,37 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_service import wsgi + +from neutron._i18n import _ + +socket_opts = [ + cfg.IntOpt('backlog', + default=4096, + help=_("Number of backlog requests to configure " + "the socket with")), + cfg.IntOpt('retry_until_window', + default=30, + help=_("Number of seconds to keep retrying to listen")), + cfg.BoolOpt('use_ssl', + default=False, + help=_('Enable SSL on the API server')), +] + + +def register_socket_opts(cfg=cfg.CONF): + cfg.register_opts(socket_opts) + wsgi.register_opts(cfg) diff -Nru neutron-9.0.0~b2~dev280/neutron/context.py neutron-9.0.0~b3~dev557/neutron/context.py --- neutron-9.0.0~b2~dev280/neutron/context.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/context.py 2016-08-29 20:05:49.000000000 +0000 @@ -32,24 +32,20 @@ """ - def __init__(self, user_id, tenant_id, is_admin=None, roles=None, - timestamp=None, request_id=None, tenant_name=None, - user_name=None, overwrite=True, auth_token=None, + def __init__(self, user_id=None, tenant_id=None, is_admin=None, + timestamp=None, tenant_name=None, user_name=None, is_advsvc=None, **kwargs): """Object initialization. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. - - :param kwargs: Extra arguments that might be present, but we ignore - because they possibly came in from older rpc messages. """ - super(ContextBase, self).__init__(auth_token=auth_token, - user=user_id, tenant=tenant_id, - is_admin=is_admin, - request_id=request_id, - overwrite=overwrite, - roles=roles) + # NOTE(jamielennox): We maintain these arguments in order for tests + # that pass arguments positionally. + kwargs.setdefault('user', user_id) + kwargs.setdefault('tenant', tenant_id) + super(ContextBase, self).__init__(is_admin=is_admin, **kwargs) + self.user_name = user_name self.tenant_name = tenant_name @@ -97,7 +93,15 @@ @classmethod def from_dict(cls, values): - return cls(**values) + return cls(user_id=values.get('user_id', values.get('user')), + tenant_id=values.get('tenant_id', values.get('project_id')), + is_admin=values.get('is_admin'), + roles=values.get('roles'), + timestamp=values.get('timestamp'), + request_id=values.get('request_id'), + tenant_name=values.get('tenant_name'), + user_name=values.get('user_name'), + auth_token=values.get('auth_token')) def elevated(self): """Return a version of this context with admin flag set.""" diff -Nru neutron-9.0.0~b2~dev280/neutron/db/address_scope_db.py neutron-9.0.0~b3~dev557/neutron/db/address_scope_db.py --- neutron-9.0.0~b2~dev280/neutron/db/address_scope_db.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/address_scope_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -12,29 +12,21 @@ # License for the specific language governing permissions and limitations # under the License. 
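# [Editor's note: illustrative sketch, not part of the diff.] The
# from_dict() change to neutron/context.py above makes the classmethod
# tolerate both the legacy neutron key names ('user_id', 'tenant_id')
# and the oslo.context spellings ('user', 'project_id') when
# rehydrating a context, e.g. from an RPC payload. The class below is a
# simplified hypothetical stand-in, not neutron's ContextBase:

class SketchContext(object):
    def __init__(self, user_id=None, tenant_id=None):
        self.user_id = user_id
        self.tenant_id = tenant_id

    @classmethod
    def from_dict(cls, values):
        # Prefer the legacy key; fall back to the oslo.context spelling.
        return cls(user_id=values.get('user_id', values.get('user')),
                   tenant_id=values.get('tenant_id',
                                        values.get('project_id')))

# Both payload shapes rehydrate to the same context:
#   SketchContext.from_dict({'user_id': 'u1', 'tenant_id': 't1'})
#   SketchContext.from_dict({'user': 'u1', 'project_id': 't1'})
# [End editor's note.]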
+import sys + from neutron_lib import constants from oslo_utils import uuidutils -import sqlalchemy as sa from sqlalchemy.orm import exc from neutron._i18n import _ from neutron.api.v2 import attributes as attr +from neutron.common import _deprecate from neutron.db import db_base_plugin_v2 -from neutron.db import model_base +from neutron.db.models import address_scope as address_scope_model from neutron.extensions import address_scope as ext_address_scope from neutron.objects import subnetpool as subnetpool_obj -class AddressScope(model_base.BASEV2, model_base.HasId, model_base.HasTenant): - """Represents a neutron address scope.""" - - __tablename__ = "address_scopes" - - name = sa.Column(sa.String(attr.NAME_MAX_LEN), nullable=False) - shared = sa.Column(sa.Boolean, nullable=False) - ip_version = sa.Column(sa.Integer(), nullable=False) - - class AddressScopeDbMixin(ext_address_scope.AddressScopePluginBase): """Mixin class to add address scope to db_base_plugin_v2.""" @@ -50,7 +42,8 @@ def _get_address_scope(self, context, id): try: - return self._get_by_id(context, AddressScope, id) + return self._get_by_id(context, address_scope_model.AddressScope, + id) except exc.NoResultFound: raise ext_address_scope.AddressScopeNotFound(address_scope_id=id) @@ -83,7 +76,7 @@ 'name': a_s['name'], 'shared': a_s['shared'], 'ip_version': a_s['ip_version']} - address_scope = AddressScope(**pool_args) + address_scope = address_scope_model.AddressScope(**pool_args) context.session.add(address_scope) return self._make_address_scope_dict(address_scope) @@ -108,7 +101,8 @@ sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'addrscope', limit, marker) - collection = self._get_collection(context, AddressScope, + collection = self._get_collection(context, + address_scope_model.AddressScope, self._make_address_scope_dict, filters=filters, fields=fields, sorts=sorts, @@ -118,7 +112,8 @@ return collection def get_address_scopes_count(self, context, filters=None): - return self._get_collection_count(context, AddressScope, + return self._get_collection_count(context, + address_scope_model.AddressScope, filters=filters) def delete_address_scope(self, context, id): @@ -147,3 +142,10 @@ db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attr.NETWORKS, ['_extend_network_dict_address_scope']) + + +# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE +_OLD_REF = sys.modules[__name__] +sys.modules[__name__] = _deprecate._DeprecateSubset(globals(), + address_scope_model) +# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE diff -Nru neutron-9.0.0~b2~dev280/neutron/db/agentschedulers_db.py neutron-9.0.0~b3~dev557/neutron/db/agentschedulers_db.py --- neutron-9.0.0~b2~dev280/neutron/db/agentschedulers_db.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/agentschedulers_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -390,6 +390,9 @@ self._filter_bindings(context, down_bindings)] agents = self.get_agents_db( context, {'agent_type': [constants.AGENT_TYPE_DHCP]}) + if not agents: + # No agents configured so nothing to do. 
+ return active_agents = [agent for agent in agents if self.is_eligible_agent(context, True, agent)] if not active_agents: @@ -438,7 +441,8 @@ "rescheduling")) def get_dhcp_agents_hosting_networks( - self, context, network_ids, active=None, admin_state_up=None): + self, context, network_ids, active=None, admin_state_up=None, + hosts=None): if not network_ids: return [] query = context.session.query(ndab_model.NetworkDhcpAgentBinding) @@ -448,6 +452,8 @@ if network_ids: query = query.filter( ndab_model.NetworkDhcpAgentBinding.network_id.in_(network_ids)) + if hosts: + query = query.filter(agents_db.Agent.host.in_(hosts)) if admin_state_up is not None: query = query.filter(agents_db.Agent.admin_state_up == admin_state_up) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/allowed_address_pairs/models.py neutron-9.0.0~b3~dev557/neutron/db/allowed_address_pairs/models.py --- neutron-9.0.0~b2~dev280/neutron/db/allowed_address_pairs/models.py 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/allowed_address_pairs/models.py 2016-08-29 20:05:49.000000000 +0000 @@ -10,21 +10,13 @@ # License for the specific language governing permissions and limitations # under the License. -import sqlalchemy as sa -from sqlalchemy import orm +import sys -from neutron.db import model_base -from neutron.db import models_v2 +from neutron.common import _deprecate +from neutron.db.models import allowed_address_pair as aap_models -class AllowedAddressPair(model_base.BASEV2): - port_id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - mac_address = sa.Column(sa.String(32), nullable=False, primary_key=True) - ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) - - port = orm.relationship( - models_v2.Port, - backref=orm.backref("allowed_address_pairs", - lazy="joined", cascade="delete")) +# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE +_OLD_REF = sys.modules[__name__] +sys.modules[__name__] = _deprecate._DeprecateSubset(globals(), aap_models) +# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE diff -Nru neutron-9.0.0~b2~dev280/neutron/db/api.py neutron-9.0.0~b3~dev557/neutron/db/api.py --- neutron-9.0.0~b2~dev280/neutron/db/api.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/api.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,29 +16,41 @@ import contextlib from debtcollector import moves +from debtcollector import removals +from neutron_lib import exceptions from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_db.sqlalchemy import enginefacade +from oslo_log import log as logging from oslo_utils import excutils import osprofiler.sqlalchemy import six import sqlalchemy from sqlalchemy.orm import exc +import traceback -from neutron.common import exceptions from neutron.common import profiler # noqa + +def set_hook(engine): + if cfg.CONF.profiler.enabled and cfg.CONF.profiler.trace_sqlalchemy: + osprofiler.sqlalchemy.add_tracing(sqlalchemy, engine, 'neutron.db') + + context_manager = enginefacade.transaction_context() +context_manager.configure(sqlite_fk=True) +context_manager.append_on_engine_create(set_hook) -_FACADE = None MAX_RETRIES = 10 +LOG = logging.getLogger(__name__) def is_retriable(e): if _is_nested_instance(e, (db_exc.DBDeadlock, exc.StaleDataError, + db_exc.DBConnectionError, db_exc.DBDuplicateEntry, db_exc.RetryRequest)): return True # looking savepoints mangled by deadlocks. see bug/1590298 for details. 
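# [Editor's note: illustrative sketch, not part of the diff.] The two
# model relocations above (address_scopes and allowed_address_pairs)
# both end with the same trick: the legacy module replaces itself in
# sys.modules with a proxy object, so old import paths keep resolving
# while the models live in their new neutron.db.models.* homes. A
# simplified generic version of that proxy, a hypothetical stand-in for
# neutron's _deprecate._DeprecateSubset rather than its actual code:

import sys  # used in the replacement shown in the trailing comment
import warnings


class _RelocatedModuleProxy(object):
    def __init__(self, old_globals, new_module):
        self._old = old_globals
        self._new = new_module

    def __getattr__(self, name):
        if name in self._old:
            # Still defined in the legacy module; resolve directly.
            return self._old[name]
        # Relocated attribute: warn, then delegate to the new module.
        warnings.warn('%s moved to %s' % (name, self._new.__name__),
                      DeprecationWarning)
        return getattr(self._new, name)

# The legacy module's final lines would then read, e.g.:
#   sys.modules[__name__] = _RelocatedModuleProxy(globals(), new_models)
# after which `from legacy_module import SomeModel` still works, with a
# DeprecationWarning pointing at the new location.
# [End editor's note.]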
@@ -47,7 +59,7 @@ is_deadlock = moves.moved_function(is_retriable, 'is_deadlock', __name__, message='use "is_retriable" instead', version='newton', removal_version='ocata') -retry_db_errors = oslo_db_api.wrap_db_retry( +_retry_db_errors = oslo_db_api.wrap_db_retry( max_retries=MAX_RETRIES, retry_interval=0.1, inc_retry_interval=True, @@ -55,6 +67,22 @@ ) +def retry_db_errors(f): + """Log retriable exceptions before retry to help debugging.""" + + @_retry_db_errors + @six.wraps(f) + def wrapped(*args, **kwargs): + try: + return f(*args, **kwargs) + except Exception as e: + with excutils.save_and_reraise_exception(): + if is_retriable(e): + LOG.debug("Retry wrapper got retriable exception: %s", + traceback.format_exc()) + return wrapped + + def reraise_as_retryrequest(f): """Packs retriable exceptions into a RetryRequest.""" @@ -78,49 +106,34 @@ @contextlib.contextmanager -def exc_to_retry(exceptions): +def exc_to_retry(etypes): try: yield except Exception as e: with excutils.save_and_reraise_exception() as ctx: - if _is_nested_instance(e, exceptions): + if _is_nested_instance(e, etypes): ctx.reraise = False raise db_exc.RetryRequest(e) -def _create_facade_lazily(): - global _FACADE - - if _FACADE is None: - context_manager.configure(sqlite_fk=True, **cfg.CONF.database) - _FACADE = context_manager._factory.get_legacy_facade() - - if cfg.CONF.profiler.enabled and cfg.CONF.profiler.trace_sqlalchemy: - osprofiler.sqlalchemy.add_tracing(sqlalchemy, - _FACADE.get_engine(), - "db") - - return _FACADE - - +@removals.remove(version='Newton', removal_version='Ocata') def get_engine(): """Helper method to grab engine.""" - facade = _create_facade_lazily() - return facade.get_engine() + return context_manager.get_legacy_facade().get_engine() +@removals.remove(version='newton', removal_version='Ocata') def dispose(): - # Don't need to do anything if an enginefacade hasn't been created - if _FACADE is not None: - get_engine().pool.dispose() + context_manager.dispose_pool() +#TODO(akamyshnikova): when all places in the code, which use sessions/ +# connections will be updated, this won't be needed def get_session(autocommit=True, expire_on_commit=False, use_slave=False): """Helper method to grab session.""" - facade = _create_facade_lazily() - return facade.get_session(autocommit=autocommit, - expire_on_commit=expire_on_commit, - use_slave=use_slave) + return context_manager.get_legacy_facade().get_session( + autocommit=autocommit, expire_on_commit=expire_on_commit, + use_slave=use_slave) @contextlib.contextmanager diff -Nru neutron-9.0.0~b2~dev280/neutron/db/db_base_plugin_common.py neutron-9.0.0~b3~dev557/neutron/db/db_base_plugin_common.py --- neutron-9.0.0~b2~dev280/neutron/db/db_base_plugin_common.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/db_base_plugin_common.py 2016-08-03 20:10:33.000000000 +0000 @@ -28,6 +28,7 @@ from neutron.common import utils from neutron.db import common_db_mixin from neutron.db import models_v2 +from neutron.objects import subnet as subnet_obj from neutron.objects import subnetpool as subnetpool_obj LOG = logging.getLogger(__name__) @@ -119,8 +120,6 @@ subnet_id=subnet_id ) context.session.add(allocated) - # Flush now to ensure duplicates properly trigger retry - context.session.flush() # NOTE(kevinbenton): We add this to the session info so the sqlalchemy # object isn't immediately garbage collected. 
Otherwise when the @@ -191,13 +190,6 @@ for ip in port["fixed_ips"]], "device_id": port["device_id"], "device_owner": port["device_owner"]} - if "dns_name" in port: - res["dns_name"] = port["dns_name"] - if "dns_assignment" in port: - res["dns_assignment"] = [{"ip_address": a["ip_address"], - "hostname": a["hostname"], - "fqdn": a["fqdn"]} - for a in port["dns_assignment"]] # Call auxiliary extend functions, if any if process_extensions: self._apply_dict_extend_functions( @@ -233,9 +225,8 @@ return port def _get_dns_by_subnet(self, context, subnet_id): - dns_qry = context.session.query(models_v2.DNSNameServer) - return dns_qry.filter_by(subnet_id=subnet_id).order_by( - models_v2.DNSNameServer.order).all() + return subnet_obj.DNSNameServer.get_objects(context, + subnet_id=subnet_id) def _get_route_by_subnet(self, context, subnet_id): route_qry = context.session.query(models_v2.SubnetRoute) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/db_base_plugin_v2.py neutron-9.0.0~b3~dev557/neutron/db/db_base_plugin_v2.py --- neutron-9.0.0~b2~dev280/neutron/db/db_base_plugin_v2.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/db_base_plugin_v2.py 2016-08-29 20:05:49.000000000 +0000 @@ -353,7 +353,6 @@ 'id': n.get('id') or uuidutils.generate_uuid(), 'name': n['name'], 'admin_state_up': n['admin_state_up'], - 'mtu': n.get('mtu', n_const.DEFAULT_NETWORK_MTU), 'status': n.get('status', constants.NET_STATUS_ACTIVE), 'description': n.get('description')} network = models_v2.Network(**args) @@ -559,8 +558,8 @@ "subnets.") raise exc.BadRequest(resource='subnets', msg=reason) - mode_list = [n_const.IPV6_SLAAC, - n_const.DHCPV6_STATELESS] + mode_list = [constants.IPV6_SLAAC, + constants.DHCPV6_STATELESS] ra_mode = subnet.get('ipv6_ra_mode') if ra_mode not in mode_list: @@ -668,14 +667,6 @@ if subnetpool: return subnetpool['id'] - # Until the default_subnet_pool config options are removed in the N - # release, check for them after get_default_subnetpool returns None. - # TODO(john-davidge): Remove after Mitaka release. - if ip_version == 4 and cfg.CONF.default_ipv4_subnet_pool: - return cfg.CONF.default_ipv4_subnet_pool - if ip_version == 6 and cfg.CONF.default_ipv6_subnet_pool: - return cfg.CONF.default_ipv6_subnet_pool - msg = _('No default subnetpool found for IPv%s') % ip_version raise exc.BadRequest(resource='subnets', msg=msg) @@ -814,6 +805,9 @@ # prefix when they find PD subnet_id in port's # fixed_ip. ip.pop('ip_address', None) + # FIXME(kevinbenton): this should not be calling update_port + # inside of a transaction. + setattr(context, 'GUARD_TRANSACTION', False) self.update_port(context, port['id'], {'port': port}) # Send router_update to l3_agent if routers: @@ -1042,11 +1036,7 @@ address_scope_changed = ( orig_sp.address_scope_id != reader.address_scope_id) - for k, v in dict(reader.subnetpool).items(): - # TODO(ihrachys): we should probably have some helper method in - # base object to update just updatable fields - if k not in subnetpool_obj.SubnetPool.fields_no_update: - orig_sp[k] = v + orig_sp.update_fields(reader.subnetpool) orig_sp.update() if address_scope_changed: @@ -1105,43 +1095,6 @@ def create_port_bulk(self, context, ports): return self._create_bulk('port', context, ports) - def _get_dns_domain(self): - if not cfg.CONF.dns_domain: - return '' - if cfg.CONF.dns_domain.endswith('.'): - return cfg.CONF.dns_domain - return '%s.' 
% cfg.CONF.dns_domain - - def _get_request_dns_name(self, port): - dns_domain = self._get_dns_domain() - if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)): - return port.get('dns_name', '') - return '' - - def _get_dns_names_for_port(self, context, ips, request_dns_name): - dns_assignment = [] - dns_domain = self._get_dns_domain() - if request_dns_name: - request_fqdn = request_dns_name - if not request_dns_name.endswith('.'): - request_fqdn = '%s.%s' % (request_dns_name, dns_domain) - - for ip in ips: - if request_dns_name: - hostname = request_dns_name - fqdn = request_fqdn - else: - hostname = 'host-%s' % ip['ip_address'].replace( - '.', '-').replace(':', '-') - fqdn = hostname - if dns_domain: - fqdn = '%s.%s' % (hostname, dns_domain) - dns_assignment.append({'ip_address': ip['ip_address'], - 'hostname': hostname, - 'fqdn': fqdn}) - - return dns_assignment - def _create_db_port_obj(self, context, port_data): mac_address = port_data.pop('mac_address', None) if mac_address: @@ -1181,11 +1134,6 @@ description=p.get('description')) if p.get('mac_address') is not constants.ATTR_NOT_SPECIFIED: port_data['mac_address'] = p.get('mac_address') - if ('dns-integration' in self.supported_extension_aliases and - 'dns_name' in p): - request_dns_name = self._get_request_dns_name(p) - port_data['dns_name'] = request_dns_name - with context.session.begin(subtransactions=True): # Ensure that the network exists. self._get_network(context, network_id) @@ -1194,15 +1142,7 @@ db_port = self._create_db_port_obj(context, port_data) p['mac_address'] = db_port['mac_address'] - ips = self.ipam.allocate_ips_for_port_and_store(context, port, - port_id) - if ('dns-integration' in self.supported_extension_aliases and - 'dns_name' in p): - dns_assignment = [] - if ips: - dns_assignment = self._get_dns_names_for_port( - context, ips, request_dns_name) - db_port['dns_assignment'] = dns_assignment + self.ipam.allocate_ips_for_port_and_store(context, port, port_id) return db_port def _validate_port_for_update(self, context, db_port, new_port, new_mac): @@ -1221,30 +1161,11 @@ self._check_mac_addr_update(context, db_port, new_mac, current_owner) - def _get_dns_names_for_updated_port(self, context, original_ips, - original_dns_name, request_dns_name, - changes): - if changes.original or changes.add or changes.remove: - return self._get_dns_names_for_port( - context, changes.original + changes.add, - request_dns_name or original_dns_name) - if original_ips: - return self._get_dns_names_for_port( - context, original_ips, - request_dns_name or original_dns_name) - return [] - def update_port(self, context, id, port): new_port = port['port'] with context.session.begin(subtransactions=True): db_port = self._get_port(context, id) - if 'dns-integration' in self.supported_extension_aliases: - original_ips = self._make_fixed_ip_dict(db_port['fixed_ips']) - original_dns_name = db_port.get('dns_name', '') - request_dns_name = self._get_request_dns_name(new_port) - if 'dns_name' in new_port and not request_dns_name: - new_port['dns_name'] = '' new_mac = new_port.get('mac_address') self._validate_port_for_update(context, db_port, new_port, new_mac) # Note: _make_port_dict is called here to load extension data @@ -1259,18 +1180,12 @@ # that in. The problem is that db_base_plugin_common shouldn't # know anything about port binding. This compromise sends IPAM a # port_dict with all of the extension data loaded. 
- changes = self.ipam.update_port( + self.ipam.update_port( context, old_port_db=db_port, old_port=self._make_port_dict(db_port), new_port=new_port) - if 'dns-integration' in self.supported_extension_aliases: - dns_assignment = self._get_dns_names_for_updated_port( - context, original_ips, original_dns_name, - request_dns_name, changes) result = self._make_port_dict(db_port) - if 'dns-integration' in self.supported_extension_aliases: - result['dns_assignment'] = dns_assignment return result def delete_port(self, context, id): @@ -1293,19 +1208,8 @@ "The port has already been deleted.", port_id) - def _get_dns_name_for_port_get(self, context, port): - if port['fixed_ips']: - return self._get_dns_names_for_port( - context, port['fixed_ips'], - port['dns_name']) - return [] - def get_port(self, context, id, fields=None): port = self._get_port(context, id) - if (('dns-integration' in self.supported_extension_aliases and - 'dns_name' in port)): - port['dns_assignment'] = self._get_dns_name_for_port_get(context, - port) return self._make_port_dict(port, fields) def _get_ports_query(self, context, filters=None, sorts=None, limit=None, @@ -1346,13 +1250,7 @@ sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) - items = [] - for c in query: - if (('dns-integration' in self.supported_extension_aliases and - 'dns_name' in c)): - c['dns_assignment'] = self._get_dns_name_for_port_get(context, - c) - items.append(self._make_port_dict(c, fields)) + items = [self._make_port_dict(c, fields) for c in query] if limit and page_reverse: items.reverse() return items diff -Nru neutron-9.0.0~b2~dev280/neutron/db/dns_db.py neutron-9.0.0~b3~dev557/neutron/db/dns_db.py --- neutron-9.0.0~b2~dev280/neutron/db/dns_db.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/dns_db.py 2016-08-03 20:10:33.000000000 +0000 @@ -94,7 +94,7 @@ nullable=False) previous_dns_domain = sa.Column(sa.String(255), nullable=False) - + dns_name = sa.Column(sa.String(255), nullable=False) # Add a relationship to the Port model in order to instruct # SQLAlchemy to eagerly load this association port = orm.relationship(models_v2.Port, @@ -268,15 +268,17 @@ raise n_exc.BadRequest(resource='floatingip', msg=msg) def _get_internal_port_dns_data(self, context, floatingip_data): - internal_port = context.session.query(models_v2.Port).filter_by( - id=floatingip_data['port_id']).one() - dns_domain = None - if internal_port['dns_name']: - net_dns = context.session.query(NetworkDNSDomain).filter_by( - network_id=internal_port['network_id']).one_or_none() - if net_dns: - dns_domain = net_dns['dns_domain'] - return internal_port['dns_name'], dns_domain + port_dns = context.session.query(PortDNS).filter_by( + port_id=floatingip_data['port_id']).one_or_none() + if not (port_dns and port_dns['dns_name']): + return None, None + net_dns = context.session.query(NetworkDNSDomain).join( + models_v2.Port, NetworkDNSDomain.network_id == + models_v2.Port.network_id).filter_by( + id=floatingip_data['port_id']).one_or_none() + if not net_dns: + return port_dns['dns_name'], None + return port_dns['dns_name'], net_dns['dns_domain'] def _delete_floatingip_from_external_dns_service(self, context, dns_domain, dns_name, records): @@ -287,11 +289,11 @@ LOG.exception(_LE("Error deleting Floating IP data from external " "DNS service. Name: '%(name)s'. Domain: " "'%(domain)s'. IP addresses '%(ips)s'. 
DNS " - "service driver message '%(message)s'") - % {"name": dns_name, - "domain": dns_domain, - "message": e.msg, - "ips": ', '.join(records)}) + "service driver message '%(message)s'"), + {"name": dns_name, + "domain": dns_domain, + "message": e.msg, + "ips": ', '.join(records)}) def _get_requested_state_for_external_dns_service_create(self, context, floatingip_data, @@ -318,7 +320,7 @@ LOG.exception(_LE("Error publishing floating IP data in external " "DNS service. Name: '%(name)s'. Domain: " "'%(domain)s'. DNS service driver message " - "'%(message)s'") - % {"name": dns_name, - "domain": dns_domain, - "message": e.msg}) + "'%(message)s'"), + {"name": dns_name, + "domain": dns_domain, + "message": e.msg}) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/external_net_db.py neutron-9.0.0~b3~dev557/neutron/db/external_net_db.py --- neutron-9.0.0~b2~dev280/neutron/db/external_net_db.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/external_net_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,7 +14,7 @@ # under the License. from neutron_lib.api import validators -from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants from neutron_lib import exceptions as n_exc import sqlalchemy as sa from sqlalchemy import orm @@ -39,7 +39,7 @@ from neutron.plugins.common import constants as service_constants -DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW +DEVICE_OWNER_ROUTER_GW = lib_constants.DEVICE_OWNER_ROUTER_GW class ExternalNetwork(model_base.BASEV2): diff -Nru neutron-9.0.0~b2~dev280/neutron/db/ipam_backend_mixin.py neutron-9.0.0~b3~dev557/neutron/db/ipam_backend_mixin.py --- neutron-9.0.0~b2~dev280/neutron/db/ipam_backend_mixin.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/ipam_backend_mixin.py 2016-08-29 20:05:49.000000000 +0000 @@ -33,11 +33,14 @@ from neutron.common import ipv6_utils from neutron.common import utils as common_utils from neutron.db import db_base_plugin_common +from neutron.db.models import subnet_service_type as sst_model from neutron.db import models_v2 from neutron.db import segments_db from neutron.extensions import portbindings from neutron.extensions import segment +from neutron.ipam import exceptions as ipam_exceptions from neutron.ipam import utils as ipam_utils +from neutron.objects import subnet as subnet_obj from neutron.services.segments import db as segment_svc_db from neutron.services.segments import exceptions as segment_exc @@ -148,14 +151,14 @@ # when update subnet's DNS nameservers. And store new # nameservers with order one by one. 
for dns in old_dns_list: - context.session.delete(dns) + dns.delete() for order, server in enumerate(new_dns_addr_list): - dns = models_v2.DNSNameServer( - address=server, - order=order, - subnet_id=id) - context.session.add(dns) + dns = subnet_obj.DNSNameServer(context, + address=server, + order=order, + subnet_id=id) + dns.create() del s["dns_nameservers"] return new_dns_addr_list @@ -176,6 +179,19 @@ del s['allocation_pools'] return result_pools + def _update_subnet_service_types(self, context, subnet_id, s): + old_types = context.session.query( + sst_model.SubnetServiceType).filter_by(subnet_id=subnet_id) + for service_type in old_types: + context.session.delete(service_type) + updated_types = s.pop('service_types') + for service_type in updated_types: + new_type = sst_model.SubnetServiceType( + subnet_id=subnet_id, + service_type=service_type) + context.session.add(new_type) + return updated_types + def update_db_subnet(self, context, subnet_id, s, oldpools): changes = {} if "dns_nameservers" in s: @@ -190,6 +206,10 @@ changes['allocation_pools'] = ( self._update_subnet_allocation_pools(context, subnet_id, s)) + if "service_types" in s: + changes['service_types'] = ( + self._update_subnet_service_types(context, subnet_id, s)) + subnet = self._get_subnet(context, subnet_id) subnet.update(s) return subnet, changes @@ -397,21 +417,28 @@ def _get_changed_ips_for_port(self, context, original_ips, new_ips, device_owner): """Calculate changes in IPs for the port.""" + # Collect auto addressed subnet ids that has to be removed on update + delete_subnet_ids = set(ip['subnet_id'] for ip in new_ips + if ip.get('delete_subnet')) + ips = [ip for ip in new_ips + if ip.get('subnet_id') not in delete_subnet_ids] # the new_ips contain all of the fixed_ips that are to be updated - self._validate_max_ips_per_port(new_ips, device_owner) + self._validate_max_ips_per_port(ips, device_owner) add_ips = [] remove_ips = [] + ips_map = {ip['ip_address']: ip for ip in itertools.chain(new_ips, original_ips) if 'ip_address' in ip} new = set() for ip in new_ips: - if 'ip_address' in ip: - new.add(ip['ip_address']) - else: - add_ips.append(ip) + if ip.get('subnet_id') not in delete_subnet_ids: + if 'ip_address' in ip: + new.add(ip['ip_address']) + else: + add_ips.append(ip) # Convert original ip addresses to sets orig = set(ip['ip_address'] for ip in original_ips) @@ -426,11 +453,13 @@ # Mark ip for removing if it is not found in new_ips # and subnet requires ip to be set manually. - # For auto addresses leave ip unchanged + # For auto addressed subnet leave ip unchanged + # unless it is explicitly marked for delete. 
for ip in remove: subnet_id = ips_map[ip]['subnet_id'] - if self._is_ip_required_by_subnet(context, subnet_id, - device_owner): + ip_required = self._is_ip_required_by_subnet(context, subnet_id, + device_owner) + if ip_required or subnet_id in delete_subnet_ids: remove_ips.append(ips_map[ip]) else: prev_ips.append(ips_map[ip]) @@ -462,6 +491,8 @@ subnet_args['subnetpool_id'], subnet_args['ip_version']) + service_types = subnet_args.pop('service_types', []) + subnet = models_v2.Subnet(**subnet_args) segment_id = subnet_args.get('segment_id') try: @@ -475,11 +506,11 @@ # by one when create subnet with DNS nameservers if validators.is_attr_set(dns_nameservers): for order, server in enumerate(dns_nameservers): - dns = models_v2.DNSNameServer( - address=server, - order=order, - subnet_id=subnet.id) - context.session.add(dns) + dns = subnet_obj.DNSNameServer(context, + address=server, + order=order, + subnet_id=subnet.id) + dns.create() if validators.is_attr_set(host_routes): for rt in host_routes: @@ -489,6 +520,13 @@ nexthop=rt['nexthop']) context.session.add(route) + if validators.is_attr_set(service_types): + for service_type in service_types: + service_type_entry = sst_model.SubnetServiceType( + subnet_id=subnet.id, + service_type=service_type) + context.session.add(service_type_entry) + self.save_allocation_pools(context, subnet, subnet_request.allocation_pools) @@ -526,18 +564,50 @@ fixed_ip_list.append({'subnet_id': subnet['id']}) return fixed_ip_list - def _ipam_get_subnets(self, context, network_id, host): + def _query_filter_service_subnets(self, query, service_type): + ServiceType = sst_model.SubnetServiceType + query = query.add_entity(ServiceType) + query = query.outerjoin(ServiceType) + query = query.filter(or_(ServiceType.service_type.is_(None), + ServiceType.service_type == service_type)) + return query.from_self(models_v2.Subnet) + + def _check_service_subnets(self, query, service_type): + """Raise an exception if empty subnet list is caused by service type""" + if not query.limit(1).count(): + return + query = self._query_filter_service_subnets(query, service_type) + if query.limit(1).count(): + return + raise ipam_exceptions.IpAddressGenerationFailureNoMatchingSubnet() + + def _sort_service_subnets(self, subnets, query, service_type): + """Give priority to subnets with service_types""" + subnets = sorted(subnets, + key=lambda subnet: not subnet.get('service_types')) + if not subnets: + # If we have an empty subnet list, check if it's caused by + # the service type. + self._check_service_subnets(query, service_type) + return subnets + + def _ipam_get_subnets(self, context, network_id, host, service_type=None): Subnet = models_v2.Subnet SegmentHostMapping = segment_svc_db.SegmentHostMapping - query = self._get_collection_query(context, Subnet) - query = query.filter(Subnet.network_id == network_id) + unfiltered_query = self._get_collection_query(context, Subnet) + unfiltered_query = unfiltered_query.filter( + Subnet.network_id == network_id) + query = self._query_filter_service_subnets(unfiltered_query, + service_type) # Note: This seems redundant, but its not. It has to cover cases # where host is None, ATTR_NOT_SPECIFIED, or '' due to differences in # host binding implementations. 
if not validators.is_attr_set(host) or not host: query = query.filter(Subnet.segment_id.is_(None)) - return [self._make_subnet_dict(c, context=context) for c in query] + return self._sort_service_subnets( + [self._make_subnet_dict(c, context=context) + for c in query], unfiltered_query, service_type) # A host has been provided. Consider these two scenarios # 1. Not a routed network: subnets are not on segments @@ -564,6 +634,7 @@ query = query.filter(Subnet.network_id == network_id) query = query.filter(Subnet.segment_id.isnot(None)) if query.count() == 0: + self._check_service_subnets(unfiltered_query, service_type) return [] # It is a routed network but no subnets found for host raise segment_exc.HostNotConnectedToAnySegment( @@ -580,14 +651,18 @@ raise segment_exc.HostConnectedToMultipleSegments( host=host, network_id=network_id) - return [self._make_subnet_dict(subnet, context=context) - for subnet, _mapping in results] + return self._sort_service_subnets( + [self._make_subnet_dict(subnet, context=context) + for subnet, _mapping in results], + unfiltered_query, service_type) def _make_subnet_args(self, detail, subnet, subnetpool_id): args = super(IpamBackendMixin, self)._make_subnet_args( detail, subnet, subnetpool_id) if validators.is_attr_set(subnet.get(segment.SEGMENT_ID)): args['segment_id'] = subnet[segment.SEGMENT_ID] + if validators.is_attr_set(subnet.get('service_types')): + args['service_types'] = subnet['service_types'] return args def update_port(self, context, old_port_db, old_port, new_port): @@ -614,10 +689,21 @@ new_port.get('mac_address')) fixed_ips_requested = validators.is_attr_set(new_port.get('fixed_ips')) + old_ips = old_port.get('fixed_ips') deferred_ip_allocation = (host and not old_host - and not old_port.get('fixed_ips') + and not old_ips and not fixed_ips_requested) if not deferred_ip_allocation: + # Check that any existing IPs are valid on the new segment + new_host_requested = host and host != old_host + if old_ips and new_host_requested and not fixed_ips_requested: + valid_subnets = self._ipam_get_subnets( + context, old_port['network_id'], host) + valid_subnet_ids = {s['id'] for s in valid_subnets} + for fixed_ip in old_ips: + if fixed_ip['subnet_id'] not in valid_subnet_ids: + raise segment_exc.HostNotCompatibleWithFixedIps( + host=host, port_id=old_port['id']) return changes # Allocate as if this were the port create. 
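
Taken together, the ipam_backend_mixin hunks above implement subnet service types: SubnetServiceType rows tie a subnet to a device_owner value, _query_filter_service_subnets keeps subnets that either declare a matching service type or declare none at all, and _sort_service_subnets orders typed subnets ahead of untyped ones so dedicated subnets are consumed first. A small sketch of that selection order over plain dicts, without the SQLAlchemy query (names are illustrative):

def candidate_subnets(subnets, service_type):
    # A subnet with no service_types accepts any port; typed subnets
    # match only the given service type (the port's device_owner).
    matching = [s for s in subnets
                if not s.get('service_types')
                or service_type in s['service_types']]
    # sorted() is stable: False (typed) sorts before True (untyped).
    return sorted(matching, key=lambda s: not s.get('service_types'))


subnets = [
    {'id': 'general', 'service_types': []},
    {'id': 'fip-only', 'service_types': ['network:floatingip']},
]
picked = candidate_subnets(subnets, 'network:floatingip')
assert [s['id'] for s in picked] == ['fip-only', 'general']

This is also why both IPAM backends below pass service_type=p.get('device_owner') into _ipam_get_subnets: the port's owner is what a subnet's service types are matched against.
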
diff -Nru neutron-9.0.0~b2~dev280/neutron/db/ipam_non_pluggable_backend.py neutron-9.0.0~b3~dev557/neutron/db/ipam_non_pluggable_backend.py --- neutron-9.0.0~b2~dev280/neutron/db/ipam_non_pluggable_backend.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/ipam_non_pluggable_backend.py 2016-08-29 20:05:49.000000000 +0000 @@ -67,7 +67,7 @@ subnet_ip_pools.add(netaddr.IPRange(ip_pool.first_ip, ip_pool.last_ip)) - for subnet_id in ip_pools: + for subnet_id in subnet_id_list: subnet_ip_pools = ip_pools[subnet_id] subnet_ip_allocs = ip_allocations[subnet_id] filter_set = netaddr.IPSet() @@ -282,7 +282,8 @@ p = port['port'] subnets = self._ipam_get_subnets(context, network_id=p['network_id'], - host=p.get(portbindings.HOST_ID)) + host=p.get(portbindings.HOST_ID), + service_type=p.get('device_owner')) v4, v6_stateful, v6_stateless = self._classify_subnets( context, subnets) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/ipam_pluggable_backend.py neutron-9.0.0~b3~dev557/neutron/db/ipam_pluggable_backend.py --- neutron-9.0.0~b2~dev280/neutron/db/ipam_pluggable_backend.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/ipam_pluggable_backend.py 2016-08-29 20:05:49.000000000 +0000 @@ -74,6 +74,9 @@ ip) except Exception: with excutils.save_and_reraise_exception(): + if not ipam_driver.needs_rollback(): + return + LOG.debug("An exception occurred during IP deallocation.") if revert_on_fail and deallocated: LOG.debug("Reverting deallocation") @@ -124,6 +127,9 @@ 'subnet_id': subnet_id}) except Exception: with excutils.save_and_reraise_exception(): + if not ipam_driver.needs_rollback(): + return + LOG.debug("An exception occurred during IP allocation.") if revert_on_fail and allocated: @@ -174,9 +180,12 @@ except Exception: with excutils.save_and_reraise_exception(): if ips: + ipam_driver = driver.Pool.get_instance(None, context) + if not ipam_driver.needs_rollback(): + return + LOG.debug("An exception occurred during port creation. " "Reverting IP allocation") - ipam_driver = driver.Pool.get_instance(None, context) self._safe_rollback(self._ipam_deallocate_ips, context, ipam_driver, port_copy['port'], ips, revert_on_fail=False) @@ -191,7 +200,8 @@ p = port['port'] subnets = self._ipam_get_subnets(context, network_id=p['network_id'], - host=p.get(portbindings.HOST_ID)) + host=p.get(portbindings.HOST_ID), + service_type=p.get('device_owner')) v4, v6_stateful, v6_stateless = self._classify_subnets( context, subnets) @@ -333,8 +343,11 @@ except Exception: with excutils.save_and_reraise_exception(): if 'fixed_ips' in new_port: - LOG.debug("An exception occurred during port update.") ipam_driver = driver.Pool.get_instance(None, context) + if not ipam_driver.needs_rollback(): + return + + LOG.debug("An exception occurred during port update.") if changes.add: LOG.debug("Reverting IP allocation.") self._safe_rollback(self._ipam_deallocate_ips, @@ -464,6 +477,9 @@ # IPAM part rolled back in exception handling # and subnet part is rolled back by transaction rollback. with excutils.save_and_reraise_exception(): + if not ipam_driver.needs_rollback(): + return + LOG.debug("An exception occurred during subnet creation. 
" "Reverting subnet allocation.") self._safe_rollback(self.delete_subnet, diff -Nru neutron-9.0.0~b2~dev280/neutron/db/l3_agentschedulers_db.py neutron-9.0.0~b3~dev557/neutron/db/l3_agentschedulers_db.py --- neutron-9.0.0~b2~dev280/neutron/db/l3_agentschedulers_db.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/l3_agentschedulers_db.py 2016-08-03 20:10:33.000000000 +0000 @@ -28,7 +28,6 @@ from sqlalchemy import sql from neutron._i18n import _, _LI -from neutron.common import constants as n_const from neutron.common import utils as n_utils from neutron.db import agents_db from neutron.db import agentschedulers_db @@ -126,8 +125,8 @@ def _get_agent_mode(self, agent_db): agent_conf = self.get_configuration_dict(agent_db) - return agent_conf.get(n_const.L3_AGENT_MODE, - n_const.L3_AGENT_MODE_LEGACY) + return agent_conf.get(constants.L3_AGENT_MODE, + constants.L3_AGENT_MODE_LEGACY) def validate_agent_router_combination(self, context, agent, router): """Validate if the router can be correctly assigned to the agent. @@ -144,10 +143,10 @@ agent_mode = self._get_agent_mode(agent) - if agent_mode == n_const.L3_AGENT_MODE_DVR: + if agent_mode == constants.L3_AGENT_MODE_DVR: raise l3agentscheduler.DVRL3CannotAssignToDvrAgent() - if (agent_mode == n_const.L3_AGENT_MODE_LEGACY and + if (agent_mode == constants.L3_AGENT_MODE_LEGACY and router.get('distributed')): raise l3agentscheduler.RouterL3AgentMismatch( router_id=router['id'], agent_id=agent['id']) @@ -227,7 +226,7 @@ """ agent = self._get_agent(context, agent_id) agent_mode = self._get_agent_mode(agent) - if agent_mode == n_const.L3_AGENT_MODE_DVR: + if agent_mode == constants.L3_AGENT_MODE_DVR: raise l3agentscheduler.DVRL3CannotRemoveFromDvrAgent() self._unbind_router(context, router_id, agent_id) @@ -462,10 +461,10 @@ continue agent_conf = self.get_configuration_dict(l3_agent) - agent_mode = agent_conf.get(n_const.L3_AGENT_MODE, - n_const.L3_AGENT_MODE_LEGACY) - if (agent_mode == n_const.L3_AGENT_MODE_DVR or - (agent_mode == n_const.L3_AGENT_MODE_LEGACY and + agent_mode = agent_conf.get(constants.L3_AGENT_MODE, + constants.L3_AGENT_MODE_LEGACY) + if (agent_mode == constants.L3_AGENT_MODE_DVR or + (agent_mode == constants.L3_AGENT_MODE_LEGACY and is_router_distributed)): continue diff -Nru neutron-9.0.0~b2~dev280/neutron/db/l3_db.py neutron-9.0.0~b3~dev557/neutron/db/l3_db.py --- neutron-9.0.0~b2~dev280/neutron/db/l3_db.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/l3_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -12,11 +12,13 @@ # License for the specific language governing permissions and limitations # under the License. 
+import functools import itertools +from debtcollector import removals import netaddr from neutron_lib.api import validators -from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils @@ -37,9 +39,12 @@ from neutron.common import ipv6_utils from neutron.common import rpc as n_rpc from neutron.common import utils +from neutron.db import api as db_api +from neutron.db import common_db_mixin from neutron.db import l3_agentschedulers_db as l3_agt from neutron.db import model_base from neutron.db import models_v2 +from neutron.db import standard_attr from neutron.db import standardattrdescription_db as st_attr from neutron.extensions import external_net from neutron.extensions import l3 @@ -50,10 +55,10 @@ LOG = logging.getLogger(__name__) -DEVICE_OWNER_HA_REPLICATED_INT = l3_constants.DEVICE_OWNER_HA_REPLICATED_INT -DEVICE_OWNER_ROUTER_INTF = l3_constants.DEVICE_OWNER_ROUTER_INTF -DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW -DEVICE_OWNER_FLOATINGIP = l3_constants.DEVICE_OWNER_FLOATINGIP +DEVICE_OWNER_HA_REPLICATED_INT = lib_constants.DEVICE_OWNER_HA_REPLICATED_INT +DEVICE_OWNER_ROUTER_INTF = lib_constants.DEVICE_OWNER_ROUTER_INTF +DEVICE_OWNER_ROUTER_GW = lib_constants.DEVICE_OWNER_ROUTER_GW +DEVICE_OWNER_FLOATINGIP = lib_constants.DEVICE_OWNER_FLOATINGIP EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO # Maps API field to DB column @@ -71,7 +76,9 @@ port_id = sa.Column( sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) + primary_key=True, + unique=True) + revises_on_change = ('router', ) # The port_type attribute is redundant as the port table already specifies # it in DEVICE_OWNER.However, this redundancy enables more efficient # queries on router ports, and also prevents potential error-prone @@ -84,8 +91,8 @@ lazy='joined') -class Router(model_base.HasStandardAttributes, model_base.BASEV2, - model_base.HasId, model_base.HasTenant): +class Router(standard_attr.HasStandardAttributes, model_base.BASEV2, + model_base.HasId, model_base.HasProject): """Represents a v2 neutron router.""" name = sa.Column(sa.String(attributes.NAME_MAX_LEN)) @@ -93,6 +100,8 @@ admin_state_up = sa.Column(sa.Boolean) gw_port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id')) gw_port = orm.relationship(models_v2.Port, lazy='joined') + flavor_id = sa.Column(sa.String(36), + sa.ForeignKey("flavors.id"), nullable=True) attached_ports = orm.relationship( RouterPort, backref='router', @@ -102,8 +111,8 @@ secondary=l3_agt.RouterL3AgentBinding.__table__) -class FloatingIP(model_base.HasStandardAttributes, model_base.BASEV2, - model_base.HasId, model_base.HasTenant): +class FloatingIP(standard_attr.HasStandardAttributes, model_base.BASEV2, + model_base.HasId, model_base.HasProject): """Represents a floating IP address. This IP address may or may not be allocated to a tenant, and may or @@ -148,6 +157,21 @@ _dns_integration = None + # NOTE(armax): multiple l3 service plugins (potentially out of tree) + # inherit from l3_db and may need the callbacks to be processed. Having + # an implicit subscription (through the __new__ method) preserves the + # existing behavior, and at the same time it avoids fixing it manually + # in each and every l3 plugin out there. 
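
The note above is the rationale for the __new__ override that follows: callback registration moves from import time into object construction, so any plugin that instantiates a subclass of the mixin gets the subscriptions without extra wiring, and since subscription is idempotent (the old module-level subscribe(), removed later in this diff, relied on the same property) repeated instantiation is harmless. A toy illustration of the pattern; the registry here is a stand-in, not Neutron's:

_subscribers = {}


def subscribe(callback, resource, event):
    # Keyed storage is what makes repeated registration idempotent.
    _subscribers[(callback, resource, event)] = callback


def _on_port_delete(*args, **kwargs):
    pass


class L3MixinLike(object):
    def __new__(cls):
        subscribe(_on_port_delete, 'port', 'before_delete')
        return super(L3MixinLike, cls).__new__(cls)


L3MixinLike()
L3MixinLike()  # re-registers the same callback, harmlessly
assert len(_subscribers) == 1
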
+ def __new__(cls): + L3_NAT_dbonly_mixin._subscribe_callbacks() + return super(L3_NAT_dbonly_mixin, cls).__new__(cls) + + @staticmethod + def _subscribe_callbacks(): + registry.subscribe( + _prevent_l3_port_delete_callback, resources.PORT, + events.BEFORE_DELETE) + @property def _is_dns_integration_supported(self): if self._dns_integration is None: @@ -230,18 +254,23 @@ router_id=router['id'], router_db=router_db) return router_db + def _update_gw_for_create_router(self, context, gw_info, router_id): + if gw_info: + router_db = self._get_router(context, router_id) + self._update_router_gw_info(context, router_id, + gw_info, router=router_db) + def create_router(self, context, router): r = router['router'] gw_info = r.pop(EXTERNAL_GW_INFO, None) - router_db = self._create_router_db(context, r, r['tenant_id']) - try: - if gw_info: - self._update_router_gw_info(context, router_db['id'], - gw_info, router=router_db) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.debug("Could not update gateway info, deleting router.") - self.delete_router(context, router_db.id) + create = functools.partial(self._create_router_db, context, r, + r['tenant_id']) + delete = functools.partial(self.delete_router, context) + update_gw = functools.partial(self._update_gw_for_create_router, + context, gw_info) + router_db, _unused = common_db_mixin.safe_creation(context, create, + delete, update_gw, + transaction=False) return self._make_router_dict(router_db) @@ -260,12 +289,12 @@ def update_router(self, context, id, router): r = router['router'] - gw_info = r.pop(EXTERNAL_GW_INFO, l3_constants.ATTR_NOT_SPECIFIED) + gw_info = r.pop(EXTERNAL_GW_INFO, lib_constants.ATTR_NOT_SPECIFIED) # check whether router needs and can be rescheduled to the proper # l3 agent (associated with given external network); # do check before update in DB as an exception will be raised # in case no proper l3 agent found - if gw_info != l3_constants.ATTR_NOT_SPECIFIED: + if gw_info != lib_constants.ATTR_NOT_SPECIFIED: candidates = self._check_router_needs_rescheduling( context, id, gw_info) # Update the gateway outside of the DB update since it involves L2 @@ -313,7 +342,7 @@ constants.L3_ROUTER_NAT) if (not utils.is_extension_supported( l3_plugin, - l3_constants.L3_AGENT_SCHEDULER_EXT_ALIAS) or + lib_constants.L3_AGENT_SCHEDULER_EXT_ALIAS) or l3_plugin.router_scheduler is None): # that might mean that we are dealing with non-agent-based # implementation of l3 services @@ -325,7 +354,7 @@ ext_net_id = agent['configurations'].get( 'gateway_external_network_id') ext_bridge = agent['configurations'].get( - 'external_network_bridge', 'br-ex') + 'external_network_bridge', '') if (ext_net_id == network_id or (not ext_net_id and not ext_bridge)): return @@ -350,7 +379,7 @@ # Port has no 'tenant-id', as it is hidden from user port_data = {'tenant_id': '', # intentionally not set 'network_id': network_id, - 'fixed_ips': ext_ips or l3_constants.ATTR_NOT_SPECIFIED, + 'fixed_ips': ext_ips or lib_constants.ATTR_NOT_SPECIFIED, 'device_id': router['id'], 'device_owner': DEVICE_OWNER_ROUTER_GW, 'admin_state_up': True, @@ -361,17 +390,18 @@ if not gw_port['fixed_ips']: LOG.debug('No IPs available for external network %s', network_id) - - with context.session.begin(subtransactions=True): - router.gw_port = self._core_plugin._get_port(context.elevated(), - gw_port['id']) - router_port = RouterPort( - router_id=router.id, - port_id=gw_port['id'], - port_type=DEVICE_OWNER_ROUTER_GW - ) - context.session.add(router) - 
context.session.add(router_port) + with p_utils.delete_port_on_error(self._core_plugin, + context.elevated(), gw_port['id']): + with context.session.begin(subtransactions=True): + router.gw_port = self._core_plugin._get_port( + context.elevated(), gw_port['id']) + router_port = RouterPort( + router_id=router.id, + port_id=gw_port['id'], + port_type=DEVICE_OWNER_ROUTER_GW + ) + context.session.add(router) + context.session.add(router_port) def _validate_gw_info(self, context, gw_port, info, ext_ips): network_id = info['network_id'] if info else None @@ -393,6 +423,13 @@ raise n_exc.BadRequest(resource='router', msg=msg) return network_id + # NOTE(yamamoto): This method is an override point for plugins + # inheriting this class. Do not optimize this out. + def router_gw_port_has_floating_ips(self, context, router_id): + """Return True if the router's gateway port is serving floating IPs.""" + return bool(self.get_floatingips_count(context, + {'router_id': [router_id]})) + def _delete_current_gw_port(self, context, router_id, router, new_network_id): """Delete gw port if attached to an old network.""" @@ -403,8 +440,7 @@ admin_ctx = context.elevated() old_network_id = router.gw_port['network_id'] - if self.get_floatingips_count( - admin_ctx, {'router_id': [router_id]}): + if self.router_gw_port_has_floating_ips(admin_ctx, router_id): raise l3.RouterExternalGatewayInUseByFloatingIp( router_id=router_id, net_id=router.gw_port['network_id']) gw_ips = [x['ip_address'] for x in router.gw_port.fixed_ips] @@ -625,16 +661,8 @@ raise n_exc.BadRequest(resource='router', msg=msg) return port - def _add_interface_by_port(self, context, router, port_id, owner): - # Update owner before actual process in order to avoid the - # case where a port might get attached to a router without the - # owner successfully updating due to an unavailable backend. - self._check_router_port(context, port_id, '') - self._core_plugin.update_port( - context, port_id, {'port': {'device_id': router.id, - 'device_owner': owner}}) - - with context.session.begin(subtransactions=True): + def _validate_router_port_info(self, context, router, port_id): + with db_api.autonested_transaction(context.session): # check again within transaction to mitigate race port = self._check_router_port(context, port_id, router.id) @@ -670,6 +698,23 @@ raise n_exc.BadRequest(resource='router', msg=msg) return port, subnets + def _add_interface_by_port(self, context, router, port_id, owner): + # Update owner before actual process in order to avoid the + # case where a port might get attached to a router without the + # owner successfully updating due to an unavailable backend. 
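
A few hunks up, create_router stops hand-rolling its gateway-failure cleanup and instead routes through common_db_mixin.safe_creation with functools.partial-bound create, delete, and gateway-update callables. The contract implied by that call site is roughly the following sketch (not the real common_db_mixin code; transaction handling is elided):

from oslo_utils import excutils


def safe_creation(context, create, delete, dep_create, transaction=True):
    obj = create()                   # e.g. the router DB record
    try:
        dep = dep_create(obj['id'])  # e.g. wiring the external gateway
    except Exception:
        with excutils.save_and_reraise_exception():
            delete(obj['id'])        # undo the half-created object
    return obj, dep


# Shaped like the call above:
#   router_db, _ = safe_creation(context, create, delete, update_gw,
#                                transaction=False)
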
+ port = self._check_router_port(context, port_id, '') + prev_owner = port['device_owner'] + self._core_plugin.update_port( + context, port_id, {'port': {'device_id': router.id, + 'device_owner': owner}}) + try: + return self._validate_router_port_info(context, router, port_id) + except Exception: + with excutils.save_and_reraise_exception(): + self._core_plugin.update_port( + context, port_id, {'port': {'device_id': '', + 'device_owner': prev_owner}}) + def _port_has_ipv6_address(self, port): for fixed_ip in port['fixed_ips']: if netaddr.IPNetwork(fixed_ip['ip_address']).version == 6: @@ -739,6 +784,7 @@ # This should be True unless adding an IPv6 prefix to an existing port new_port = True + cleanup_port = False if add_by_port: port, subnets = self._add_interface_by_port( @@ -748,15 +794,26 @@ else: port, subnets, new_port = self._add_interface_by_subnet( context, router, interface_info['subnet_id'], device_owner) + cleanup_port = new_port # only cleanup port we created if new_port: - with context.session.begin(subtransactions=True): - router_port = RouterPort( - port_id=port['id'], - router_id=router.id, - port_type=device_owner - ) - context.session.add(router_port) + with p_utils.delete_port_on_error(self._core_plugin, + context, port['id']) as delmgr: + delmgr.delete_on_error = cleanup_port + with context.session.begin(subtransactions=True): + router_port = RouterPort( + port_id=port['id'], + router_id=router.id, + port_type=device_owner + ) + context.session.add(router_port) + # Update owner after actual process again in order to + # make sure the records in routerports table and ports + # table are consistent. + self._core_plugin.update_port( + context, port['id'], {'port': { + 'device_id': router.id, + 'device_owner': device_owner}}) gw_ips = [] gw_network_id = None @@ -962,11 +1019,11 @@ RouterPort.router_id, models_v2.IPAllocation.ip_address).join( models_v2.Port, models_v2.IPAllocation).filter( models_v2.Port.network_id == internal_port['network_id'], - RouterPort.port_type.in_(l3_constants.ROUTER_INTERFACE_OWNERS), + RouterPort.port_type.in_(lib_constants.ROUTER_INTERFACE_OWNERS), models_v2.IPAllocation.subnet_id == internal_subnet['id'] ).join(gw_port, gw_port.device_id == RouterPort.router_id).filter( gw_port.network_id == external_network_id, - gw_port.device_owner == l3_constants.DEVICE_OWNER_ROUTER_GW + gw_port.device_owner == lib_constants.DEVICE_OWNER_ROUTER_GW ).distinct() first_router_id = None @@ -1017,7 +1074,7 @@ data = {'floatingip_id': fip['id'], 'internal_ip': internal_ip_address} msg = (_('Floating IP %(floatingip_id)s is associated ' - 'with non-IPv4 address %s(internal_ip)s and ' + 'with non-IPv4 address %(internal_ip)s and ' 'therefore cannot be bound.') % data) else: msg = (_('Cannot create floating IP and bind it to %s, ' @@ -1075,6 +1132,11 @@ context, fip, floatingip_db['floating_network_id']) + + if port_id == floatingip_db.fixed_port_id: + # Floating IP association is not changed. 
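
Both interface-attach paths above now lean on p_utils.delete_port_on_error: a guard that deletes the named port if anything inside the guarded block raises, with delmgr.delete_on_error = cleanup_port letting the same block protect a pre-existing port without destroying it. A simplified stand-in for that helper (a sketch, not the shipped implementation):

import contextlib

from oslo_log import log as logging
from oslo_utils import excutils

LOG = logging.getLogger(__name__)


@contextlib.contextmanager
def delete_port_on_error(core_plugin, context, port_id):
    class _DeleteManager(object):
        delete_on_error = True  # callers may flip this, as above

    delmgr = _DeleteManager()
    try:
        yield delmgr
    except Exception:
        with excutils.save_and_reraise_exception():
            if delmgr.delete_on_error:
                LOG.debug("Deleting port %s due to error.", port_id)
                core_plugin.delete_port(context, port_id,
                                        l3_port_check=False)
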
+ return port_id, internal_ip_address, router_id + fip_qry = context.session.query(FloatingIP) try: fip_qry.filter_by( @@ -1110,7 +1172,7 @@ gw_port = router.gw_port for fixed_ip in gw_port.fixed_ips: addr = netaddr.IPAddress(fixed_ip.ip_address) - if addr.version == l3_constants.IP_VERSION_4: + if addr.version == lib_constants.IP_VERSION_4: next_hop = fixed_ip.ip_address break args = {'fixed_ip_address': internal_ip_address, @@ -1131,7 +1193,7 @@ return any(s.ip_version == 4 for s in net.subnets) def _create_floatingip(self, context, floatingip, - initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE): + initial_status=lib_constants.FLOATINGIP_STATUS_ACTIVE): fip = floatingip['floatingip'] fip_id = uuidutils.generate_uuid() @@ -1154,7 +1216,7 @@ 'admin_state_up': True, 'device_id': fip_id, 'device_owner': DEVICE_OWNER_FLOATINGIP, - 'status': l3_constants.PORT_STATUS_NOTAPPLICABLE, + 'status': lib_constants.PORT_STATUS_NOTAPPLICABLE, 'name': ''} if fip.get('floating_ip_address'): port['fixed_ips'] = [ @@ -1166,6 +1228,8 @@ # 'status' in port dict could not be updated by default, use # check_allow_post to stop the verification of system + # TODO(kevinbenton): move this out of transaction + setattr(context, 'GUARD_TRANSACTION', False) external_port = p_utils.create_port(self._core_plugin, context.elevated(), {'port': port}, @@ -1205,7 +1269,7 @@ return floatingip_dict def create_floatingip(self, context, floatingip, - initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE): + initial_status=lib_constants.FLOATINGIP_STATUS_ACTIVE): return self._create_floatingip(context, floatingip, initial_status) def _update_floatingip(self, context, id, floatingip): @@ -1219,10 +1283,10 @@ self._update_fip_assoc(context, fip, floatingip_db, self._core_plugin.get_port( context.elevated(), fip_port_id)) - floatingip_dict = self._make_floatingip_dict(floatingip_db) if self._is_dns_integration_supported: dns_data = self._process_dns_floatingip_update_precommit( - context, floatingip_dict) + context, floatingip_db) + floatingip_dict = self._make_floatingip_dict(floatingip_db) if self._is_dns_integration_supported: self._process_dns_floatingip_update_postcommit(context, floatingip_dict, @@ -1357,13 +1421,13 @@ raise n_exc.ServicePortInUse(port_id=port['id'], reason=reason) - def disassociate_floatingips(self, context, port_id): + def disassociate_floatingips(self, context, port_id, do_notify=True): """Disassociate all floating IPs linked to specific port. @param port_id: ID of the port to disassociate floating IPs. @param do_notify: whether we should notify routers right away. + This parameter is ignored. @return: set of router-ids that require notification updates - if do_notify is False, otherwise None. 
""" router_ids = set() @@ -1543,8 +1607,8 @@ port['subnets'] = [] port['extra_subnets'] = [] - port['address_scopes'] = {l3_constants.IP_VERSION_4: None, - l3_constants.IP_VERSION_6: None} + port['address_scopes'] = {lib_constants.IP_VERSION_4: None, + lib_constants.IP_VERSION_6: None} scopes = {} for subnet in subnets_by_network[port['network_id']]: @@ -1579,18 +1643,18 @@ for floating_ip in floating_ips: router = routers_dict.get(floating_ip['router_id']) if router: - router_floatingips = router.get(l3_constants.FLOATINGIP_KEY, + router_floatingips = router.get(lib_constants.FLOATINGIP_KEY, []) router_floatingips.append(floating_ip) - router[l3_constants.FLOATINGIP_KEY] = router_floatingips + router[lib_constants.FLOATINGIP_KEY] = router_floatingips def _process_interfaces(self, routers_dict, interfaces): for interface in interfaces: router = routers_dict.get(interface['device_id']) if router: - router_interfaces = router.get(l3_constants.INTERFACE_KEY, []) + router_interfaces = router.get(lib_constants.INTERFACE_KEY, []) router_interfaces.append(interface) - router[l3_constants.INTERFACE_KEY] = router_interfaces + router[lib_constants.INTERFACE_KEY] = router_interfaces def _get_router_info_list(self, context, router_ids=None, active=None, device_owners=None): @@ -1620,6 +1684,27 @@ class L3RpcNotifierMixin(object): """Mixin class to add rpc notifier attribute to db_base_plugin_v2.""" + # NOTE(armax): multiple l3 service plugins (potentially out of tree) + # inherit from l3_db and may need the callbacks to be processed. Having + # an implicit subscription (through the __new__ method) preserves the + # existing behavior, and at the same time it avoids fixing it manually + # in each and every l3 plugin out there. + def __new__(cls): + L3RpcNotifierMixin._subscribe_callbacks() + return object.__new__(cls) + + @staticmethod + def _subscribe_callbacks(): + registry.subscribe( + _notify_routers_callback, resources.PORT, events.AFTER_DELETE) + registry.subscribe( + _notify_subnet_gateway_ip_update, resources.SUBNET_GATEWAY, + events.AFTER_UPDATE) + registry.subscribe( + _notify_subnetpool_address_scope_update, + resources.SUBNETPOOL_ADDRESS_SCOPE, + events.AFTER_UPDATE) + @property def l3_rpc_notifier(self): if not hasattr(self, '_l3_rpc_notifier'): @@ -1696,7 +1781,7 @@ return router_interface_info def create_floatingip(self, context, floatingip, - initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE): + initial_status=lib_constants.FLOATINGIP_STATUS_ACTIVE): floatingip_dict = super(L3_NAT_db_mixin, self).create_floatingip( context, floatingip, initial_status) router_id = floatingip_dict['router_id'] @@ -1726,7 +1811,7 @@ if do_notify is False, otherwise None. """ router_ids = super(L3_NAT_db_mixin, self).disassociate_floatingips( - context, port_id) + context, port_id, do_notify) if do_notify: self.notify_routers_updated(context, router_ids) # since caller assumes that we handled notifications on its @@ -1768,7 +1853,7 @@ subnet_id = kwargs['subnet_id'] query = context.session.query(models_v2.Port).filter_by( network_id=network_id, - device_owner=l3_constants.DEVICE_OWNER_ROUTER_GW) + device_owner=lib_constants.DEVICE_OWNER_ROUTER_GW) query = query.join(models_v2.Port.fixed_ips).filter( models_v2.IPAllocation.subnet_id == subnet_id) router_ids = set(port['device_id'] for port in query) @@ -1797,24 +1882,9 @@ l3plugin.notify_routers_updated(context, router_ids) +@removals.remove( + message="This will be removed in the P cycle. " + "Subscriptions are now registered during object creation." 
+) def subscribe(): - registry.subscribe( - _prevent_l3_port_delete_callback, resources.PORT, events.BEFORE_DELETE) - registry.subscribe( - _notify_routers_callback, resources.PORT, events.AFTER_DELETE) - registry.subscribe( - _notify_subnet_gateway_ip_update, resources.SUBNET_GATEWAY, - events.AFTER_UPDATE) - registry.subscribe( - _notify_subnetpool_address_scope_update, - resources.SUBNETPOOL_ADDRESS_SCOPE, - events.AFTER_UPDATE) - -# NOTE(armax): multiple l3 service plugins (potentially out of tree) inherit -# from l3_db and may need the callbacks to be processed. Having an implicit -# subscription (through the module import) preserves the existing behavior, -# and at the same time it avoids fixing it manually in each and every l3 plugin -# out there. That said, The subscription is also made explicit in the -# reference l3 plugin. The subscription operation is idempotent so there is no -# harm in registering the same callback multiple times. -subscribe() + pass diff -Nru neutron-9.0.0~b2~dev280/neutron/db/l3_dvr_db.py neutron-9.0.0~b3~dev557/neutron/db/l3_dvr_db.py --- neutron-9.0.0~b2~dev280/neutron/db/l3_dvr_db.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/l3_dvr_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -22,17 +22,18 @@ from oslo_utils import excutils import six -from neutron._i18n import _, _LI, _LW +from neutron._i18n import _, _LE, _LI, _LW from neutron.callbacks import events from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import constants as l3_const from neutron.common import utils as n_utils -from neutron.db.allowed_address_pairs import models as addr_pair_db +from neutron.db import api as db_api from neutron.db import l3_agentschedulers_db as l3_sched_db from neutron.db import l3_attrs_db from neutron.db import l3_db +from neutron.db.models import allowed_address_pair as aap_models from neutron.db import models_v2 from neutron.extensions import l3 from neutron.extensions import portbindings @@ -211,8 +212,8 @@ """Return all active ports associated with the allowed_addr_pair ip.""" query = context.session.query( models_v2.Port).filter( - models_v2.Port.id == addr_pair_db.AllowedAddressPair.port_id, - addr_pair_db.AllowedAddressPair.ip_address == fixed_ip, + models_v2.Port.id == aap_models.AllowedAddressPair.port_id, + aap_models.AllowedAddressPair.ip_address == fixed_ip, models_v2.Port.network_id == network_id, models_v2.Port.admin_state_up == True) # noqa return query.all() @@ -242,6 +243,10 @@ # for the same host. Until we find a good solution for # augmenting multiple server requests we should use the # existing flow. + # FIXME(kevinbenton): refactor so this happens outside + # of floating IP transaction since it creates a port + # via ML2. 
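
The FIXME above names the hazard that GUARD_TRANSACTION works around: creating a port through ML2 from inside an open database transaction. The setattr(admin_ctx, 'GUARD_TRANSACTION', False) on the next line, like the identical call in _create_floatingip earlier, is the temporary escape hatch for call sites that cannot yet be untangled. The guard itself is not part of this diff; a plausible minimal shape for such a check (an assumption, not Neutron's actual implementation):

def assert_transaction_guard(context):
    # Hypothetical sketch: refuse out-of-band work while a DB
    # transaction is open, unless the caller opted out via the
    # GUARD_TRANSACTION attribute seen above.
    in_txn = context.session.is_active and context.session.transaction
    if in_txn and getattr(context, 'GUARD_TRANSACTION', True):
        raise RuntimeError(
            "port operation attempted inside an open DB transaction")
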
+ setattr(admin_ctx, 'GUARD_TRANSACTION', False) fip_agent_port = ( self.create_fip_agent_gw_port_if_not_exists( admin_ctx, external_port['network_id'], @@ -307,6 +312,7 @@ # This should be True unless adding an IPv6 prefix to an existing port new_port = True + cleanup_port = False if add_by_port: port, subnets = self._add_interface_by_port( @@ -314,31 +320,34 @@ elif add_by_sub: port, subnets, new_port = self._add_interface_by_subnet( context, router, interface_info['subnet_id'], device_owner) + cleanup_port = new_port subnet = subnets[0] if new_port: - if router.extra_attributes.distributed and router.gw_port: - try: + with p_utils.delete_port_on_error(self._core_plugin, + context, port['id']) as delmgr: + delmgr.delete_on_error = cleanup_port + if router.extra_attributes.distributed and router.gw_port: admin_context = context.elevated() self._add_csnat_router_interface_port( admin_context, router, port['network_id'], port['fixed_ips'][-1]['subnet_id']) - except Exception: - with excutils.save_and_reraise_exception(): - # we need to preserve the original state prior - # the request by rolling back the port creation - # that led to new_port=True - self._core_plugin.delete_port( - admin_context, port['id'], l3_port_check=False) - - with context.session.begin(subtransactions=True): - router_port = l3_db.RouterPort( - port_id=port['id'], - router_id=router.id, - port_type=device_owner - ) - context.session.add(router_port) + + with context.session.begin(subtransactions=True): + router_port = l3_db.RouterPort( + port_id=port['id'], + router_id=router.id, + port_type=device_owner + ) + context.session.add(router_port) + # Update owner after actual process again in order to + # make sure the records in routerports table and ports + # table are consistent. + self._core_plugin.update_port( + context, port['id'], {'port': { + 'device_id': router.id, + 'device_owner': device_owner}}) # NOTE: For IPv6 additional subnets added to the same # network we need to update the CSNAT port with respective @@ -355,9 +364,41 @@ if cs_port: fixed_ips = list(cs_port['port']['fixed_ips']) fixed_ips.append(fixed_ip) - updated_port = self._core_plugin.update_port( - context.elevated(), - cs_port['port_id'], {'port': {'fixed_ips': fixed_ips}}) + try: + updated_port = self._core_plugin.update_port( + context.elevated(), + cs_port['port_id'], + {'port': {'fixed_ips': fixed_ips}}) + except Exception: + with excutils.save_and_reraise_exception(): + # we need to try to undo the updated router + # interface from above so it's not out of sync + # with the csnat port. + # TODO(kevinbenton): switch to taskflow to manage + # these rollbacks. + @db_api.retry_db_errors + def revert(): + # TODO(kevinbenton): even though we get the + # port each time, there is a potential race + # where we update the port with stale IPs if + # another interface operation is occurring at + # the same time. This can be fixed in the + # future with a compare-and-swap style update + # using the revision number of the port.
+ p = self._core_plugin.get_port( + context.elevated(), port['id']) + upd = {'port': {'fixed_ips': [ + ip for ip in p['fixed_ips'] + if ip['subnet_id'] != fixed_ip['subnet_id'] + ]}} + self._core_plugin.update_port( + context.elevated(), port['id'], upd) + try: + revert() + except Exception: + LOG.exception(_LE("Failed to revert change " + "to router port %s."), + port['id']) LOG.debug("CSNAT port updated for IPv6 subnet: " "%s", updated_port) router_interface_info = self._make_router_interface_info( @@ -901,7 +942,9 @@ filters={'id': ports} ) for p in c_snat_ports: - if subnet_id is None: + if subnet_id is None or not p['fixed_ips']: + if not p['fixed_ips']: + LOG.debug("CSNAT port has no IPs: %s", p) self._core_plugin.delete_port(context, p['id'], l3_port_check=False) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/l3_dvr_ha_scheduler_db.py neutron-9.0.0~b3~dev557/neutron/db/l3_dvr_ha_scheduler_db.py --- neutron-9.0.0~b2~dev280/neutron/db/l3_dvr_ha_scheduler_db.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/l3_dvr_ha_scheduler_db.py 2016-08-03 20:10:33.000000000 +0000 @@ -16,10 +16,6 @@ import neutron.db.l3_dvrscheduler_db as l3agent_dvr_sch_db import neutron.db.l3_hascheduler_db as l3_ha_sch_db -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - class L3_DVR_HA_scheduler_db_mixin(l3agent_dvr_sch_db.L3_DVRsch_db_mixin, l3_ha_sch_db.L3_HA_scheduler_db_mixin): diff -Nru neutron-9.0.0~b2~dev280/neutron/db/l3_dvrscheduler_db.py neutron-9.0.0~b3~dev557/neutron/db/l3_dvrscheduler_db.py --- neutron-9.0.0~b2~dev280/neutron/db/l3_dvrscheduler_db.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/l3_dvrscheduler_db.py 2016-08-03 20:10:33.000000000 +0000 @@ -20,7 +20,6 @@ from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources -from neutron.common import constants as const from neutron.common import utils as n_utils from neutron.db import agentschedulers_db @@ -184,9 +183,8 @@ admin_context, filters=filter_rtr) for port in int_ports: dvr_binding = (ml2_db. - get_dvr_port_binding_by_host(context.session, - port['id'], - port_host)) + get_distributed_port_binding_by_host( + context.session, port['id'], port_host)) if dvr_binding: # unbind this port from router dvr_binding['router_id'] = None @@ -287,8 +285,8 @@ # dvr routers are not explicitly scheduled to agents on hosts with # dvr serviceable ports, so need special handling - if self._get_agent_mode(agent_db) in [const.L3_AGENT_MODE_DVR, - const.L3_AGENT_MODE_DVR_SNAT]: + if self._get_agent_mode(agent_db) in [n_const.L3_AGENT_MODE_DVR, + n_const.L3_AGENT_MODE_DVR_SNAT]: if not router_ids: result_set |= set(self._get_dvr_router_ids_for_host( context, agent_db['host'])) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/l3_hamode_db.py neutron-9.0.0~b3~dev557/neutron/db/l3_hamode_db.py --- neutron-9.0.0~b2~dev280/neutron/db/l3_hamode_db.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/l3_hamode_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -36,6 +36,7 @@ from neutron.db import agents_db from neutron.db.availability_zone import router as router_az_db from neutron.db import common_db_mixin +from neutron.db import l3_db from neutron.db import l3_dvr_db from neutron.db.l3_dvr_db import is_distributed_router from neutron.db import model_base @@ -63,9 +64,14 @@ "scheduled on. 
If it is set to 0 then the router will " "be scheduled on every agent.")), cfg.IntOpt('min_l3_agents_per_router', - default=n_const.MINIMUM_AGENTS_FOR_HA, - help=_("Minimum number of L3 agents which a HA router will be " - "scheduled on.")), + default=n_const.DEFAULT_MINIMUM_AGENTS_FOR_HA, + help=_("DEPRECATED: Minimum number of L3 agents that have to " + "be available in order to allow a new HA router to be " + "scheduled. This option is deprecated in the Newton " + "release and will be removed for the Ocata release " + "where the scheduling of new HA routers will always " + "be allowed."), + deprecated_for_removal=True), cfg.StrOpt('l3_ha_net_cidr', default=n_const.L3_HA_NET_CIDR, help=_('Subnet used for the l3 HA admin network.')), @@ -118,7 +124,7 @@ server_default=n_const.HA_ROUTER_STATE_STANDBY) -class L3HARouterNetwork(model_base.BASEV2): +class L3HARouterNetwork(model_base.BASEV2, model_base.HasProjectPrimaryKey): """Host HA network for a tenant. One HA Network is used per tenant, all HA router ports are created @@ -127,8 +133,6 @@ __tablename__ = 'ha_router_networks' - tenant_id = sa.Column(sa.String(attributes.TENANT_ID_MAX_LEN), - primary_key=True, nullable=False) network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), nullable=False, primary_key=True) @@ -179,7 +183,7 @@ raise l3_ha.HAMaximumAgentsNumberNotValid( max_agents=max_agents, min_agents=min_agents) - if min_agents < n_const.MINIMUM_AGENTS_FOR_HA: + if min_agents < n_const.MINIMUM_MINIMUM_AGENTS_FOR_HA: raise l3_ha.HAMinimumAgentsNumberNotValid() def __init__(self): @@ -315,8 +319,8 @@ min_agents = cfg.CONF.min_l3_agents_per_router num_agents = len(self.get_l3_agents(context, active=True, - filters={'agent_modes': [n_const.L3_AGENT_MODE_LEGACY, - n_const.L3_AGENT_MODE_DVR_SNAT]})) + filters={'agent_modes': [constants.L3_AGENT_MODE_LEGACY, + constants.L3_AGENT_MODE_DVR_SNAT]})) max_agents = cfg.CONF.max_l3_agents_per_router if max_agents: if max_agents > num_agents: @@ -335,6 +339,10 @@ def _create_ha_port_binding(self, context, router_id, port_id): try: with context.session.begin(): + routerportbinding = l3_db.RouterPort( + port_id=port_id, router_id=router_id, + port_type=constants.DEVICE_OWNER_ROUTER_HA_INTF) + context.session.add(routerportbinding) portbinding = L3HARouterAgentPortBinding(port_id=port_id, router_id=router_id) context.session.add(portbinding) @@ -447,9 +455,10 @@ context, router_db.tenant_id) dep_creator = functools.partial(self._create_ha_network, context, router_db.tenant_id) + dep_deleter = functools.partial(self._delete_ha_network, context) dep_id_attr = 'network_id' return n_utils.create_object_with_dependency( - creator, dep_getter, dep_creator, dep_id_attr) + creator, dep_getter, dep_creator, dep_id_attr, dep_deleter) def create_router(self, context, router): is_ha = self._is_ha(router['router']) @@ -556,6 +565,10 @@ self._set_vr_id(context, router_db, ha_network) else: self._delete_ha_interfaces(context, router_db.id) + # always attempt to cleanup the network as the router is + # deleted. 
the core plugin will stop us if its in use + self.safe_delete_ha_network(context, ha_network, + router_db.tenant_id) self.schedule_router(context, router_id) router_db = super(L3_HA_NAT_db_mixin, self)._update_router_db( @@ -569,6 +582,26 @@ admin_ctx = context.elevated() self._core_plugin.delete_network(admin_ctx, net.network_id) + def safe_delete_ha_network(self, context, ha_network, tenant_id): + try: + self._delete_ha_network(context, ha_network) + except (n_exc.NetworkNotFound, + orm.exc.ObjectDeletedError): + LOG.debug( + "HA network for tenant %s was already deleted.", tenant_id) + except sa.exc.InvalidRequestError: + LOG.info(_LI("HA network %s can not be deleted."), + ha_network.network_id) + except n_exc.NetworkInUse: + # network is still in use, this is normal so we don't + # log anything + pass + else: + LOG.info(_LI("HA network %(network)s was deleted as " + "no HA routers are present in tenant " + "%(tenant)s."), + {'network': ha_network.network_id, 'tenant': tenant_id}) + def delete_router(self, context, id): router_db = self._get_router(context, id) super(L3_HA_NAT_db_mixin, self).delete_router(context, id) @@ -579,30 +612,11 @@ if ha_network: self._delete_vr_id_allocation( context, ha_network, router_db.extra_attributes.ha_vr_id) - self._delete_ha_interfaces(context, router_db.id) # always attempt to cleanup the network as the router is # deleted. the core plugin will stop us if its in use - try: - self._delete_ha_network(context, ha_network) - except (n_exc.NetworkNotFound, - orm.exc.ObjectDeletedError): - LOG.debug( - "HA network for tenant %s was already deleted.", - router_db.tenant_id) - except sa.exc.InvalidRequestError: - LOG.info(_LI("HA network %s can not be deleted."), - ha_network.network_id) - except n_exc.NetworkInUse: - # network is still in use, this is normal so we don't - # log anything - pass - else: - LOG.info(_LI("HA network %(network)s was deleted as " - "no HA routers are present in tenant " - "%(tenant)s."), - {'network': ha_network.network_id, - 'tenant': router_db.tenant_id}) + self.safe_delete_ha_network(context, ha_network, + router_db.tenant_id) def _unbind_ha_router(self, context, router_id): for agent in self.get_l3_agents_hosting_routers(context, [router_id]): @@ -681,7 +695,15 @@ routers_dict.keys(), host) for binding in bindings: - port_dict = self._core_plugin._make_port_dict(binding.port) + port = binding.port + if not port: + # Filter the HA router has no ha port here + LOG.info(_LI("HA router %s is missing HA router port " + "bindings. Skipping it."), + binding.router_id) + routers_dict.pop(binding.router_id) + continue + port_dict = self._core_plugin._make_port_dict(port) router = routers_dict.get(binding.router_id) router[constants.HA_INTERFACE_KEY] = port_dict @@ -692,14 +714,16 @@ if interface: self._populate_mtu_and_subnets_for_ports(context, [interface]) + # Could not filter the HA_INTERFACE_KEY here, because a DVR router + # with SNAT HA in DVR compute host also does not have that attribute. 
return list(routers_dict.values()) @log_helpers.log_method_call def get_ha_sync_data_for_host(self, context, host, agent, router_ids=None, active=None): agent_mode = self._get_agent_mode(agent) - dvr_agent_mode = (agent_mode in [n_const.L3_AGENT_MODE_DVR_SNAT, - n_const.L3_AGENT_MODE_DVR]) + dvr_agent_mode = (agent_mode in [constants.L3_AGENT_MODE_DVR_SNAT, + constants.L3_AGENT_MODE_DVR]) if (dvr_agent_mode and n_utils.is_extension_supported( self, constants.L3_DISTRIBUTED_EXT_ALIAS)): # DVR has to be handled differently diff -Nru neutron-9.0.0~b2~dev280/neutron/db/metering/metering_db.py neutron-9.0.0~b3~dev557/neutron/db/metering/metering_db.py --- neutron-9.0.0~b2~dev280/neutron/db/metering/metering_db.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/metering/metering_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -38,7 +38,9 @@ excluded = sa.Column(sa.Boolean, default=False, server_default=sql.false()) -class MeteringLabel(model_base.BASEV2, model_base.HasId, model_base.HasTenant): +class MeteringLabel(model_base.BASEV2, + model_base.HasId, + model_base.HasProject): name = sa.Column(sa.String(attr.NAME_MAX_LEN)) description = sa.Column(sa.String(attr.LONG_DESCRIPTION_MAX_LEN)) rules = orm.relationship(MeteringLabelRule, backref="label", diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/CONTRACT_HEAD neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/CONTRACT_HEAD --- neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/CONTRACT_HEAD 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/CONTRACT_HEAD 2016-08-29 20:05:49.000000000 +0000 @@ -1 +1 @@ -4bcd4df1f426 +3b935b28e7a0 diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD --- neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/EXPAND_HEAD 2016-08-29 20:05:49.000000000 +0000 @@ -1 +1 @@ -c415aab1c048 +a5648cfeeadf diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/contract/3b935b28e7a0_migrate_to_pluggable_ipam.py neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/contract/3b935b28e7a0_migrate_to_pluggable_ipam.py --- neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/contract/3b935b28e7a0_migrate_to_pluggable_ipam.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/contract/3b935b28e7a0_migrate_to_pluggable_ipam.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,131 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""migrate to pluggable ipam """ + +# revision identifiers, used by Alembic. 
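
The CONTRACT_HEAD bump above (4bcd4df1f426 to 3b935b28e7a0) is the cumulative effect of the five contract migrations that follow; chaining their down_revision values reproduces the new head, which is a quick consistency check when reviewing a batch of migrations:

# down_revision values as they appear in the migrations below.
chain = {
    'b67e765a3524': '4bcd4df1f426',  # remove mtu column from networks
    'a84ccf28f06a': 'b67e765a3524',  # migrate dns name from port
    '7d9d8eeec6ad': 'a84ccf28f06a',  # rename tenant to project
    'a8b517cff8ab': '7d9d8eeec6ad',  # routerport bindings for L3 HA
    '3b935b28e7a0': 'a8b517cff8ab',  # migrate to pluggable ipam
}
# The revision that is nobody's down_revision is the head.
assert set(chain) - set(chain.values()) == {'3b935b28e7a0'}
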
+revision = '3b935b28e7a0' +down_revision = 'a8b517cff8ab' + +from alembic import op +from oslo_utils import uuidutils +import sqlalchemy as sa + +# Simple models for the tables, with only the fields needed for the migration. +neutron_subnet = sa.Table('subnets', sa.MetaData(), + sa.Column('id', sa.String(length=36), + nullable=False)) + +ipam_subnet = sa.Table('ipamsubnets', sa.MetaData(), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('neutron_subnet_id', sa.String(length=36), + nullable=True)) + +ip_allocation_pool = sa.Table('ipallocationpools', sa.MetaData(), + sa.Column('id', sa.String(length=36), + nullable=False), + sa.Column('subnet_id', sa.String(length=36), + sa.ForeignKey('subnets.id', + ondelete="CASCADE"), + nullable=False), + sa.Column('first_ip', sa.String(length=64), + nullable=False), + sa.Column('last_ip', sa.String(length=64), + nullable=False)) + +ipam_allocation_pool = sa.Table('ipamallocationpools', sa.MetaData(), + sa.Column('id', sa.String(length=36), + nullable=False), + sa.Column('ipam_subnet_id', + sa.String(length=36), + sa.ForeignKey('ipamsubnets.id', + ondelete="CASCADE"), + nullable=False), + sa.Column('first_ip', sa.String(length=64), + nullable=False), + sa.Column('last_ip', sa.String(length=64), + nullable=False)) + +ip_allocation = sa.Table('ipallocations', sa.MetaData(), + sa.Column('ip_address', sa.String(length=64), + nullable=False), + sa.Column('subnet_id', sa.String(length=36), + sa.ForeignKey('subnets.id', + ondelete="CASCADE"))) + +ipam_allocation = sa.Table('ipamallocations', sa.MetaData(), + sa.Column('ip_address', sa.String(length=64), + nullable=False, primary_key=True), + sa.Column('ipam_subnet_id', sa.String(length=36), + sa.ForeignKey('subnets.id', + ondelete="CASCADE"), + primary_key=True), + sa.Column('status', sa.String(length=36))) + + +def upgrade(): + """Migrate data to pluggable ipam reference driver. + + Tables 'subnets', 'ipallocationpools' and 'ipallocations' are API exposed + and always contain up-to-date data independently of the ipam driver + in use, so they can be used as a reliable source of data. + + This migration cleans up the tables for the reference ipam driver and + rebuilds them from the API exposed tables, so it works correctly for both + types of users: + - Those who used the built-in ipam implementation; + Their ipam data will be migrated to reference ipam driver tables, + and the reference ipam driver becomes the default driver. + - Those who switched to the reference ipam before Newton; + Existing reference ipam driver tables are cleaned up and all ipam data is + regenerated from the API exposed tables. + All existing subnets and ports are still usable after the upgrade.
+ """ + session = sa.orm.Session(bind=op.get_bind()) + + # Make sure destination tables are clean + session.execute(ipam_subnet.delete()) + session.execute(ipam_allocation_pool.delete()) + session.execute(ipam_allocation.delete()) + + map_neutron_id_to_ipam = {} + subnet_values = [] + for subnet_id, in session.query(neutron_subnet): + ipam_id = uuidutils.generate_uuid() + map_neutron_id_to_ipam[subnet_id] = ipam_id + subnet_values.append(dict( + id=ipam_id, + neutron_subnet_id=subnet_id)) + op.bulk_insert(ipam_subnet, subnet_values) + + ipam_pool_values = [] + pools = session.query(ip_allocation_pool) + for pool in pools: + new_pool_id = uuidutils.generate_uuid() + ipam_pool_values.append(dict( + id=new_pool_id, + ipam_subnet_id=map_neutron_id_to_ipam[pool.subnet_id], + first_ip=pool.first_ip, + last_ip=pool.last_ip)) + op.bulk_insert(ipam_allocation_pool, ipam_pool_values) + + ipam_allocation_values = [] + for ip_alloc in session.query(ip_allocation): + ipam_allocation_values.append(dict( + ip_address=ip_alloc.ip_address, + status='ALLOCATED', + ipam_subnet_id=map_neutron_id_to_ipam[ip_alloc.subnet_id])) + op.bulk_insert(ipam_allocation, ipam_allocation_values) + session.commit() diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/contract/7d9d8eeec6ad_rename_tenant_to_project.py neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/contract/7d9d8eeec6ad_rename_tenant_to_project.py --- neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/contract/7d9d8eeec6ad_rename_tenant_to_project.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/contract/7d9d8eeec6ad_rename_tenant_to_project.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,161 @@ +# Copyright 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""rename tenant to project + +Revision ID: 7d9d8eeec6ad +Create Date: 2016-06-29 19:42:17.862721 + +""" + +# revision identifiers, used by Alembic. +revision = '7d9d8eeec6ad' +down_revision = 'a84ccf28f06a' +depends_on = ('5abc0278ca73',) + +from alembic import op +import sqlalchemy as sa + + +_INSPECTOR = None + + +def get_inspector(): + """Reuse inspector""" + + global _INSPECTOR + + if _INSPECTOR: + return _INSPECTOR + + else: + bind = op.get_bind() + _INSPECTOR = sa.engine.reflection.Inspector.from_engine(bind) + + return _INSPECTOR + + +def get_tables(): + """ + Returns hardcoded list of tables which have ``tenant_id`` column. + + DB head can be changed. To prevent possible problems, when models will be + updated, return hardcoded list of tables, up-to-date for this day. + + Output retrieved by using: + + >>> metadata = head.get_metadata() + >>> all_tables = metadata.sorted_tables + >>> tenant_tables = [] + >>> for table in all_tables: + ... for column in table.columns: + ... if column.name == 'tenant_id': + ... 
tenant_tables.append((table, column)) + + """ + + tables = [ + 'address_scopes', + 'floatingips', + 'meteringlabels', + 'networkrbacs', + 'networks', + 'ports', + 'qos_policies', + 'qospolicyrbacs', + 'quotas', + 'reservations', + 'routers', + 'securitygrouprules', + 'securitygroups', + 'subnetpools', + 'subnets', + 'trunks', + 'auto_allocated_topologies', + 'default_security_group', + 'ha_router_networks', + 'quotausages', + ] + + return tables + + +def get_columns(table): + """Returns list of columns for given table.""" + inspector = get_inspector() + return inspector.get_columns(table) + + +def get_data(): + """Returns combined list of tuples: [(table, column)]. + + List is built, based on retrieved tables, where column with name + ``tenant_id`` exists. + """ + + output = [] + tables = get_tables() + for table in tables: + columns = get_columns(table) + + for column in columns: + if column['name'] == 'tenant_id': + output.append((table, column)) + + return output + + +def alter_column(table, column): + old_name = 'tenant_id' + new_name = 'project_id' + + op.alter_column( + table_name=table, + column_name=old_name, + new_column_name=new_name, + existing_type=column['type'], + existing_nullable=column['nullable'] + ) + + +def recreate_index(index, table_name): + old_name = index['name'] + new_name = old_name.replace('tenant', 'project') + + op.drop_index(op.f(old_name), table_name) + op.create_index(new_name, table_name, ['project_id']) + + +def upgrade(): + inspector = get_inspector() + + data = get_data() + for table, column in data: + alter_column(table, column) + + indexes = inspector.get_indexes(table) + for index in indexes: + if 'tenant_id' in index['name']: + recreate_index(index, table) + + +def contract_creation_exceptions(): + """Special migration for the blueprint to support Keystone V3. + We drop all tenant_id columns and create project_id columns instead. + """ + return { + sa.Column: ['.'.join([table, 'project_id']) for table in get_tables()], + sa.Index: get_tables() + } diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/contract/a84ccf28f06a_migrate_dns_name_from_port.py neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/contract/a84ccf28f06a_migrate_dns_name_from_port.py --- neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/contract/a84ccf28f06a_migrate_dns_name_from_port.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/contract/a84ccf28f06a_migrate_dns_name_from_port.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,69 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""migrate dns name from port""" + +# revision identifiers, used by Alembic. 
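
Once the 7d9d8eeec6ad upgrade above has run, none of the listed tables should retain a tenant_id column. A quick post-upgrade check along these lines (an illustrative snippet, not part of the migration; engine is any SQLAlchemy engine bound to the upgraded database):

import sqlalchemy as sa


def leftover_tenant_columns(engine, tables):
    inspector = sa.inspect(engine)
    return [t for t in tables
            if any(col['name'] == 'tenant_id'
                   for col in inspector.get_columns(t))]
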
+revision = 'a84ccf28f06a' +down_revision = 'b67e765a3524' +depends_on = ('a963b38d82f4',) + +from alembic import op +from neutron.extensions import dns +import sqlalchemy as sa + + +ports = sa.Table( + 'ports', sa.MetaData(), + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('dns_name', sa.String(length=dns.FQDN_MAX_LEN), nullable=True)) + + +portdnses = sa.Table('portdnses', sa.MetaData(), + sa.Column('port_id', sa.String(36), + sa.ForeignKey('ports.id', + ondelete="CASCADE"), + primary_key=True, index=True), + sa.Column('dns_name', sa.String(length=255), + nullable=False), + + sa.Column('current_dns_name', sa.String(255), + nullable=False), + sa.Column('current_dns_domain', sa.String(255), + nullable=False), + sa.Column('previous_dns_name', sa.String(255), + nullable=False), + sa.Column('previous_dns_domain', sa.String(255), + nullable=False)) + + +def migrate_records_for_existing(): + session = sa.orm.Session(bind=op.get_bind()) + with session.begin(subtransactions=True): + for row in session.query(ports): + if row[1]: + res = session.execute(portdnses.update().values( + dns_name=row[1]).where(portdnses.c.port_id == row[0])) + if res.rowcount == 0: + session.execute(portdnses.insert().values( + port_id=row[0], current_dns_name='', + current_dns_domain='', previous_dns_name='', + previous_dns_domain='', dns_name=row[1])) + session.commit() + + +def upgrade(): + migrate_records_for_existing() + op.drop_column('ports', 'dns_name') diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/contract/a8b517cff8ab_add_routerport_bindings_for_ha.py neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/contract/a8b517cff8ab_add_routerport_bindings_for_ha.py --- neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/contract/a8b517cff8ab_add_routerport_bindings_for_ha.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/contract/a8b517cff8ab_add_routerport_bindings_for_ha.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,68 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Add routerport bindings for L3 HA + +Revision ID: a8b517cff8ab +Revises: 7d9d8eeec6ad +Create Date: 2016-07-18 14:31:45.725516 + +""" + +# revision identifiers, used by Alembic.
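
migrate_records_for_existing above uses a portable update-then-insert: try the UPDATE first, and fall back to INSERT when rowcount shows that no row matched. The same idiom reduced to its core (table and column names are placeholders); it is race-free only in an offline migration, where no concurrent writers exist:

def upsert(session, table, key_col, key, values):
    # 'values' must carry the key column so the INSERT branch is complete.
    res = session.execute(
        table.update().values(**values).where(key_col == key))
    if res.rowcount == 0:
        session.execute(table.insert().values(**values))
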
+revision = 'a8b517cff8ab' +down_revision = '7d9d8eeec6ad' + +from alembic import op +from neutron_lib import constants as lib_const +import sqlalchemy as sa + +from neutron.common import constants + + +HA_AGENT_BINDINGS = 'ha_router_agent_port_bindings' +ROUTER_PORTS = 'routerports' + + +def upgrade(): + ha_bindings = sa.Table( + HA_AGENT_BINDINGS, + sa.MetaData(), + sa.Column('port_id', sa.String(36)), + sa.Column('router_id', sa.String(36)), + sa.Column('l3_agent_id', sa.String(36)), + sa.Column('state', sa.Enum(constants.HA_ROUTER_STATE_ACTIVE, + constants.HA_ROUTER_STATE_STANDBY, + name='l3_ha_states')) + ) + router_ports = sa.Table(ROUTER_PORTS, + sa.MetaData(), + sa.Column('router_id', sa.String(36)), + sa.Column('port_id', sa.String(36)), + sa.Column('port_type', sa.String(255))) + session = sa.orm.Session(bind=op.get_bind()) + with session.begin(subtransactions=True): + router_port_tuples = set() + for ha_bind in session.query(ha_bindings): + router_port_tuples.add((ha_bind.router_id, ha_bind.port_id)) + # we have to remove any from the bulk insert that may already exist + # as a result of Ifd3e007aaf2a2ed8123275aa3a9f540838e3c003 being + # back-ported + for router_port in session.query(router_ports).filter( + router_ports.c.port_type == lib_const.DEVICE_OWNER_ROUTER_HA_INTF): + router_port_tuples.discard((router_port.router_id, + router_port.port_id)) + new_records = [dict(router_id=router_id, port_id=port_id, + port_type=lib_const.DEVICE_OWNER_ROUTER_HA_INTF) + for router_id, port_id in router_port_tuples] + op.bulk_insert(router_ports, new_records) + session.commit() diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/contract/b67e765a3524_remove_mtu_column_from_networks.py neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/contract/b67e765a3524_remove_mtu_column_from_networks.py --- neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/contract/b67e765a3524_remove_mtu_column_from_networks.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/contract/b67e765a3524_remove_mtu_column_from_networks.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,30 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Remove mtu column from networks. + +Revision ID: b67e765a3524 +Revises: 4bcd4df1f426 +Create Date: 2016-07-17 02:07:36.625196 + +""" + +# revision identifiers, used by Alembic. 
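# Illustration: with this migration the MTU is no longer persisted per
# network; it is computed from configuration when networks are served (see
# the netmtu_db.py hunk later in this diff), roughly:
#
#     network_res[netmtu.MTU] = utils.get_deployment_physnet_mtu()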
+revision = 'b67e765a3524' +down_revision = '4bcd4df1f426' + +from alembic import op + + +def upgrade(): + op.drop_column('networks', 'mtu') diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/expand/030a959ceafa_uniq_routerports0port_id.py neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/expand/030a959ceafa_uniq_routerports0port_id.py --- neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/expand/030a959ceafa_uniq_routerports0port_id.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/expand/030a959ceafa_uniq_routerports0port_id.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,68 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""uniq_routerports0port_id + +Revision ID: 030a959ceafa +Revises: 3d0e74aa7d37 +Create Date: 2016-06-21 11:33:13.043879 + +""" + +# revision identifiers, used by Alembic. +revision = '030a959ceafa' +down_revision = '3d0e74aa7d37' + +from alembic import op +from neutron_lib import exceptions +import sqlalchemy as sa + +from neutron._i18n import _ + +routerports = sa.Table( + 'routerports', sa.MetaData(), + sa.Column('router_id', sa.String(36)), + sa.Column('port_id', sa.String(36)), + sa.Column('port_type', sa.String(255))) + + +class DuplicatePortRecordinRouterPortdatabase(exceptions.Conflict): + message = _("Duplicate port(s) %(port_id)s records exist in routerports " + "database. Database cannot be upgraded. 
Please remove all " + "duplicated records before upgrading the database.") + + +def upgrade(): + op.create_unique_constraint( + 'uniq_routerports0port_id', + 'routerports', + ['port_id']) + + +def check_sanity(connection): + res = get_duplicate_port_records_in_routerport_database(connection) + if res: + raise DuplicatePortRecordinRouterPortdatabase(port_id=",".join(res)) + + +def get_duplicate_port_records_in_routerport_database(connection): + insp = sa.engine.reflection.Inspector.from_engine(connection) + if 'routerports' not in insp.get_table_names(): + return [] + session = sa.orm.Session(bind=connection.connect()) + query = (session.query(routerports.c.port_id) + .group_by(routerports.c.port_id) + .having(sa.func.count() > 1)).all() + return [q[0] for q in query] diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/expand/3d0e74aa7d37_add_flavor_id_to_routers.py neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/expand/3d0e74aa7d37_add_flavor_id_to_routers.py --- neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/expand/3d0e74aa7d37_add_flavor_id_to_routers.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/expand/3d0e74aa7d37_add_flavor_id_to_routers.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,43 @@ +# Copyright 2016 Mirantis +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add flavor_id to Router + +Revision ID: 3d0e74aa7d37 +Revises: a963b38d82f4 +Create Date: 2016-05-05 00:22:47.618593 + +""" + +from alembic import op +import sqlalchemy as sa + +from neutron.db import migration + + +# revision identifiers, used by Alembic. 
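# Illustration (hypothetical values): the upgrade below adds a nullable
# foreign-key column, so an existing router row simply gains a NULL flavor,
# roughly:
#
#     routers(id=<router uuid>, ..., flavor_id=NULL)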
+revision = '3d0e74aa7d37' +down_revision = 'a963b38d82f4' + +# milestone identifier, used by neutron-db-manage +neutron_milestone = [migration.NEWTON] + + +def upgrade(): + op.add_column('routers', + sa.Column('flavor_id', + sa.String(length=36), + sa.ForeignKey('flavors.id'), + nullable=True)) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/expand/5abc0278ca73_add_support_for_vlan_trunking.py neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/expand/5abc0278ca73_add_support_for_vlan_trunking.py --- neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/expand/5abc0278ca73_add_support_for_vlan_trunking.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/expand/5abc0278ca73_add_support_for_vlan_trunking.py 2016-08-03 20:10:33.000000000 +0000 @@ -18,14 +18,20 @@ from alembic import op import sqlalchemy as sa +from sqlalchemy import sql def upgrade(): op.create_table('trunks', + sa.Column('admin_state_up', sa.Boolean(), + nullable=False, server_default=sql.true()), sa.Column('tenant_id', sa.String(length=255), nullable=True, index=True), sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=True), sa.Column('port_id', sa.String(length=36), nullable=False), + sa.Column('status', sa.String(length=16), + nullable=False, server_default='ACTIVE'), sa.Column('standard_attr_id', sa.BigInteger(), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/expand/a5648cfeeadf_add_subnet_service_types.py neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/expand/a5648cfeeadf_add_subnet_service_types.py --- neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/expand/a5648cfeeadf_add_subnet_service_types.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/expand/a5648cfeeadf_add_subnet_service_types.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,39 @@ +# Copyright 2016 Hewlett Packard Enterprise Development Company, LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Add support for Subnet Service Types + +Revision ID: a5648cfeeadf +Revises: 030a959ceafa +Create Date: 2016-03-15 18:00:00.190173 + +""" + +# revision identifiers, used by Alembic. 
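# Illustration (hypothetical values): the new table associates subnets with
# device-owner strings; a subnet reserved for router gateway ports would
# carry a row roughly like:
#
#     subnet_service_types(subnet_id=<subnet uuid>,
#                          service_type='network:router_gateway')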
+revision = 'a5648cfeeadf' +down_revision = '030a959ceafa' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + op.create_table('subnet_service_types', + sa.Column('subnet_id', sa.String(length=36)), + sa.Column('service_type', sa.String(length=255)), + sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('subnet_id', 'service_type') + ) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/expand/a963b38d82f4_add_dns_name_to_portdnses.py neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/expand/a963b38d82f4_add_dns_name_to_portdnses.py --- neutron-9.0.0~b2~dev280/neutron/db/migration/alembic_migrations/versions/newton/expand/a963b38d82f4_add_dns_name_to_portdnses.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/alembic_migrations/versions/newton/expand/a963b38d82f4_add_dns_name_to_portdnses.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,28 @@ +# Copyright 2016 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add dns name to portdnses""" + +# revision identifiers, used by Alembic. +revision = 'a963b38d82f4' +down_revision = 'c415aab1c048' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + op.add_column('portdnses', + sa.Column('dns_name', sa.String(length=255), nullable=False)) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/__init__.py neutron-9.0.0~b3~dev557/neutron/db/migration/__init__.py --- neutron-9.0.0~b2~dev280/neutron/db/migration/__init__.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/__init__.py 2016-08-03 20:10:33.000000000 +0000 @@ -157,6 +157,27 @@ 'columns': values}) +def get_unique_constraints_map(table): + inspector = reflection.Inspector.from_engine(op.get_bind()) + return { + tuple(sorted(cons['column_names'])): cons['name'] + for cons in inspector.get_unique_constraints(table) + } + + +def remove_fk_unique_constraints(table, foreign_keys): + unique_constraints_map = get_unique_constraints_map(table) + for fk in foreign_keys: + constraint_name = unique_constraints_map.get( + tuple(sorted(fk['constrained_columns']))) + if constraint_name: + op.drop_constraint( + constraint_name=constraint_name, + table_name=table, + type_="unique" + ) + + def remove_foreign_keys(table, foreign_keys): for fk in foreign_keys: op.drop_constraint( @@ -179,11 +200,13 @@ @contextlib.contextmanager -def remove_fks_from_table(table): +def remove_fks_from_table(table, remove_unique_constraints=False): try: inspector = reflection.Inspector.from_engine(op.get_bind()) foreign_keys = inspector.get_foreign_keys(table) remove_foreign_keys(table, foreign_keys) + if remove_unique_constraints: + remove_fk_unique_constraints(table, foreign_keys) yield finally: create_foreign_keys(table, foreign_keys) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/migration/models/head.py neutron-9.0.0~b3~dev557/neutron/db/migration/models/head.py --- 
neutron-9.0.0~b2~dev280/neutron/db/migration/models/head.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/migration/models/head.py 2016-08-29 20:05:49.000000000 +0000 @@ -21,14 +21,15 @@ """ -from neutron.db import address_scope_db # noqa +import os.path + +from neutron.common import utils from neutron.db import agents_db # noqa from neutron.db import agentschedulers_db # noqa -from neutron.db.allowed_address_pairs import models # noqa from neutron.db import dns_db # noqa from neutron.db import dvr_mac_db # noqa from neutron.db import external_net_db # noqa -from neutron.db.extra_dhcp_opt import models # noqa +from neutron.db.extra_dhcp_opt import models as edo_models # noqa from neutron.db import extraroute_db # noqa from neutron.db import flavors_db # noqa from neutron.db import l3_agentschedulers_db # noqa @@ -39,27 +40,28 @@ from neutron.db import l3_hamode_db # noqa from neutron.db.metering import metering_db # noqa from neutron.db import model_base +from neutron.db import models from neutron.db import models_v2 # noqa +from neutron.db.port_security import models as ps_models # noqa from neutron.db import portbindings_db # noqa -from neutron.db import portsecurity_db # noqa from neutron.db import provisioning_blocks # noqa from neutron.db.qos import models as qos_models # noqa -from neutron.db.quota import models # noqa +from neutron.db.quota import models as quota_models # noqa from neutron.db import rbac_db_models # noqa -from neutron.db import securitygroups_db # noqa from neutron.db import segments_db # noqa from neutron.db import servicetype_db # noqa from neutron.db import tag_db # noqa from neutron.ipam.drivers.neutrondb_ipam import db_models # noqa -from neutron.plugins.ml2.drivers import type_flat # noqa from neutron.plugins.ml2.drivers import type_geneve # noqa -from neutron.plugins.ml2.drivers import type_gre # noqa from neutron.plugins.ml2.drivers import type_vlan # noqa from neutron.plugins.ml2.drivers import type_vxlan # noqa -from neutron.plugins.ml2 import models # noqa -from neutron.services.auto_allocate import models # noqa +from neutron.plugins.ml2 import models as ml2_models # noqa +from neutron.services.auto_allocate import models as aa_models # noqa from neutron.services.segments import db # noqa -from neutron.services.trunk import models # noqa +from neutron.services.trunk import models as trunk_models # noqa + + +utils.import_modules_recursively(os.path.dirname(models.__file__)) def get_metadata(): diff -Nru neutron-9.0.0~b2~dev280/neutron/db/model_base.py neutron-9.0.0~b3~dev557/neutron/db/model_base.py --- neutron-9.0.0~b2~dev280/neutron/db/model_base.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/model_base.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,26 +13,68 @@ # See the License for the specific language governing permissions and # limitations under the License. 
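# Context for the head.py hunk above: every module under neutron/db/models is
# now imported recursively so that the alembic metadata covers all tables. A
# hypothetical stdlib-only sketch of such a helper (the real one lives in
# neutron.common.utils):
#
#     import importlib
#     import pkgutil
#
#     def import_modules_recursively(directory):
#         for _, name, _ in pkgutil.walk_packages(
#                 [directory], prefix='neutron.db.models.'):
#             importlib.import_module(name)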
+import debtcollector
 from oslo_db.sqlalchemy import models
 from oslo_utils import uuidutils
 import sqlalchemy as sa
-from sqlalchemy.ext.associationproxy import association_proxy
 from sqlalchemy.ext import declarative
 from sqlalchemy import orm

 from neutron.api.v2 import attributes as attr


-class HasTenant(object):
-    """Tenant mixin, add to subclasses that have a tenant."""
+class HasProject(object):
+    """Project mixin, add to subclasses that have a project."""

-    # NOTE(jkoelker) tenant_id is just a free form string ;(
-    tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), index=True)
+    # NOTE(jkoelker) project_id is just a free form string ;(
+    project_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), index=True)
+
+    def get_tenant_id(self):
+        return self.project_id
+
+    def set_tenant_id(self, value):
+        self.project_id = value
+
+    @declarative.declared_attr
+    def tenant_id(cls):
+        return orm.synonym(
+            'project_id',
+            descriptor=property(cls.get_tenant_id, cls.set_tenant_id))
+
+
+HasTenant = debtcollector.moves.moved_class(HasProject, "HasTenant", __name__)
+
+
+class HasProjectNoIndex(HasProject):
+    """Project mixin, add to subclasses that have a project."""
+
+    # NOTE(jkoelker) project_id is just a free form string ;(
+    project_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN))
+
+
+class HasProjectPrimaryKeyIndex(HasProject):
+    """Project mixin, add to subclasses that have a project."""
+
+    # NOTE(jkoelker) project_id is just a free form string ;(
+    project_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), nullable=False,
+                           primary_key=True, index=True)
+
+
+class HasProjectPrimaryKey(HasProject):
+    """Project mixin, add to subclasses that have a project."""
+
+    # NOTE(jkoelker) project_id is just a free form string ;(
+    project_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), nullable=False,
+                           primary_key=True)


 class HasId(object):
     """id mixin, add to subclasses that have an id."""

+    def __init__(self, *args, **kwargs):
+        # NOTE(dasm): debtcollector requires init in class
+        super(HasId, self).__init__(*args, **kwargs)
+
     id = sa.Column(sa.String(36),
                    primary_key=True,
                    default=uuidutils.generate_uuid)
@@ -41,6 +83,10 @@
 class HasStatusDescription(object):
     """Status with description mixin."""

+    def __init__(self, *args, **kwargs):
+        # NOTE(dasm): debtcollector requires init in class
+        super(HasStatusDescription, self).__init__(*args, **kwargs)
+
     status = sa.Column(sa.String(16), nullable=False)
     status_description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN))

@@ -80,100 +126,11 @@
 BASEV2 = declarative.declarative_base(cls=NeutronBaseV2)


-class StandardAttribute(BASEV2, models.TimestampMixin):
-    """Common table to associate all Neutron API resources.
-
-    By having Neutron objects related to this table, we can associate new
-    tables that apply to many Neutron objects (e.g. timestamps, rbac entries)
-    to this table to avoid schema duplication while maintaining referential
-    integrity.
-
-    NOTE(kevinbenton): This table should not have more columns added to it
-    unless we are absolutely certain the new column will have a value for
-    every single type of Neutron resource. Otherwise this table will be filled
-    with NULL entries for combinations that don't make sense. Additionally,
-    by keeping this table small we can ensure that performance isn't adversely
-    impacted for queries on objects.
- """ - - # sqlite doesn't support auto increment on big integers so we use big int - # for everything but sqlite - id = sa.Column(sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), - primary_key=True, autoincrement=True) - - # NOTE(kevinbenton): this column is redundant information, but it allows - # operators/devs to look at the contents of this table and know which table - # the corresponding object is in. - # 255 was selected as a max just because it's the varchar ceiling in mysql - # before a 2-byte prefix is required. We shouldn't get anywhere near this - # limit with our table names... - resource_type = sa.Column(sa.String(255), nullable=False) - description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN)) - - revision_number = sa.Column( - sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), - server_default='0', nullable=False) - - __mapper_args__ = { - # see http://docs.sqlalchemy.org/en/latest/orm/versioning.html for - # details about how this works - "version_id_col": revision_number - } - - -class HasStandardAttributes(object): - @declarative.declared_attr - def standard_attr_id(cls): - return sa.Column( - sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), - sa.ForeignKey(StandardAttribute.id, ondelete="CASCADE"), - unique=True, - nullable=False - ) - - # NOTE(kevinbenton): we have to disable the following pylint check because - # it thinks we are overriding this method in the __init__ method. - #pylint: disable=method-hidden - @declarative.declared_attr - def standard_attr(cls): - return orm.relationship(StandardAttribute, - lazy='joined', - cascade='all, delete-orphan', - single_parent=True, - uselist=False) - - def __init__(self, description='', *args, **kwargs): - super(HasStandardAttributes, self).__init__(*args, **kwargs) - # here we automatically create the related standard attribute object - self.standard_attr = StandardAttribute( - resource_type=self.__tablename__, description=description) - - @declarative.declared_attr - def description(cls): - return association_proxy('standard_attr', 'description') - - @declarative.declared_attr - def created_at(cls): - return association_proxy('standard_attr', 'created_at') - - @declarative.declared_attr - def updated_at(cls): - return association_proxy('standard_attr', 'updated_at') - - def update(self, new_dict): - # ignore the timestamps if they were passed in. For example, this - # happens if code calls update_port with modified results of get_port - new_dict.pop('created_at', None) - new_dict.pop('updated_at', None) - super(HasStandardAttributes, self).update(new_dict) - - @declarative.declared_attr - def revision_number(cls): - return association_proxy('standard_attr', 'revision_number') - - def bump_revision(self): - # SQLAlchemy will bump the version for us automatically if the - # standard attr record is being modified, but we must call this - # for all other modifications or when relevant children are being - # modified (e.g. 
fixed_ips change should bump port revision) - self.standard_attr.revision_number += 1 +def get_unique_keys(model): + try: + constraints = model.__table__.constraints + except AttributeError: + constraints = [] + return [[c.name for c in constraint.columns] + for constraint in constraints + if isinstance(constraint, sa.UniqueConstraint)] diff -Nru neutron-9.0.0~b2~dev280/neutron/db/models/address_scope.py neutron-9.0.0~b3~dev557/neutron/db/models/address_scope.py --- neutron-9.0.0~b2~dev280/neutron/db/models/address_scope.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/models/address_scope.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,26 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa + +from neutron.api.v2 import attributes as attr +from neutron.db import model_base + + +class AddressScope(model_base.BASEV2, model_base.HasId, model_base.HasProject): + """Represents a neutron address scope.""" + + __tablename__ = "address_scopes" + + name = sa.Column(sa.String(attr.NAME_MAX_LEN), nullable=False) + shared = sa.Column(sa.Boolean, nullable=False) + ip_version = sa.Column(sa.Integer(), nullable=False) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/models/allowed_address_pair.py neutron-9.0.0~b3~dev557/neutron/db/models/allowed_address_pair.py --- neutron-9.0.0~b2~dev280/neutron/db/models/allowed_address_pair.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/models/allowed_address_pair.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,30 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.db import model_base +from neutron.db import models_v2 + + +class AllowedAddressPair(model_base.BASEV2): + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + mac_address = sa.Column(sa.String(32), nullable=False, primary_key=True) + ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) + + port = orm.relationship( + models_v2.Port, + backref=orm.backref("allowed_address_pairs", + lazy="joined", cascade="delete")) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/models/plugins/ml2/flatallocation.py neutron-9.0.0~b3~dev557/neutron/db/models/plugins/ml2/flatallocation.py --- neutron-9.0.0~b2~dev280/neutron/db/models/plugins/ml2/flatallocation.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/models/plugins/ml2/flatallocation.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,28 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa + +from neutron.db import model_base + + +class FlatAllocation(model_base.BASEV2): + """Represent persistent allocation state of a physical network. + + If a record exists for a physical network, then that physical + network has been allocated as a flat network. + """ + + __tablename__ = 'ml2_flat_allocations' + + physical_network = sa.Column(sa.String(64), nullable=False, + primary_key=True) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/models/plugins/ml2/gre_allocation_endpoints.py neutron-9.0.0~b3~dev557/neutron/db/models/plugins/ml2/gre_allocation_endpoints.py --- neutron-9.0.0~b2~dev280/neutron/db/models/plugins/ml2/gre_allocation_endpoints.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/models/plugins/ml2/gre_allocation_endpoints.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,45 @@ +# Copyright (c) 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+import sqlalchemy as sa
+from sqlalchemy import sql
+
+from neutron.db import model_base
+
+
+class GreAllocation(model_base.BASEV2):
+
+    __tablename__ = 'ml2_gre_allocations'
+
+    gre_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
+                       autoincrement=False)
+    allocated = sa.Column(sa.Boolean, nullable=False, default=False,
+                          server_default=sql.false(), index=True)
+
+
+class GreEndpoints(model_base.BASEV2):
+    """Represents tunnel endpoint in RPC mode."""
+
+    __tablename__ = 'ml2_gre_endpoints'
+    __table_args__ = (
+        sa.UniqueConstraint('host',
+                            name='unique_ml2_gre_endpoints0host'),
+        model_base.BASEV2.__table_args__
+    )
+    ip_address = sa.Column(sa.String(64), primary_key=True)
+    host = sa.Column(sa.String(255), nullable=True)
+
+    def __repr__(self):
+        return "<GreTunnelEndpoint(%s)>" % self.ip_address
diff -Nru neutron-9.0.0~b2~dev280/neutron/db/models/README neutron-9.0.0~b3~dev557/neutron/db/models/README
--- neutron-9.0.0~b2~dev280/neutron/db/models/README 1970-01-01 00:00:00.000000000 +0000
+++ neutron-9.0.0~b3~dev557/neutron/db/models/README 2016-08-29 20:05:49.000000000 +0000
@@ -0,0 +1,6 @@
+This directory is designed to contain all SQLAlchemy models shipped with core
+Neutron.
+
+* The expected directory structure is flat, except for the ML2 plugins. All ML2
+  plugin models should fall under the plugins subdirectory (e.g. plugins/ml2/gre_allocation).
+* Module names should use singular forms for nouns (port.py, not ports.py).
diff -Nru neutron-9.0.0~b2~dev280/neutron/db/models/securitygroup.py neutron-9.0.0~b3~dev557/neutron/db/models/securitygroup.py
--- neutron-9.0.0~b2~dev280/neutron/db/models/securitygroup.py 1970-01-01 00:00:00.000000000 +0000
+++ neutron-9.0.0~b3~dev557/neutron/db/models/securitygroup.py 2016-08-29 20:05:49.000000000 +0000
@@ -0,0 +1,92 @@
+# Copyright 2012 VMware, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.api.v2 import attributes +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.db import standard_attr + + +class SecurityGroup(standard_attr.HasStandardAttributes, model_base.BASEV2, + model_base.HasId, model_base.HasProject): + """Represents a v2 neutron security group.""" + + name = sa.Column(sa.String(attributes.NAME_MAX_LEN)) + + +class DefaultSecurityGroup(model_base.BASEV2, model_base.HasProjectPrimaryKey): + __tablename__ = 'default_security_group' + + security_group_id = sa.Column(sa.String(36), + sa.ForeignKey("securitygroups.id", + ondelete="CASCADE"), + nullable=False) + security_group = orm.relationship( + SecurityGroup, lazy='joined', + backref=orm.backref('default_security_group', cascade='all,delete'), + primaryjoin="SecurityGroup.id==DefaultSecurityGroup.security_group_id", + ) + + +class SecurityGroupPortBinding(model_base.BASEV2): + """Represents binding between neutron ports and security profiles.""" + + port_id = sa.Column(sa.String(36), + sa.ForeignKey("ports.id", + ondelete='CASCADE'), + primary_key=True) + security_group_id = sa.Column(sa.String(36), + sa.ForeignKey("securitygroups.id"), + primary_key=True) + revises_on_change = ('ports', ) + # Add a relationship to the Port model in order to instruct SQLAlchemy to + # eagerly load security group bindings + ports = orm.relationship( + models_v2.Port, + backref=orm.backref("security_groups", + lazy='joined', cascade='delete')) + + +class SecurityGroupRule(standard_attr.HasStandardAttributes, model_base.BASEV2, + model_base.HasId, model_base.HasProject): + """Represents a v2 neutron security group rule.""" + + security_group_id = sa.Column(sa.String(36), + sa.ForeignKey("securitygroups.id", + ondelete="CASCADE"), + nullable=False) + + remote_group_id = sa.Column(sa.String(36), + sa.ForeignKey("securitygroups.id", + ondelete="CASCADE"), + nullable=True) + revises_on_change = ('security_group', ) + direction = sa.Column(sa.Enum('ingress', 'egress', + name='securitygrouprules_direction')) + ethertype = sa.Column(sa.String(40)) + protocol = sa.Column(sa.String(40)) + port_range_min = sa.Column(sa.Integer) + port_range_max = sa.Column(sa.Integer) + remote_ip_prefix = sa.Column(sa.String(255)) + security_group = orm.relationship( + SecurityGroup, + backref=orm.backref('rules', cascade='all,delete', lazy='joined'), + primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id") + source_group = orm.relationship( + SecurityGroup, + backref=orm.backref('source_rules', cascade='all,delete'), + primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id") diff -Nru neutron-9.0.0~b2~dev280/neutron/db/models/subnet_service_type.py neutron-9.0.0~b3~dev557/neutron/db/models/subnet_service_type.py --- neutron-9.0.0~b2~dev280/neutron/db/models/subnet_service_type.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/models/subnet_service_type.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,42 @@ +# Copyright 2016 Hewlett Packard Enterprise Development Company, LP +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.api.v2 import attributes +from neutron.db import model_base +from neutron.db import models_v2 + + +class SubnetServiceType(model_base.BASEV2): + """Subnet Service Types table""" + + __tablename__ = "subnet_service_types" + + subnet_id = sa.Column(sa.String(36), + sa.ForeignKey('subnets.id', ondelete="CASCADE")) + # Service types must be valid device owners, therefore share max length + service_type = sa.Column(sa.String( + length=attributes.DEVICE_OWNER_MAX_LEN)) + subnet = orm.relationship(models_v2.Subnet, + backref=orm.backref('service_types', + lazy='joined', + cascade='all, delete-orphan', + uselist=True)) + __table_args__ = ( + sa.PrimaryKeyConstraint('subnet_id', 'service_type'), + model_base.BASEV2.__table_args__ + ) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/models_v2.py neutron-9.0.0~b3~dev557/neutron/db/models_v2.py --- neutron-9.0.0~b2~dev280/neutron/db/models_v2.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/models_v2.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,15 +13,16 @@ # License for the specific language governing permissions and limitations # under the License. +from neutron_lib import constants import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy import sql from neutron.api.v2 import attributes as attr -from neutron.common import constants from neutron.db import model_base from neutron.db.network_dhcp_agent_binding import models as ndab_model from neutron.db import rbac_db_models +from neutron.db import standard_attr # NOTE(kevinbenton): these are here for external projects that expect them @@ -96,6 +97,7 @@ network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id", ondelete="CASCADE"), nullable=False, primary_key=True) + revises_on_change = ('port', ) class Route(object): @@ -113,7 +115,7 @@ primary_key=True) -class Port(model_base.HasStandardAttributes, model_base.BASEV2, +class Port(standard_attr.HasStandardAttributes, model_base.BASEV2, HasId, HasTenant): """Represents a port on a Neutron v2 network.""" @@ -129,7 +131,6 @@ device_id = sa.Column(sa.String(attr.DEVICE_ID_MAX_LEN), nullable=False) device_owner = sa.Column(sa.String(attr.DEVICE_OWNER_MAX_LEN), nullable=False) - dns_name = sa.Column(sa.String(255), nullable=True) __table_args__ = ( sa.Index( 'ix_ports_network_id_mac_address', 'network_id', 'mac_address'), @@ -144,8 +145,7 @@ def __init__(self, id=None, tenant_id=None, name=None, network_id=None, mac_address=None, admin_state_up=None, status=None, - device_id=None, device_owner=None, fixed_ips=None, - dns_name=None, **kwargs): + device_id=None, device_owner=None, fixed_ips=None, **kwargs): super(Port, self).__init__(**kwargs) self.id = id self.tenant_id = tenant_id @@ -155,7 +155,6 @@ self.admin_state_up = admin_state_up self.device_owner = device_owner self.device_id = device_id - self.dns_name = dns_name # Since this is a relationship only set it if one is passed in. 
if fixed_ips: self.fixed_ips = fixed_ips @@ -175,7 +174,7 @@ order = sa.Column(sa.Integer, nullable=False, server_default='0') -class Subnet(model_base.HasStandardAttributes, model_base.BASEV2, +class Subnet(standard_attr.HasStandardAttributes, model_base.BASEV2, HasId, HasTenant): """Represents a neutron subnet. @@ -215,11 +214,13 @@ ipv6_ra_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, constants.DHCPV6_STATEFUL, constants.DHCPV6_STATELESS, - name='ipv6_ra_modes'), nullable=True) + name='ipv6_ra_modes'), + nullable=True) ipv6_address_mode = sa.Column(sa.Enum(constants.IPV6_SLAAC, - constants.DHCPV6_STATEFUL, - constants.DHCPV6_STATELESS, - name='ipv6_address_modes'), nullable=True) + constants.DHCPV6_STATEFUL, + constants.DHCPV6_STATELESS, + name='ipv6_address_modes'), + nullable=True) # subnets don't have their own rbac_entries, they just inherit from # the network rbac entries rbac_entries = orm.relationship( @@ -242,7 +243,7 @@ primary_key=True) -class SubnetPool(model_base.HasStandardAttributes, model_base.BASEV2, +class SubnetPool(standard_attr.HasStandardAttributes, model_base.BASEV2, HasId, HasTenant): """Represents a neutron subnet pool. """ @@ -264,7 +265,7 @@ lazy='joined') -class Network(model_base.HasStandardAttributes, model_base.BASEV2, +class Network(standard_attr.HasStandardAttributes, model_base.BASEV2, HasId, HasTenant): """Represents a v2 neutron network.""" @@ -275,7 +276,6 @@ lazy="joined") status = sa.Column(sa.String(16)) admin_state_up = sa.Column(sa.Boolean) - mtu = sa.Column(sa.Integer, nullable=True) vlan_transparent = sa.Column(sa.Boolean, nullable=True) rbac_entries = orm.relationship(rbac_db_models.NetworkRBAC, backref='network', lazy='joined', diff -Nru neutron-9.0.0~b2~dev280/neutron/db/netmtu_db.py neutron-9.0.0~b3~dev557/neutron/db/netmtu_db.py --- neutron-9.0.0~b2~dev280/neutron/db/netmtu_db.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/netmtu_db.py 2016-08-03 20:10:33.000000000 +0000 @@ -13,16 +13,27 @@ # License for the specific language governing permissions and limitations # under the License. +from oslo_config import cfg + from neutron.api.v2 import attributes from neutron.db import db_base_plugin_v2 from neutron.extensions import netmtu +from neutron.plugins.common import utils + + +CONF = cfg.CONF +# TODO(ihrachys): the class is not used in the tree; mixins are generally +# discouraged these days, so maybe it's worth considering deprecation for the +# class. Interested plugins would be able to ship it on their own, if they want +# to stick to mixins, or implement the behaviour in another way. 
class Netmtu_db_mixin(object): - """Mixin class to add network MTU methods to db_base_plugin_v2.""" + """Mixin class to add network MTU support to db_base_plugin_v2.""" def _extend_network_dict_mtu(self, network_res, network_db): - network_res[netmtu.MTU] = network_db.mtu + # don't use network_db argument since MTU is not persisted in database + network_res[netmtu.MTU] = utils.get_deployment_physnet_mtu() return network_res db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( diff -Nru neutron-9.0.0~b2~dev280/neutron/db/port_security/models.py neutron-9.0.0~b3~dev557/neutron/db/port_security/models.py --- neutron-9.0.0~b2~dev280/neutron/db/port_security/models.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/port_security/models.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,46 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy as sa +from sqlalchemy import orm + +from neutron.db import model_base +from neutron.db import models_v2 + + +class PortSecurityBinding(model_base.BASEV2): + port_id = sa.Column(sa.String(36), + sa.ForeignKey('ports.id', ondelete="CASCADE"), + primary_key=True) + port_security_enabled = sa.Column(sa.Boolean(), nullable=False) + + # Add a relationship to the Port model in order to be to able to + # instruct SQLAlchemy to eagerly load port security binding + port = orm.relationship( + models_v2.Port, + backref=orm.backref("port_security", uselist=False, + cascade='delete', lazy='joined')) + + +class NetworkSecurityBinding(model_base.BASEV2): + network_id = sa.Column(sa.String(36), + sa.ForeignKey('networks.id', ondelete="CASCADE"), + primary_key=True) + port_security_enabled = sa.Column(sa.Boolean(), nullable=False) + + # Add a relationship to the Port model in order to be able to instruct + # SQLAlchemy to eagerly load default port security setting for ports + # on this network + network = orm.relationship( + models_v2.Network, + backref=orm.backref("port_security", uselist=False, + cascade='delete', lazy='joined')) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/portsecurity_db_common.py neutron-9.0.0~b3~dev557/neutron/db/portsecurity_db_common.py --- neutron-9.0.0~b2~dev280/neutron/db/portsecurity_db_common.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/portsecurity_db_common.py 2016-08-03 20:10:33.000000000 +0000 @@ -12,42 +12,9 @@ # License for the specific language governing permissions and limitations # under the License. 
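# Illustration for the binding models above: uselist=False plus the joined
# backref means the binding is loaded eagerly with its parent object, so a
# caller can do roughly (sketch, assuming an ORM session and a port id):
#
#     port = context.session.query(models_v2.Port).get(port_id)
#     enabled = port.port_security.port_security_enabled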
-import sqlalchemy as sa -from sqlalchemy import orm -from sqlalchemy.orm import exc - -from neutron.db import model_base -from neutron.db import models_v2 from neutron.extensions import portsecurity as psec - - -class PortSecurityBinding(model_base.BASEV2): - port_id = sa.Column(sa.String(36), - sa.ForeignKey('ports.id', ondelete="CASCADE"), - primary_key=True) - port_security_enabled = sa.Column(sa.Boolean(), nullable=False) - - # Add a relationship to the Port model in order to be to able to - # instruct SQLAlchemy to eagerly load port security binding - port = orm.relationship( - models_v2.Port, - backref=orm.backref("port_security", uselist=False, - cascade='delete', lazy='joined')) - - -class NetworkSecurityBinding(model_base.BASEV2): - network_id = sa.Column(sa.String(36), - sa.ForeignKey('networks.id', ondelete="CASCADE"), - primary_key=True) - port_security_enabled = sa.Column(sa.Boolean(), nullable=False) - - # Add a relationship to the Port model in order to be able to instruct - # SQLAlchemy to eagerly load default port security setting for ports - # on this network - network = orm.relationship( - models_v2.Network, - backref=orm.backref("port_security", uselist=False, - cascade='delete', lazy='joined')) +from neutron.objects.network.extensions import port_security as n_ps +from neutron.objects.port.extensions import port_security as p_ps class PortSecurityDbCommon(object): @@ -60,90 +27,71 @@ response_data[psec.PORTSECURITY] = ( db_data['port_security'][psec.PORTSECURITY]) - def _process_network_port_security_create( - self, context, network_req, network_res): - with context.session.begin(subtransactions=True): - db = NetworkSecurityBinding( - network_id=network_res['id'], - port_security_enabled=network_req[psec.PORTSECURITY]) - context.session.add(db) - network_res[psec.PORTSECURITY] = network_req[psec.PORTSECURITY] - return self._make_network_port_security_dict(db) + def _process_port_security_create( + self, context, obj_cls, res_name, req, res): + obj = obj_cls( + context, + id=res['id'], + port_security_enabled=req[psec.PORTSECURITY] + ) + obj.create() + res[psec.PORTSECURITY] = req[psec.PORTSECURITY] + return self._make_port_security_dict(obj, res_name) def _process_port_port_security_create( self, context, port_req, port_res): - with context.session.begin(subtransactions=True): - db = PortSecurityBinding( - port_id=port_res['id'], - port_security_enabled=port_req[psec.PORTSECURITY]) - context.session.add(db) - port_res[psec.PORTSECURITY] = port_req[psec.PORTSECURITY] - return self._make_port_security_dict(db) + self._process_port_security_create( + context, p_ps.PortSecurity, 'port', + port_req, port_res) + + def _process_network_port_security_create( + self, context, network_req, network_res): + self._process_port_security_create( + context, n_ps.NetworkPortSecurity, 'network', + network_req, network_res) + + def _get_security_binding(self, context, obj_cls, res_id): + obj = obj_cls.get_object(context, id=res_id) + # NOTE(ihrachys) the resource may have been created before port + # security extension was enabled; return default value + return obj.port_security_enabled if obj else psec.DEFAULT_PORT_SECURITY def _get_network_security_binding(self, context, network_id): - try: - query = self._model_query(context, NetworkSecurityBinding) - binding = query.filter( - NetworkSecurityBinding.network_id == network_id).one() - return binding.port_security_enabled - except exc.NoResultFound: - # NOTE(ihrachys) the resource may have been created before port - # security extension 
was enabled; return default value - return psec.DEFAULT_PORT_SECURITY + return self._get_security_binding( + context, n_ps.NetworkPortSecurity, network_id) def _get_port_security_binding(self, context, port_id): - try: - query = self._model_query(context, PortSecurityBinding) - binding = query.filter( - PortSecurityBinding.port_id == port_id).one() - return binding.port_security_enabled - except exc.NoResultFound: - # NOTE(ihrachys) the resource may have been created before port - # security extension was enabled; return default value - return psec.DEFAULT_PORT_SECURITY + return self._get_security_binding(context, p_ps.PortSecurity, port_id) def _process_port_port_security_update( self, context, port_req, port_res): - if psec.PORTSECURITY not in port_req: - return - port_security_enabled = port_req[psec.PORTSECURITY] - try: - query = self._model_query(context, PortSecurityBinding) - port_id = port_res['id'] - binding = query.filter( - PortSecurityBinding.port_id == port_id).one() - binding.port_security_enabled = port_security_enabled - port_res[psec.PORTSECURITY] = port_security_enabled - except exc.NoResultFound: - # NOTE(ihrachys) the resource may have been created before port - # security extension was enabled; create the binding model - self._process_port_port_security_create( - context, port_req, port_res) + self._process_port_security_update( + context, p_ps.PortSecurity, 'port', port_req, port_res) def _process_network_port_security_update( self, context, network_req, network_res): - if psec.PORTSECURITY not in network_req: + self._process_port_security_update( + context, n_ps.NetworkPortSecurity, 'network', + network_req, network_res) + + def _process_port_security_update( + self, context, obj_cls, res_name, req, res): + if psec.PORTSECURITY not in req: return - port_security_enabled = network_req[psec.PORTSECURITY] - try: - query = self._model_query(context, NetworkSecurityBinding) - network_id = network_res['id'] - binding = query.filter( - NetworkSecurityBinding.network_id == network_id).one() - binding.port_security_enabled = port_security_enabled - network_res[psec.PORTSECURITY] = port_security_enabled - except exc.NoResultFound: + port_security_enabled = req[psec.PORTSECURITY] + + obj = obj_cls.get_object(context, id=res['id']) + if obj: + obj.port_security_enabled = port_security_enabled + obj.update() + res[psec.PORTSECURITY] = port_security_enabled + else: # NOTE(ihrachys) the resource may have been created before port # security extension was enabled; create the binding model - self._process_network_port_security_create( - context, network_req, network_res) + self._process_port_security_create( + context, obj_cls, res_name, req, res) - def _make_network_port_security_dict(self, port_security, fields=None): - res = {'network_id': port_security['network_id'], - psec.PORTSECURITY: port_security.port_security_enabled} - return self._fields(res, fields) - - def _make_port_security_dict(self, port, fields=None): - res = {'port_id': port['port_id'], - psec.PORTSECURITY: port.port_security_enabled} - return self._fields(res, fields) + def _make_port_security_dict(self, res, res_name, fields=None): + res_ = {'%s_id' % res_name: res.id, + psec.PORTSECURITY: res.port_security_enabled} + return self._fields(res_, fields) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/provisioning_blocks.py neutron-9.0.0~b3~dev557/neutron/db/provisioning_blocks.py --- neutron-9.0.0~b2~dev280/neutron/db/provisioning_blocks.py 2016-05-23 16:29:20.000000000 +0000 +++ 
neutron-9.0.0~b3~dev557/neutron/db/provisioning_blocks.py 2016-08-29 20:05:49.000000000 +0000 @@ -23,6 +23,7 @@ from neutron.db import api as db_api from neutron.db import model_base from neutron.db import models_v2 +from neutron.db import standard_attr LOG = logging.getLogger(__name__) PROVISIONING_COMPLETE = 'provisioning_complete' @@ -36,7 +37,7 @@ # the standard attr id of the thing we want to block standard_attr_id = ( sa.Column(sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), - sa.ForeignKey(model_base.StandardAttribute.id, + sa.ForeignKey(standard_attr.StandardAttribute.id, ondelete="CASCADE"), primary_key=True)) # the entity that wants to block the status change (e.g. L2 Agent) @@ -152,6 +153,22 @@ context=context, object_id=object_id) +def is_object_blocked(context, object_id, object_type): + """Return boolean indicating if object has a provisioning block. + + :param context: neutron api request context + :param object_id: ID of object that has been provisioned + :param object_type: callback resource type of the object + """ + standard_attr_id = _get_standard_attr_id(context, object_id, + object_type) + if not standard_attr_id: + # object doesn't exist so it has no blocks + return False + return bool(context.session.query(ProvisioningBlock).filter_by( + standard_attr_id=standard_attr_id).count()) + + def _get_standard_attr_id(context, object_id, object_type): model = _RESOURCE_TO_MODEL_MAP.get(object_type) if not model: diff -Nru neutron-9.0.0~b2~dev280/neutron/db/qos/models.py neutron-9.0.0~b3~dev557/neutron/db/qos/models.py --- neutron-9.0.0~b2~dev280/neutron/db/qos/models.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/qos/models.py 2016-08-29 20:05:49.000000000 +0000 @@ -21,7 +21,7 @@ from neutron.db import rbac_db_models -class QosPolicy(model_base.BASEV2, model_base.HasId, model_base.HasTenant): +class QosPolicy(model_base.BASEV2, model_base.HasId, model_base.HasProject): __tablename__ = 'qos_policies' name = sa.Column(sa.String(attrs.NAME_MAX_LEN)) description = sa.Column(sa.String(attrs.DESCRIPTION_MAX_LEN)) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/quota/api.py neutron-9.0.0~b3~dev557/neutron/db/quota/api.py --- neutron-9.0.0~b2~dev280/neutron/db/quota/api.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/quota/api.py 2016-08-29 20:05:49.000000000 +0000 @@ -121,6 +121,7 @@ usage_data.dirty) +@db_api.context_manager.writer def set_quota_usage_dirty(context, resource, tenant_id, dirty=True): """Set quota usage dirty bit for a given resource and tenant. @@ -134,6 +135,7 @@ return query.update({'dirty': dirty}) +@db_api.context_manager.writer def set_resources_quota_usage_dirty(context, resources, tenant_id, dirty=True): """Set quota usage dirty bit for a given tenant and multiple resources. @@ -151,6 +153,7 @@ return query.update({'dirty': dirty}, synchronize_session=False) +@db_api.context_manager.writer def set_all_quota_usage_dirty(context, resource, dirty=True): """Set the dirty bit on quota usage for all tenants. 
@@ -196,6 +199,7 @@ for delta in resv.resource_deltas)) +@db_api.context_manager.writer def remove_reservation(context, reservation_id, set_dirty=False): delete_query = context.session.query(quota_models.Reservation).filter_by( id=reservation_id) @@ -250,6 +254,7 @@ for (resource, exp, total_reserved) in resv_query) +@db_api.context_manager.writer def remove_expired_reservations(context, tenant_id=None): now = utcnow() resv_query = context.session.query(quota_models.Reservation) diff -Nru neutron-9.0.0~b2~dev280/neutron/db/quota/driver.py neutron-9.0.0~b3~dev557/neutron/db/quota/driver.py --- neutron-9.0.0~b2~dev280/neutron/db/quota/driver.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/quota/driver.py 2016-08-03 20:10:33.000000000 +0000 @@ -14,7 +14,6 @@ # under the License. from neutron_lib import exceptions -from oslo_db import api as oslo_db_api from oslo_log import log from neutron.common import exceptions as n_exc @@ -151,17 +150,13 @@ return dict((k, v) for k, v in quotas.items()) def _handle_expired_reservations(self, context, tenant_id): - LOG.debug("Deleting expired reservations for tenant:%s" % tenant_id) + LOG.debug("Deleting expired reservations for tenant:%s", tenant_id) # Delete expired reservations (we don't want them to accrue # in the database) quota_api.remove_expired_reservations( context, tenant_id=tenant_id) - @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES, - retry_interval=0.1, - inc_retry_interval=True, - retry_on_request=True, - exception_checker=db_api.is_retriable) + @db_api.retry_db_errors def make_reservation(self, context, tenant_id, resources, deltas, plugin): # Lock current reservation table # NOTE(salv-orlando): This routine uses DB write locks. diff -Nru neutron-9.0.0~b2~dev280/neutron/db/quota/models.py neutron-9.0.0~b3~dev557/neutron/db/quota/models.py --- neutron-9.0.0~b2~dev280/neutron/db/quota/models.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/quota/models.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,7 +16,6 @@ from sqlalchemy import orm from sqlalchemy import sql -from neutron.api.v2 import attributes as attr from neutron.db import model_base @@ -31,8 +30,8 @@ amount = sa.Column(sa.Integer) -class Reservation(model_base.BASEV2, model_base.HasId): - tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN)) +class Reservation(model_base.BASEV2, model_base.HasId, + model_base.HasProjectNoIndex): expiration = sa.Column(sa.DateTime()) resource_deltas = orm.relationship(ResourceDelta, backref='reservation', @@ -40,7 +39,7 @@ cascade='all, delete-orphan') -class Quota(model_base.BASEV2, model_base.HasId, model_base.HasTenant): +class Quota(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represent a single quota override for a tenant. 
If there is no row for a given tenant id and resource, then the @@ -50,13 +49,11 @@ limit = sa.Column(sa.Integer) -class QuotaUsage(model_base.BASEV2): +class QuotaUsage(model_base.BASEV2, model_base.HasProjectPrimaryKeyIndex): """Represents the current usage for a given resource.""" resource = sa.Column(sa.String(255), nullable=False, primary_key=True, index=True) - tenant_id = sa.Column(sa.String(attr.TENANT_ID_MAX_LEN), nullable=False, - primary_key=True, index=True) dirty = sa.Column(sa.Boolean, nullable=False, server_default=sql.false()) in_use = sa.Column(sa.Integer, nullable=False, diff -Nru neutron-9.0.0~b2~dev280/neutron/db/rbac_db_models.py neutron-9.0.0~b3~dev557/neutron/db/rbac_db_models.py --- neutron-9.0.0~b2~dev280/neutron/db/rbac_db_models.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/rbac_db_models.py 2016-08-29 20:05:49.000000000 +0000 @@ -35,7 +35,7 @@ "'%(object_type)s'. Valid actions: %(valid_actions)s") -class RBACColumns(model_base.HasId, model_base.HasTenant): +class RBACColumns(model_base.HasId, model_base.HasProject): """Mixin that object-specific RBAC tables should inherit. All RBAC tables should inherit directly from this one because diff -Nru neutron-9.0.0~b2~dev280/neutron/db/securitygroups_db.py neutron-9.0.0~b3~dev557/neutron/db/securitygroups_db.py --- neutron-9.0.0~b2~dev280/neutron/db/securitygroups_db.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/securitygroups_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -12,14 +12,12 @@ # License for the specific language governing permissions and limitations # under the License. +import sys + import netaddr from neutron_lib.api import validators from neutron_lib import constants -from oslo_db import exception as db_exc -from oslo_log import log as logging from oslo_utils import uuidutils -import sqlalchemy as sa -from sqlalchemy import orm from sqlalchemy.orm import exc from sqlalchemy.orm import scoped_session @@ -29,91 +27,15 @@ from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources +from neutron.common import _deprecate from neutron.common import constants as n_const from neutron.common import utils from neutron.db import api as db_api from neutron.db import db_base_plugin_v2 -from neutron.db import model_base -from neutron.db import models_v2 +from neutron.db.models import securitygroup as sg_models from neutron.extensions import securitygroup as ext_sg -LOG = logging.getLogger(__name__) - - -class SecurityGroup(model_base.HasStandardAttributes, model_base.BASEV2, - model_base.HasId, model_base.HasTenant): - """Represents a v2 neutron security group.""" - - name = sa.Column(sa.String(attributes.NAME_MAX_LEN)) - - -class DefaultSecurityGroup(model_base.BASEV2): - __tablename__ = 'default_security_group' - - tenant_id = sa.Column(sa.String(attributes.TENANT_ID_MAX_LEN), - primary_key=True, nullable=False) - security_group_id = sa.Column(sa.String(36), - sa.ForeignKey("securitygroups.id", - ondelete="CASCADE"), - nullable=False) - security_group = orm.relationship( - SecurityGroup, lazy='joined', - backref=orm.backref('default_security_group', cascade='all,delete'), - primaryjoin="SecurityGroup.id==DefaultSecurityGroup.security_group_id", - ) - - -class SecurityGroupPortBinding(model_base.BASEV2): - """Represents binding between neutron ports and security profiles.""" - - port_id = sa.Column(sa.String(36), - sa.ForeignKey("ports.id", - ondelete='CASCADE'), - primary_key=True) - 
security_group_id = sa.Column(sa.String(36), - sa.ForeignKey("securitygroups.id"), - primary_key=True) - - # Add a relationship to the Port model in order to instruct SQLAlchemy to - # eagerly load security group bindings - ports = orm.relationship( - models_v2.Port, - backref=orm.backref("security_groups", - lazy='joined', cascade='delete')) - - -class SecurityGroupRule(model_base.HasStandardAttributes, model_base.BASEV2, - model_base.HasId, model_base.HasTenant): - """Represents a v2 neutron security group rule.""" - - security_group_id = sa.Column(sa.String(36), - sa.ForeignKey("securitygroups.id", - ondelete="CASCADE"), - nullable=False) - - remote_group_id = sa.Column(sa.String(36), - sa.ForeignKey("securitygroups.id", - ondelete="CASCADE"), - nullable=True) - - direction = sa.Column(sa.Enum('ingress', 'egress', - name='securitygrouprules_direction')) - ethertype = sa.Column(sa.String(40)) - protocol = sa.Column(sa.String(40)) - port_range_min = sa.Column(sa.Integer) - port_range_max = sa.Column(sa.Integer) - remote_ip_prefix = sa.Column(sa.String(255)) - security_group = orm.relationship( - SecurityGroup, - backref=orm.backref('rules', cascade='all,delete', lazy='joined'), - primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id") - source_group = orm.relationship( - SecurityGroup, - backref=orm.backref('source_rules', cascade='all,delete'), - primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id") - - class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): """Mixin class to add security group to db_base_plugin_v2.""" @@ -157,20 +79,20 @@ self._ensure_default_security_group(context, tenant_id) with db_api.autonested_transaction(context.session): - security_group_db = SecurityGroup(id=s.get('id') or ( + security_group_db = sg_models.SecurityGroup(id=s.get('id') or ( uuidutils.generate_uuid()), description=s['description'], tenant_id=tenant_id, name=s['name']) context.session.add(security_group_db) if default_sg: - context.session.add(DefaultSecurityGroup( + context.session.add(sg_models.DefaultSecurityGroup( security_group=security_group_db, tenant_id=security_group_db['tenant_id'])) for ethertype in ext_sg.sg_supported_ethertypes: if default_sg: # Allow intercommunication - ingress_rule = SecurityGroupRule( + ingress_rule = sg_models.SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group=security_group_db, direction='ingress', @@ -178,7 +100,7 @@ source_group=security_group_db) context.session.add(ingress_rule) - egress_rule = SecurityGroupRule( + egress_rule = sg_models.SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group=security_group_db, direction='egress', @@ -215,7 +137,7 @@ marker_obj = self._get_marker_obj(context, 'security_group', limit, marker) return self._get_collection(context, - SecurityGroup, + sg_models.SecurityGroup, self._make_security_group_dict, filters=filters, fields=fields, sorts=sorts, @@ -223,7 +145,7 @@ page_reverse=page_reverse) def get_security_groups_count(self, context, filters=None): - return self._get_collection_count(context, SecurityGroup, + return self._get_collection_count(context, sg_models.SecurityGroup, filters=filters) def get_security_group(self, context, id, fields=None, tenant_id=None): @@ -248,8 +170,8 @@ def _get_security_group(self, context, id): try: - query = self._model_query(context, SecurityGroup) - sg = query.filter(SecurityGroup.id == id).one() + query = self._model_query(context, sg_models.SecurityGroup) + sg = 
query.filter(sg_models.SecurityGroup.id == id).one() except exc.NoResultFound: raise ext_sg.SecurityGroupNotFound(id=id) @@ -275,6 +197,9 @@ **kwargs) with context.session.begin(subtransactions=True): + # pass security_group_rule_ids to ensure + # consistency with deleted rules + kwargs['security_group_rule_ids'] = [r['id'] for r in sg.rules] self._registry_notify(resources.SECURITY_GROUP, events.PRECOMMIT_DELETE, exc_cls=ext_sg.SecurityGroupInUse, id=id, @@ -331,21 +256,21 @@ def _create_port_security_group_binding(self, context, port_id, security_group_id): with context.session.begin(subtransactions=True): - db = SecurityGroupPortBinding(port_id=port_id, + db = sg_models.SecurityGroupPortBinding(port_id=port_id, security_group_id=security_group_id) context.session.add(db) def _get_port_security_group_bindings(self, context, filters=None, fields=None): return self._get_collection(context, - SecurityGroupPortBinding, + sg_models.SecurityGroupPortBinding, self._make_security_group_binding_dict, filters=filters, fields=fields) def _delete_port_security_group_bindings(self, context, port_id): - query = self._model_query(context, SecurityGroupPortBinding) + query = self._model_query(context, sg_models.SecurityGroupPortBinding) bindings = query.filter( - SecurityGroupPortBinding.port_id == port_id) + sg_models.SecurityGroupPortBinding.port_id == port_id) with context.session.begin(subtransactions=True): for binding in bindings: context.session.delete(binding) @@ -379,8 +304,6 @@ validate=True): if validate: self._validate_security_group_rule(context, security_group_rule) - self._check_for_duplicate_rules_in_db(context, security_group_rule) - rule_dict = security_group_rule['security_group_rule'] kwargs = { 'context': context, @@ -391,7 +314,10 @@ exc_cls=ext_sg.SecurityGroupConflict, **kwargs) with context.session.begin(subtransactions=True): - db = SecurityGroupRule( + if validate: + self._check_for_duplicate_rules_in_db(context, + security_group_rule) + db = sg_models.SecurityGroupRule( id=(rule_dict.get('id') or uuidutils.generate_uuid()), tenant_id=rule_dict['tenant_id'], security_group_id=rule_dict['security_group_id'], @@ -634,7 +560,7 @@ marker_obj = self._get_marker_obj(context, 'security_group_rule', limit, marker) return self._get_collection(context, - SecurityGroupRule, + sg_models.SecurityGroupRule, self._make_security_group_rule_dict, filters=filters, fields=fields, sorts=sorts, @@ -642,7 +568,7 @@ page_reverse=page_reverse) def get_security_group_rules_count(self, context, filters=None): - return self._get_collection_count(context, SecurityGroupRule, + return self._get_collection_count(context, sg_models.SecurityGroupRule, filters=filters) def get_security_group_rule(self, context, id, fields=None): @@ -651,8 +577,8 @@ def _get_security_group_rule(self, context, id): try: - query = self._model_query(context, SecurityGroupRule) - sgr = query.filter(SecurityGroupRule.id == id).one() + query = self._model_query(context, sg_models.SecurityGroupRule) + sgr = query.filter(sg_models.SecurityGroupRule.id == id).one() except exc.NoResultFound: raise ext_sg.SecurityGroupRuleNotFound(id=id) return sgr @@ -667,8 +593,9 @@ exc_cls=ext_sg.SecurityGroupRuleInUse, **kwargs) with context.session.begin(subtransactions=True): - query = self._model_query(context, SecurityGroupRule).filter( - SecurityGroupRule.id == id) + query = self._model_query(context, + sg_models.SecurityGroupRule).filter( + sg_models.SecurityGroupRule.id == id) self._registry_notify(resources.SECURITY_GROUP_RULE, 
events.PRECOMMIT_DELETE, @@ -716,31 +643,18 @@ :returns: the default security group id for given tenant. """ try: - query = self._model_query(context, DefaultSecurityGroup) + query = self._model_query(context, sg_models.DefaultSecurityGroup) default_group = query.filter_by(tenant_id=tenant_id).one() return default_group['security_group_id'] except exc.NoResultFound: - return self._create_default_security_group(context, tenant_id) - - def _create_default_security_group(self, context, tenant_id): - security_group = { - 'security_group': - {'name': 'default', - 'tenant_id': tenant_id, - 'description': _('Default security group')} - } - try: - security_group = self.create_security_group( - context, security_group, default_sg=True) - return security_group['id'] - except db_exc.DBDuplicateEntry as ex: - # default security group was created concurrently - LOG.debug("Duplicate default security group %s was " - "not created", ex.value) - # raise a retry request to restart the whole process since - # we could be in a REPEATABLE READ isolation level and won't - # be able to see the SG group in this transaction. - raise db_exc.RetryRequest(ex) + security_group = { + 'security_group': + {'name': 'default', + 'tenant_id': tenant_id, + 'description': _('Default security group')} + } + return self.create_security_group( + context, security_group, default_sg=True)['id'] def _get_security_groups_on_port(self, context, port): """Check that all security groups on port belong to tenant. @@ -827,3 +741,9 @@ updated_port[ext_sg.SECURITYGROUPS] = ( original_port[ext_sg.SECURITYGROUPS]) return need_notify + + +# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE +_OLD_REF = sys.modules[__name__] +sys.modules[__name__] = _deprecate._DeprecateSubset(globals(), sg_models) +# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE diff -Nru neutron-9.0.0~b2~dev280/neutron/db/securitygroups_rpc_base.py neutron-9.0.0~b3~dev557/neutron/db/securitygroups_rpc_base.py --- neutron-9.0.0~b2~dev280/neutron/db/securitygroups_rpc_base.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/securitygroups_rpc_base.py 2016-08-29 20:05:49.000000000 +0000 @@ -21,7 +21,8 @@ from neutron._i18n import _, _LW from neutron.common import ipv6_utils as ipv6 from neutron.common import utils -from neutron.db.allowed_address_pairs import models as addr_pair +from neutron.db.models import allowed_address_pair as aap_models +from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 from neutron.db import securitygroups_db as sg_db from neutron.extensions import securitygroup as ext_sg @@ -234,8 +235,8 @@ def _select_sg_ids_for_ports(self, context, ports): if not ports: return [] - sg_binding_port = sg_db.SecurityGroupPortBinding.port_id - sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id + sg_binding_port = sg_models.SecurityGroupPortBinding.port_id + sg_binding_sgid = sg_models.SecurityGroupPortBinding.security_group_id query = context.session.query(sg_binding_sgid) query = query.filter(sg_binding_port.in_(ports.keys())) return query.all() @@ -243,14 +244,14 @@ def _select_rules_for_ports(self, context, ports): if not ports: return [] - sg_binding_port = sg_db.SecurityGroupPortBinding.port_id - sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id + sg_binding_port = sg_models.SecurityGroupPortBinding.port_id + sg_binding_sgid = sg_models.SecurityGroupPortBinding.security_group_id - sgr_sgid = sg_db.SecurityGroupRule.security_group_id + sgr_sgid = 
sg_models.SecurityGroupRule.security_group_id query = context.session.query(sg_binding_port, - sg_db.SecurityGroupRule) - query = query.join(sg_db.SecurityGroupRule, + sg_models.SecurityGroupRule) + query = query.join(sg_models.SecurityGroupRule, sgr_sgid == sg_binding_sgid) query = query.filter(sg_binding_port.in_(ports.keys())) return query.all() @@ -263,21 +264,21 @@ ips_by_group[remote_group_id] = set() ip_port = models_v2.IPAllocation.port_id - sg_binding_port = sg_db.SecurityGroupPortBinding.port_id - sg_binding_sgid = sg_db.SecurityGroupPortBinding.security_group_id + sg_binding_port = sg_models.SecurityGroupPortBinding.port_id + sg_binding_sgid = sg_models.SecurityGroupPortBinding.security_group_id # Join the security group binding table directly to the IP allocation # table instead of via the Port table skip an unnecessary intermediary query = context.session.query(sg_binding_sgid, models_v2.IPAllocation.ip_address, - addr_pair.AllowedAddressPair.ip_address) + aap_models.AllowedAddressPair.ip_address) query = query.join(models_v2.IPAllocation, ip_port == sg_binding_port) # Outerjoin because address pairs may be null and we still want the # IP for the port. query = query.outerjoin( - addr_pair.AllowedAddressPair, - sg_binding_port == addr_pair.AllowedAddressPair.port_id) + aap_models.AllowedAddressPair, + sg_binding_port == aap_models.AllowedAddressPair.port_id) query = query.filter(sg_binding_sgid.in_(remote_group_ids)) # Each allowed address pair IP record for a port beyond the 1st # will have a duplicate regular IP in the query response since diff -Nru neutron-9.0.0~b2~dev280/neutron/db/segments_db.py neutron-9.0.0~b3~dev557/neutron/db/segments_db.py --- neutron-9.0.0~b2~dev280/neutron/db/segments_db.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/segments_db.py 2016-08-03 20:10:33.000000000 +0000 @@ -16,6 +16,9 @@ from sqlalchemy.orm import exc from neutron._i18n import _LI +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.callbacks import resources from neutron.db import model_base LOG = logging.getLogger(__name__) @@ -60,9 +63,9 @@ SEGMENTATION_ID: record.segmentation_id} -def add_network_segment(session, network_id, segment, segment_index=0, +def add_network_segment(context, network_id, segment, segment_index=0, is_dynamic=False): - with session.begin(subtransactions=True): + with context.session.begin(subtransactions=True): record = NetworkSegment( id=uuidutils.generate_uuid(), network_id=network_id, @@ -72,7 +75,12 @@ segment_index=segment_index, is_dynamic=is_dynamic ) - session.add(record) + context.session.add(record) + registry.notify(resources.SEGMENT, + events.PRECOMMIT_CREATE, + trigger=add_network_segment, + context=context, + segment=record) segment['id'] = record.id LOG.info(_LI("Added segment %(id)s of type %(network_type)s for network " "%(network_id)s"), diff -Nru neutron-9.0.0~b2~dev280/neutron/db/standard_attr.py neutron-9.0.0~b3~dev557/neutron/db/standard_attr.py --- neutron-9.0.0~b2~dev280/neutron/db/standard_attr.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/standard_attr.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,129 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_db.sqlalchemy import models +import sqlalchemy as sa +from sqlalchemy.ext.associationproxy import association_proxy +from sqlalchemy.ext import declarative + +from neutron.api.v2 import attributes as attr +from neutron.db import model_base + + +class StandardAttribute(model_base.BASEV2, models.TimestampMixin): + """Common table to associate all Neutron API resources. + + By having Neutron objects related to this table, we can associate new + tables that apply to many Neutron objects (e.g. timestamps, rbac entries) + to this table to avoid schema duplication while maintaining referential + integrity. + + NOTE(kevinbenton): This table should not have more columns added to it + unless we are absolutely certain the new column will have a value for + every single type of Neutron resource. Otherwise this table will be filled + with NULL entries for combinations that don't make sense. Additionally, + by keeping this table small we can ensure that performance isn't adversely + impacted for queries on objects. + """ + + # sqlite doesn't support auto increment on big integers so we use big int + # for everything but sqlite + id = sa.Column(sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), + primary_key=True, autoincrement=True) + + # NOTE(kevinbenton): this column is redundant information, but it allows + # operators/devs to look at the contents of this table and know which table + # the corresponding object is in. + # 255 was selected as a max just because it's the varchar ceiling in mysql + # before a 2-byte prefix is required. We shouldn't get anywhere near this + # limit with our table names... + resource_type = sa.Column(sa.String(255), nullable=False) + description = sa.Column(sa.String(attr.DESCRIPTION_MAX_LEN)) + + revision_number = sa.Column( + sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), + server_default='0', nullable=False) + + __mapper_args__ = { + # see http://docs.sqlalchemy.org/en/latest/orm/versioning.html for + # details about how this works + "version_id_col": revision_number + } + + +class HasStandardAttributes(object): + @declarative.declared_attr + def standard_attr_id(cls): + return sa.Column( + sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), + sa.ForeignKey(StandardAttribute.id, ondelete="CASCADE"), + unique=True, + nullable=False + ) + + # NOTE(kevinbenton): we have to disable the following pylint check because + # it thinks we are overriding this method in the __init__ method. 
+ #pylint: disable=method-hidden + @declarative.declared_attr + def standard_attr(cls): + return sa.orm.relationship(StandardAttribute, + lazy='joined', + cascade='all, delete-orphan', + single_parent=True, + uselist=False) + + def __init__(self, *args, **kwargs): + standard_attr_keys = ['description', 'created_at', + 'updated_at', 'revision_number'] + standard_attr_kwargs = {} + for key in standard_attr_keys: + if key in kwargs: + standard_attr_kwargs[key] = kwargs.pop(key) + super(HasStandardAttributes, self).__init__(*args, **kwargs) + # here we automatically create the related standard attribute object + self.standard_attr = StandardAttribute( + resource_type=self.__tablename__, **standard_attr_kwargs) + + @declarative.declared_attr + def description(cls): + return association_proxy('standard_attr', 'description') + + @declarative.declared_attr + def created_at(cls): + return association_proxy('standard_attr', 'created_at') + + @declarative.declared_attr + def updated_at(cls): + return association_proxy('standard_attr', 'updated_at') + + def update(self, new_dict): + # ignore the timestamps if they were passed in. For example, this + # happens if code calls update_port with modified results of get_port + new_dict.pop('created_at', None) + new_dict.pop('updated_at', None) + super(HasStandardAttributes, self).update(new_dict) + + @declarative.declared_attr + def revision_number(cls): + return association_proxy('standard_attr', 'revision_number') + + def bump_revision(self): + # SQLAlchemy will bump the version for us automatically if the + # standard attr record is being modified, but we must call this + # for all other modifications or when relevant children are being + # modified (e.g. fixed_ips change should bump port revision) + if self.standard_attr.revision_number is None: + # this is a brand new object uncommitted so we don't bump now + return + self.standard_attr.revision_number += 1 diff -Nru neutron-9.0.0~b2~dev280/neutron/db/subnet_service_type_db_models.py neutron-9.0.0~b3~dev557/neutron/db/subnet_service_type_db_models.py --- neutron-9.0.0~b2~dev280/neutron/db/subnet_service_type_db_models.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/subnet_service_type_db_models.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,42 @@ +# Copyright 2016 Hewlett Packard Enterprise Development Company, LP +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
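The standardattributes pattern above combines two SQLAlchemy features: association_proxy, so that description and the timestamps read and write through to the shared StandardAttribute row, and version_id_col, so that SQLAlchemy bumps revision_number whenever that row is flushed dirty. A minimal self-contained sketch of the same wiring, assuming a 1.x-era SQLAlchemy is installed; the Resource/StdAttr models and the in-memory SQLite engine are illustrative stand-ins, not the neutron models:

import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class StdAttr(Base):
    """Shared row holding attributes common to every resource type."""
    __tablename__ = 'stdattrs'
    id = sa.Column(sa.Integer, primary_key=True)
    description = sa.Column(sa.String(255))
    revision_number = sa.Column(sa.Integer, nullable=False)
    # SQLAlchemy increments the version column on each flush of a dirty row
    __mapper_args__ = {'version_id_col': revision_number}


class Resource(Base):
    """A resource that proxies description through its stdattrs row."""
    __tablename__ = 'resources'
    id = sa.Column(sa.Integer, primary_key=True)
    standard_attr_id = sa.Column(sa.Integer, sa.ForeignKey('stdattrs.id'),
                                 nullable=False, unique=True)
    standard_attr = orm.relationship(StdAttr, uselist=False, lazy='joined')
    description = association_proxy('standard_attr', 'description')


engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = orm.Session(bind=engine)

res = Resource(standard_attr=StdAttr(description='demo'))
session.add(res)
session.commit()                           # revision_number starts at 1
res.description = 'changed'                # writes through to the stdattrs row
session.commit()
print(res.standard_attr.revision_number)   # 2: bumped by version_id_col

bump_revision() in the hunk above covers the remaining case: changes to related children (e.g. a port's fixed_ips) that never dirty the stdattrs row itself.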
+ +# TODO(ihrachys): consider renaming the module since now it does not contain +# any models at all + +import sys + +from neutron.api.v2 import attributes +from neutron.common import _deprecate +from neutron.db import common_db_mixin +from neutron.db.models import subnet_service_type as sst_model + + +class SubnetServiceTypeMixin(object): + """Mixin class to extend subnet with service type attribute""" + + def _extend_subnet_service_types(self, subnet_res, subnet_db): + subnet_res['service_types'] = [service_type['service_type'] for + service_type in + subnet_db.service_types] + + common_db_mixin.CommonDbMixin.register_dict_extend_funcs( + attributes.SUBNETS, [_extend_subnet_service_types]) + + +# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE +_OLD_REF = sys.modules[__name__] +sys.modules[__name__] = _deprecate._DeprecateSubset(globals(), sst_model) +# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE diff -Nru neutron-9.0.0~b2~dev280/neutron/db/tag_db.py neutron-9.0.0~b3~dev557/neutron/db/tag_db.py --- neutron-9.0.0~b2~dev280/neutron/db/tag_db.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/db/tag_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -17,12 +17,13 @@ from sqlalchemy.orm import aliased from neutron.db import model_base +from neutron.db import standard_attr class Tag(model_base.BASEV2): standard_attr_id = sa.Column( sa.BigInteger().with_variant(sa.Integer(), 'sqlite'), - sa.ForeignKey(model_base.StandardAttribute.id, ondelete="CASCADE"), + sa.ForeignKey(standard_attr.StandardAttribute.id, ondelete="CASCADE"), nullable=False, primary_key=True) tag = sa.Column(sa.String(60), nullable=False, primary_key=True) standard_attr = orm.relationship( diff -Nru neutron-9.0.0~b2~dev280/neutron/debug/debug_agent.py neutron-9.0.0~b3~dev557/neutron/debug/debug_agent.py --- neutron-9.0.0~b2~dev280/neutron/debug/debug_agent.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/debug/debug_agent.py 2016-08-03 20:10:33.000000000 +0000 @@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__) -DEVICE_OWNER_NETWORK_PROBE = 'network:probe' +DEVICE_OWNER_NETWORK_PROBE = constants.DEVICE_OWNER_NETWORK_PREFIX + 'probe' DEVICE_OWNER_COMPUTE_PROBE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'probe' diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/agent.py neutron-9.0.0~b3~dev557/neutron/extensions/agent.py --- neutron-9.0.0~b2~dev280/neutron/extensions/agent.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/agent.py 2016-08-03 20:10:33.000000000 +0000 @@ -154,7 +154,7 @@ def update_agent(self, context, agent): """Disable or Enable the agent. - Discription also can be updated. Some agents cannot be disabled, such + Description also can be updated. Some agents cannot be disabled, such as plugins, services. An error code should be reported in this case. 
@raise exceptions.BadRequest: """ diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/allowedaddresspairs.py neutron-9.0.0~b3~dev557/neutron/extensions/allowedaddresspairs.py --- neutron-9.0.0~b2~dev280/neutron/extensions/allowedaddresspairs.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/allowedaddresspairs.py 2016-08-29 20:05:49.000000000 +0000 @@ -22,14 +22,9 @@ from neutron._i18n import _ from neutron.api import extensions from neutron.api.v2 import attributes as attr +from neutron.conf.extensions import allowedaddresspairs as addr_pair -allowed_address_pair_opts = [ - #TODO(limao): use quota framework when it support quota for attributes - cfg.IntOpt('max_allowed_address_pair', default=10, - help=_("Maximum number of allowed address pairs")), -] - -cfg.CONF.register_opts(allowed_address_pair_opts) +addr_pair.register_allowed_address_pair_opts() class AllowedAddressPairsMissingIP(nexception.InvalidInput): @@ -93,8 +88,8 @@ if msg: raise webob.exc.HTTPBadRequest(msg) -validators.validators['type:validate_allowed_address_pairs'] = ( - _validate_allowed_address_pairs) +validators.add_validator('validate_allowed_address_pairs', + _validate_allowed_address_pairs) ADDRESS_PAIRS = 'allowed_address_pairs' EXTENDED_ATTRIBUTES_2_0 = { diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/availability_zone.py neutron-9.0.0~b3~dev557/neutron/extensions/availability_zone.py --- neutron-9.0.0~b2~dev280/neutron/extensions/availability_zone.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/availability_zone.py 2016-08-29 20:05:49.000000000 +0000 @@ -48,9 +48,8 @@ msg = _("Too many availability_zone_hints specified") raise exceptions.InvalidInput(error_message=msg) - -validators.validators['type:availability_zone_hints'] = ( - _validate_availability_zone_hints) +validators.add_validator('availability_zone_hints', + _validate_availability_zone_hints) # Attribute Map RESOURCE_NAME = 'availability_zone' diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/dns.py neutron-9.0.0~b3~dev557/neutron/extensions/dns.py --- neutron-9.0.0~b2~dev280/neutron/extensions/dns.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/dns.py 2016-08-29 20:05:49.000000000 +0000 @@ -196,11 +196,9 @@ msg = _("'%s' cannot be converted to lowercase string") % data raise n_exc.InvalidInput(error_message=msg) - -validators.validators['type:dns_name'] = _validate_dns_name -validators.validators['type:fip_dns_name'] = _validate_fip_dns_name -validators.validators['type:dns_domain'] = _validate_dns_domain - +validators.add_validator('dns_name', _validate_dns_name) +validators.add_validator('fip_dns_name', _validate_fip_dns_name) +validators.add_validator('dns_domain', _validate_dns_domain) DNSNAME = 'dns_name' DNSDOMAIN = 'dns_domain' diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/extra_dhcp_opt.py neutron-9.0.0~b3~dev557/neutron/extensions/extra_dhcp_opt.py --- neutron-9.0.0~b2~dev280/neutron/extensions/extra_dhcp_opt.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/extra_dhcp_opt.py 2016-08-29 20:05:49.000000000 +0000 @@ -62,9 +62,8 @@ if msg: raise ExtraDhcpOptBadData(data=msg) - -validators.validators['type:list_of_extra_dhcp_opts'] = ( - _validate_extra_dhcp_opt) +validators.add_validator('list_of_extra_dhcp_opts', + _validate_extra_dhcp_opt) # Attribute Map EXTRADHCPOPTS = 'extra_dhcp_opts' diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/flavors.py 
neutron-9.0.0~b3~dev557/neutron/extensions/flavors.py --- neutron-9.0.0~b2~dev280/neutron/extensions/flavors.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/flavors.py 2016-08-29 20:05:49.000000000 +0000 @@ -78,8 +78,8 @@ if validate_type not in plugins: raise InvalidFlavorServiceType(service_type=validate_type) -validators.validators['type:validate_flavor_service_type'] = ( - _validate_flavor_service_type) +validators.add_validator('validate_flavor_service_type', + _validate_flavor_service_type) FLAVORS = 'flavors' SERVICE_PROFILES = 'service_profiles' diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/ip_allocation.py neutron-9.0.0~b3~dev557/neutron/extensions/ip_allocation.py --- neutron-9.0.0~b2~dev280/neutron/extensions/ip_allocation.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/ip_allocation.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,56 @@ +# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api import extensions +from neutron.api.v2 import attributes + +IP_ALLOCATION = 'ip_allocation' +IP_ALLOCATION_IMMEDIATE = 'immediate' +IP_ALLOCATION_DEFERRED = 'deferred' +IP_ALLOCATION_NONE = 'none' + +# Attribute Map +RESOURCE_ATTRIBUTE_MAP = { + attributes.PORTS: { + IP_ALLOCATION: {'allow_post': False, + 'allow_put': False, + 'is_visible': True, }, + }, +} + + +class Ip_allocation(extensions.ExtensionDescriptor): + """Extension indicates when ports use deferred or no IP allocation.""" + + @classmethod + def get_name(cls): + return "IP Allocation" + + @classmethod + def get_alias(cls): + return "ip_allocation" + + @classmethod + def get_description(cls): + return "IP allocation extension." + + @classmethod + def get_updated(cls): + return "2016-06-10T23:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/l2_adjacency.py neutron-9.0.0~b3~dev557/neutron/extensions/l2_adjacency.py --- neutron-9.0.0~b2~dev280/neutron/extensions/l2_adjacency.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/l2_adjacency.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,58 @@ +# Copyright (c) 2016 NEC Technologies Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
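The validator hunks above (allowedaddresspairs, availability_zone, dns, extra_dhcp_opt, flavors) all make the same mechanical change: instead of mutating the validators dict directly, they register through neutron-lib's add_validator() helper, which can guard against two extensions silently overwriting each other's entry. The validator contract itself is untouched: a callable that takes the data plus an optional constraint and returns None on success or an error string on failure. A standalone sketch of that contract with a hypothetical port validator; the toy registry mirrors the 'type:<name>' key convention but is not the neutron-lib implementation:

def validate_port_value(data, valid_values=None):
    """Return None if data is a valid L4 port number, else an error string."""
    try:
        val = int(data)
    except (TypeError, ValueError):
        return "'%s' is not an integer" % data
    if not 0 <= val <= 65535:
        return "'%s' is not a valid port value" % data


validators = {}


def add_validator(name, func):
    # refuse to overwrite: duplicate registration is almost always a bug
    key = 'type:' + name
    if key in validators:
        raise KeyError("validator '%s' is already registered" % name)
    validators[key] = func


add_validator('port_value', validate_port_value)
# attribute maps reference the key, e.g. 'validate': {'type:port_value': None}
assert validators['type:port_value'](8080) is None
assert validators['type:port_value'](70000) is not None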
+ +from neutron.api import extensions + + +L2_ADJACENCY = 'l2_adjacency' +EXTENDED_ATTRIBUTES_2_0 = { + 'networks': { + L2_ADJACENCY: {'allow_post': False, + 'allow_put': False, + 'is_visible': True} + } +} + + +class L2_adjacency(extensions.ExtensionDescriptor): + """Extension class supporting L2 Adjacency for Routed Networks + + The following class is used by neutron's extension framework + to provide metadata related to the L2 Adjacency for Neutron + Routed Network, exposing the same to clients. + No new resources have been defined by this extension. + """ + + @classmethod + def get_name(cls): + return "L2 Adjacency" + + @classmethod + def get_alias(cls): + return "l2_adjacency" + + @classmethod + def get_description(cls): + return "Display L2 Adjacency for Neutron Networks." + + @classmethod + def get_updated(cls): + return "2016-04-12T16:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/l3_ext_ha_mode.py neutron-9.0.0~b3~dev557/neutron/extensions/l3_ext_ha_mode.py --- neutron-9.0.0~b2~dev280/neutron/extensions/l3_ext_ha_mode.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/l3_ext_ha_mode.py 2016-08-03 20:10:33.000000000 +0000 @@ -89,8 +89,8 @@ class HAMinimumAgentsNumberNotValid(exceptions.NeutronException): message = (_("min_l3_agents_per_router config parameter is not valid. " - "It has to be equal to or more than %s for HA.") % - n_const.MINIMUM_AGENTS_FOR_HA) + "It has to be greater than or equal to %s for HA.") % + n_const.MINIMUM_MINIMUM_AGENTS_FOR_HA) class L3_ext_ha_mode(extensions.ExtensionDescriptor): diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/l3_flavors.py neutron-9.0.0~b3~dev557/neutron/extensions/l3_flavors.py --- neutron-9.0.0~b2~dev280/neutron/extensions/l3_flavors.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/l3_flavors.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,55 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from neutron_lib import constants + +from neutron.api import extensions + +EXTENDED_ATTRIBUTES_2_0 = { + 'routers': { + 'flavor_id': {'allow_post': True, 'allow_put': False, + 'default': constants.ATTR_NOT_SPECIFIED, + 'is_visible': True, 'enforce_policy': True} + + } +} + + +class L3_flavors(extensions.ExtensionDescriptor): + """Extension class supporting flavors for routers.""" + + @classmethod + def get_name(cls): + return "Router Flavor Extension" + + @classmethod + def get_alias(cls): + return 'l3-flavors' + + @classmethod + def get_description(cls): + return "Flavor support for routers." 
+ + @classmethod + def get_updated(cls): + return "2016-05-17T00:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} + + def get_required_extensions(self): + return ["router", "flavors"] diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/metering.py neutron-9.0.0~b3~dev557/neutron/extensions/metering.py --- neutron-9.0.0~b2~dev280/neutron/extensions/metering.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/metering.py 2016-08-29 20:05:49.000000000 +0000 @@ -146,6 +146,10 @@ """Create a metering label.""" pass + def update_metering_label(self, context, id, metering_label): + """Update a metering label.""" + raise NotImplementedError() + @abc.abstractmethod def delete_metering_label(self, context, label_id): """Delete a metering label.""" @@ -168,6 +172,10 @@ """Create a metering label rule.""" pass + def update_metering_label_rule(self, context, id, metering_label_rule): + """Update a metering label rule.""" + raise NotImplementedError() + @abc.abstractmethod def get_metering_label_rule(self, context, rule_id, fields=None): """Get a metering label rule.""" diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/multiprovidernet.py neutron-9.0.0~b3~dev557/neutron/extensions/multiprovidernet.py --- neutron-9.0.0~b2~dev280/neutron/extensions/multiprovidernet.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/multiprovidernet.py 2016-08-29 20:05:49.000000000 +0000 @@ -67,10 +67,8 @@ if len(set(fully_specifieds)) != len(fully_specifieds): raise SegmentsContainDuplicateEntry() - -validators.validators['type:convert_segments'] = ( - _convert_and_validate_segments) - +validators.add_validator('convert_segments', + _convert_and_validate_segments) EXTENDED_ATTRIBUTES_2_0 = { 'networks': { diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/pagination.py neutron-9.0.0~b3~dev557/neutron/extensions/pagination.py --- neutron-9.0.0~b2~dev280/neutron/extensions/pagination.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/pagination.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,50 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_config import cfg + +from neutron.api import extensions + + +_ALIAS = 'pagination' + + +class Pagination(extensions.ExtensionDescriptor): + """Fake extension that indicates that pagination is enabled.""" + + extensions.register_custom_supported_check( + _ALIAS, lambda: cfg.CONF.allow_pagination, plugin_agnostic=True + ) + + @classmethod + def get_name(cls): + return "Pagination support" + + @classmethod + def get_alias(cls): + return _ALIAS + + @classmethod + def get_description(cls): + return "Extension that indicates that pagination is enabled." 
+ + @classmethod + def get_updated(cls): + return "2016-06-12T00:00:00-00:00" + + @classmethod + def get_resources(cls): + return [] + + def get_extended_resources(self, version): + return {} diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/portbindings.py neutron-9.0.0~b3~dev557/neutron/extensions/portbindings.py --- neutron-9.0.0~b2~dev280/neutron/extensions/portbindings.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/portbindings.py 2016-08-29 20:05:49.000000000 +0000 @@ -50,6 +50,7 @@ VIF_DETAILS_MACVTAP_SOURCE = 'macvtap_source' VIF_DETAILS_MACVTAP_MODE = 'macvtap_mode' VIF_DETAILS_PHYSICAL_INTERFACE = 'physical_interface' +VIF_DETAILS_BRIDGE_NAME = 'bridge_name' # The keys below are used in the VIF_DETAILS attribute to convey # information related to the configuration of the vhost-user VIF driver. diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/qos.py neutron-9.0.0~b3~dev557/neutron/extensions/qos.py --- neutron-9.0.0~b2~dev280/neutron/extensions/qos.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/qos.py 2016-08-29 20:05:49.000000000 +0000 @@ -77,11 +77,13 @@ **{'max_kbps': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, - 'validate': {'type:non_negative': None}}, + 'validate': {'type:range': [0, + common_constants.DB_INTEGER_MAX_VALUE]}}, 'max_burst_kbps': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': 0, - 'validate': {'type:non_negative': None}}}) + 'validate': {'type:range': [0, + common_constants.DB_INTEGER_MAX_VALUE]}}}) }, 'dscp_marking_rules': { 'parent': {'collection_name': 'policies', @@ -226,18 +228,18 @@ # "delete_policy_rule". proxy_method = attrib.replace(rule_type + '_', '') - rule_obj = self.rule_objects[rule_type] - return self._call_proxy_method(proxy_method, rule_obj) + rule_cls = self.rule_objects[rule_type] + return self._call_proxy_method(proxy_method, rule_cls) # If we got here, then either attrib matched no pattern or the # rule_type embedded in attrib wasn't in self.rule_objects. raise AttributeError(attrib) - def _call_proxy_method(self, method_name, rule_obj): - """Call proxy method. We need to add the rule_obj, obtained from the + def _call_proxy_method(self, method_name, rule_cls): + """Call proxy method. We need to add the rule_cls, obtained from the self.rule_objects dictionary, to the incoming args. The context is passed to proxy method as first argument; the remaining args will - follow rule_obj. + follow rule_cls. Some of the incoming method calls have the policy rule name as one of the keys in the kwargs. 
For instance, the incoming kwargs for the @@ -258,24 +260,24 @@ :param method_name: the name of the method to call :type method_name: str - :param rule_obj: the rule object, which is sent as an argument to the + :param rule_cls: the rule class, which is sent as an argument to the proxy method - :type rule_obj: a class from the rule_object (qos.objects.rule) module + :type rule_cls: a class from the rule_object (qos.objects.rule) module """ - def _make_call(method_name, rule_obj, *args, **kwargs): + def _make_call(method_name, rule_cls, *args, **kwargs): context = args[0] args_list = list(args[1:]) params = kwargs - rule_data_name = rule_obj.rule_type + "_rule" + rule_data_name = rule_cls.rule_type + "_rule" if rule_data_name in params: params['rule_data'] = params.pop(rule_data_name) return getattr(self, method_name)( - context, rule_obj, *args_list, **params + context, rule_cls, *args_list, **params ) return lambda *args, **kwargs: _make_call( - method_name, rule_obj, *args, **kwargs) + method_name, rule_cls, *args, **kwargs) def get_plugin_description(self): return "QoS Service Plugin for ports and networks" @@ -311,25 +313,25 @@ pass @abc.abstractmethod - def create_policy_rule(self, context, policy_id, rule_data, rule_obj): + def create_policy_rule(self, context, rule_cls, policy_id, rule_data): pass @abc.abstractmethod - def update_policy_rule(self, context, rule_id, policy_id, rule_data, - rule_obj): + def update_policy_rule(self, context, rule_cls, rule_id, policy_id, + rule_data): pass @abc.abstractmethod - def delete_policy_rule(self, context, rule_id, policy_id): + def delete_policy_rule(self, context, rule_cls, rule_id, policy_id): pass @abc.abstractmethod - def get_policy_rule(self, context, rule_id, policy_id, rule_obj, + def get_policy_rule(self, context, rule_cls, rule_id, policy_id, fields=None): pass @abc.abstractmethod - def get_policy_rules(self, context, policy_id, rule_obj, + def get_policy_rules(self, context, rule_cls, policy_id, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/rbac.py neutron-9.0.0~b3~dev557/neutron/extensions/rbac.py --- neutron-9.0.0~b2~dev280/neutron/extensions/rbac.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/rbac.py 2016-08-29 20:05:49.000000000 +0000 @@ -60,12 +60,12 @@ 'enforce_policy': True}, 'object_id': {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, - 'is_visible': True, 'default': None, - 'enforce_policy': True}, + 'is_visible': True, 'enforce_policy': True}, 'target_tenant': {'allow_post': True, 'allow_put': True, - 'is_visible': True, 'enforce_policy': True, - 'default': None}, + 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, + 'is_visible': True, 'enforce_policy': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'required_by_policy': True, 'is_visible': True}, 'action': {'allow_post': True, 'allow_put': False, # action depends on type so validation has to occur in @@ -73,8 +73,7 @@ 'validate': {'type:string': attr.DESCRIPTION_MAX_LEN}, # we set enforce_policy so operators can define policies # that restrict actions - 'is_visible': True, 'enforce_policy': True, - 'default': None}, + 'is_visible': True, 'enforce_policy': True} } } diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/revisions.py neutron-9.0.0~b3~dev557/neutron/extensions/revisions.py --- 
neutron-9.0.0~b2~dev280/neutron/extensions/revisions.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/revisions.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,53 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.api import extensions + + +REVISION = 'revision' +REVISION_BODY = { + REVISION: {'allow_post': False, 'allow_put': False, + 'is_visible': True, 'default': None}, +} +RESOURCES = ('security_group_rules', 'security_groups', 'ports', 'subnets', + 'networks', 'routers', 'floatingips', 'subnetpools') +EXTENDED_ATTRIBUTES_2_0 = {} +for resource in RESOURCES: + EXTENDED_ATTRIBUTES_2_0[resource] = REVISION_BODY + + +class Revisions(extensions.ExtensionDescriptor): + """Extension to expose revision number of standard attr resources.""" + + @classmethod + def get_name(cls): + return "Resource revision numbers" + + @classmethod + def get_alias(cls): + return "revisions" + + @classmethod + def get_description(cls): + return ("This extension will display the revision number of neutron " + "resources.") + + @classmethod + def get_updated(cls): + return "2016-04-11T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/securitygroup.py neutron-9.0.0~b3~dev557/neutron/extensions/securitygroup.py --- neutron-9.0.0~b2~dev280/neutron/extensions/securitygroup.py 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/securitygroup.py 2016-08-29 20:05:49.000000000 +0000 @@ -19,6 +19,7 @@ from neutron_lib.api import validators from neutron_lib import constants as const from neutron_lib import exceptions as nexception +from oslo_utils import netutils from oslo_utils import uuidutils import six @@ -175,13 +176,9 @@ def convert_validate_port_value(port): if port is None: return port - try: - val = int(port) - except (ValueError, TypeError): - raise SecurityGroupInvalidPortValue(port=port) - if val >= 0 and val <= 65535: - return val + if netutils.is_valid_port(port): + return int(port) else: raise SecurityGroupInvalidPortValue(port=port) @@ -210,8 +207,7 @@ if data.lower() == "default": raise SecurityGroupDefaultAlreadyExists() - -validators.validators['type:name_not_default'] = _validate_name_not_default +validators.add_validator('name_not_default', _validate_name_not_default) sg_supported_protocols = ([None] + list(const.IP_PROTOCOL_MAP.keys())) sg_supported_ethertypes = ['IPv4', 'IPv6'] diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/sorting.py neutron-9.0.0~b3~dev557/neutron/extensions/sorting.py --- neutron-9.0.0~b2~dev280/neutron/extensions/sorting.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/sorting.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,50 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo_config import cfg + +from neutron.api import extensions + + +_ALIAS = 'sorting' + + +class Sorting(extensions.ExtensionDescriptor): + """Fake extension that indicates that sorting is enabled.""" + + extensions.register_custom_supported_check( + _ALIAS, lambda: cfg.CONF.allow_sorting, plugin_agnostic=True + ) + + @classmethod + def get_name(cls): + return "Sorting support" + + @classmethod + def get_alias(cls): + return _ALIAS + + @classmethod + def get_description(cls): + return "Extension that indicates that sorting is enabled." + + @classmethod + def get_updated(cls): + return "2016-06-12T00:00:00-00:00" + + @classmethod + def get_resources(cls): + return [] + + def get_extended_resources(self, version): + return {} diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/subnet_service_types.py neutron-9.0.0~b3~dev557/neutron/extensions/subnet_service_types.py --- neutron-9.0.0~b2~dev280/neutron/extensions/subnet_service_types.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/subnet_service_types.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,87 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
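Sorting (above) and the matching pagination module are "fake" extensions: they define no resources and exist only so the API advertises whether sorting and pagination are enabled. The key piece is register_custom_supported_check(), which stores a zero-argument callable that the extension machinery consults instead of asking a plugin; because the callable is evaluated lazily, the answer tracks the live config value. A rough standalone model of that gating; the registry and FakeConf here are illustrative, not the neutron implementation:

_custom_checks = {}


def register_custom_supported_check(alias, predicate, plugin_agnostic=False):
    """Record a callable deciding whether extension 'alias' is supported."""
    _custom_checks[alias] = predicate


def extension_supported(alias):
    # evaluated at call time, so it reflects the current config
    check = _custom_checks.get(alias)
    return bool(check and check())


class FakeConf(object):
    allow_sorting = True


conf = FakeConf()
register_custom_supported_check('sorting', lambda: conf.allow_sorting,
                                plugin_agnostic=True)

print(extension_supported('sorting'))   # True
conf.allow_sorting = False
print(extension_supported('sorting'))   # False: predicate re-evaluated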
+ +from neutron_lib.api import validators +from neutron_lib import constants +from neutron_lib import exceptions +import webob.exc + +from neutron._i18n import _ +from neutron.api import extensions +from neutron.api.v2 import attributes + + +# List for service plugins to register their own prefixes +valid_prefixes = [] + + +class InvalidSubnetServiceType(exceptions.InvalidInput): + message = _("Subnet service type %(service_type)s does not correspond " + "to a valid device owner.") + + +def _validate_subnet_service_types(service_types, valid_values=None): + if service_types: + if not isinstance(service_types, list): + raise webob.exc.HTTPBadRequest( + _("Subnet service types must be a list.")) + + prefixes = valid_prefixes + # Include standard prefixes + prefixes += list(constants.DEVICE_OWNER_PREFIXES) + prefixes += constants.DEVICE_OWNER_COMPUTE_PREFIX + + for service_type in service_types: + if not service_type.startswith(tuple(prefixes)): + raise InvalidSubnetServiceType(service_type=service_type) + + +validators.add_validator('type:validate_subnet_service_types', + _validate_subnet_service_types) + + +EXTENDED_ATTRIBUTES_2_0 = { + attributes.SUBNETS: { + 'service_types': {'allow_post': True, + 'allow_put': True, + 'default': constants.ATTR_NOT_SPECIFIED, + 'validate': {'type:validate_subnet_service_types': + None}, + 'is_visible': True, }, + }, +} + + +class Subnet_service_types(extensions.ExtensionDescriptor): + """Extension class supporting subnet service types.""" + + @classmethod + def get_name(cls): + return "Subnet service types" + + @classmethod + def get_alias(cls): + return "subnet-service-types" + + @classmethod + def get_description(cls): + return "Provides ability to set the subnet service_types field" + + @classmethod + def get_updated(cls): + return "2016-03-15T18:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/tag.py neutron-9.0.0~b3~dev557/neutron/extensions/tag.py --- neutron-9.0.0~b2~dev280/neutron/extensions/tag.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/tag.py 2016-08-03 20:10:33.000000000 +0000 @@ -15,7 +15,6 @@ from neutron_lib.api import validators from neutron_lib import exceptions -from oslo_log import log as logging import six import webob.exc @@ -28,8 +27,6 @@ from neutron.services import service_base -LOG = logging.getLogger(__name__) - TAG = 'tag' TAGS = TAG + 's' MAX_TAG_LEN = 60 diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/trunk_details.py neutron-9.0.0~b3~dev557/neutron/extensions/trunk_details.py --- neutron-9.0.0~b2~dev280/neutron/extensions/trunk_details.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/trunk_details.py 2016-08-03 20:10:33.000000000 +0000 @@ -0,0 +1,56 @@ +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
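The subnet-service-types validator a little above accepts an entry only when it starts with a known device-owner prefix ('network:', 'compute:', or whatever a service plugin has appended to valid_prefixes). Two details of that hunk are worth noting: 'prefixes = valid_prefixes' aliases rather than copies the module-level list, so every call grows it, and '+= constants.DEVICE_OWNER_COMPUTE_PREFIX' concatenates a string onto a list, which extends it character by character rather than adding one prefix. The intended check, reduced to plain Python with the two standard prefixes hard-coded for illustration:

STANDARD_PREFIXES = ('network:', 'compute:')


def validate_subnet_service_types(service_types, extra_prefixes=()):
    """Raise ValueError unless every entry carries a device-owner prefix."""
    if not service_types:
        return
    if not isinstance(service_types, list):
        raise ValueError('Subnet service types must be a list.')
    prefixes = tuple(extra_prefixes) + STANDARD_PREFIXES
    for service_type in service_types:
        if not service_type.startswith(prefixes):
            raise ValueError('%s does not correspond to a valid device '
                             'owner.' % service_type)


validate_subnet_service_types(['compute:nova'])   # accepted
try:
    validate_subnet_service_types(['bogus:owner'])
except ValueError as exc:
    print(exc)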
+ +from neutron_lib import constants + +from neutron.api import extensions + + +# NOTE(armax): because of the API machinery, this extension must be on +# its own. This aims at providing subport information for ports that +# are parent in a trunk so that consumers of the Neutron API, like Nova +# can efficiently access trunk information for things like metadata or +# config-drive configuration. +EXTENDED_ATTRIBUTES_2_0 = { + 'ports': {'trunk_details': {'allow_post': False, 'allow_put': False, + 'default': constants.ATTR_NOT_SPECIFIED, + 'is_visible': True, + 'enforce_policy': True, + 'required_by_policy': True}}, +} + + +class Trunk_details(extensions.ExtensionDescriptor): + + @classmethod + def get_name(cls): + return "Trunk port details" + + @classmethod + def get_alias(cls): + return "trunk-details" + + @classmethod + def get_description(cls): + return "Expose trunk port details" + + @classmethod + def get_updated(cls): + return "2016-01-01T10:00:00-00:00" + + def get_extended_resources(self, version): + if version == "2.0": + return EXTENDED_ATTRIBUTES_2_0 + else: + return {} diff -Nru neutron-9.0.0~b2~dev280/neutron/extensions/trunk.py neutron-9.0.0~b3~dev557/neutron/extensions/trunk.py --- neutron-9.0.0~b2~dev280/neutron/extensions/trunk.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/extensions/trunk.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,101 @@ +# Copyright (c) 2016 ZTE Inc. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
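trunk_details above, like flavor_id earlier, defaults to constants.ATTR_NOT_SPECIFIED rather than None, so consuming code can tell "field omitted by the caller" apart from "field explicitly set to null or empty". The sentinel idiom in isolation; the Sentinel class here is illustrative, neutron-lib ships the real singleton:

class Sentinel(object):
    def __repr__(self):
        return '<ATTR_NOT_SPECIFIED>'


ATTR_NOT_SPECIFIED = Sentinel()


def read_trunk_details(port_body):
    """Tell an omitted field apart from an explicit null (illustrative)."""
    value = port_body.get('trunk_details', ATTR_NOT_SPECIFIED)
    if value is ATTR_NOT_SPECIFIED:
        return 'omitted: fall back to the server-side default'
    if value is None:
        return 'explicit null from the caller'
    return 'caller supplied %r' % (value,)


print(read_trunk_details({}))
print(read_trunk_details({'trunk_details': None}))
print(read_trunk_details({'trunk_details': {'trunk_id': 'abc'}}))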
+ +from neutron_lib.api import converters + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import resource_helper + + +RESOURCE_ATTRIBUTE_MAP = { + 'trunks': { + 'admin_state_up': {'allow_post': True, 'allow_put': True, + 'default': True, + 'convert_to': converters.convert_to_boolean, + 'is_visible': True}, + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, 'primary_key': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': attr.NAME_MAX_LEN}, + 'default': '', 'is_visible': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'validate': + {'type:string': attr.TENANT_ID_MAX_LEN}, + 'is_visible': True}, + 'port_id': {'allow_post': True, 'allow_put': False, + 'required_by_policy': True, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'status': {'allow_post': False, 'allow_put': False, + 'is_visible': True}, + 'sub_ports': {'allow_post': True, 'allow_put': False, + 'default': [], + 'convert_list_to': converters.convert_kvp_list_to_dict, + 'validate': {'type:subports': None}, + 'enforce_policy': True, + 'is_visible': True} + }, +} + + +class Trunk(extensions.ExtensionDescriptor): + """Trunk API extension.""" + + @classmethod + def get_name(cls): + return "Trunk Extension" + + @classmethod + def get_alias(cls): + return "trunk" + + @classmethod + def get_description(cls): + return "Provides support for trunk ports" + + @classmethod + def get_updated(cls): + return "2016-01-01T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + plural_mappings = resource_helper.build_plural_mappings( + {}, RESOURCE_ATTRIBUTE_MAP) + attr.PLURALS.update(plural_mappings) + action_map = {'trunk': {'add_subports': 'PUT', + 'remove_subports': 'PUT', + 'get_subports': 'GET'}} + return resource_helper.build_resource_info(plural_mappings, + RESOURCE_ATTRIBUTE_MAP, + 'trunk', + action_map=action_map, + register_quota=True) + + def update_attributes_map(self, attributes, extension_attrs_map=None): + super(Trunk, self).update_attributes_map( + attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) + + def get_required_extensions(self): + return ["binding"] + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} diff -Nru neutron-9.0.0~b2~dev280/neutron/hacking/checks.py neutron-9.0.0~b3~dev557/neutron/hacking/checks.py --- neutron-9.0.0~b2~dev280/neutron/hacking/checks.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/hacking/checks.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,12 +15,12 @@ import os import re +from debtcollector import moves +from hacking import core from neutron_lib.hacking import checks import pep8 import six -from hacking import core - def flake8ext(f): """Decorator to indicate flake8 extension. 
@@ -64,6 +64,9 @@ } +log_string_interpolation = re.compile(r".*LOG\.(?:error|warn|warning|info" + r"|critical|exception|debug)" + r"\([^,]*%[^,]*[,)]") log_translation_hint = re.compile( '|'.join('(?:%s)' % _regex_for_level(level, hint) for level, hint in six.iteritems(_all_log_levels))) @@ -76,6 +79,7 @@ @flake8ext def validate_log_translations(logical_line, physical_line, filename): + """N320 - Log messages require translation.""" # Translations are not required in the test directory if "neutron/tests" in filename: return @@ -89,6 +93,7 @@ @flake8ext def use_jsonutils(logical_line, filename): + """N321 - Use jsonutils instead of json.""" msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s" # Some files in the tree are not meant to be run from inside Neutron @@ -112,14 +117,13 @@ @flake8ext def no_translate_debug_logs(logical_line, filename): - """Check for 'LOG.debug(_(' and 'LOG.debug(_Lx(' + """N319 - Check for 'LOG.debug(_(' and 'LOG.debug(_Lx(' As per our translation policy, https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation we shouldn't translate debug level logs. * This check assumes that 'LOG' is a logger. - N319 """ for hint in _all_hints: if logical_line.startswith("LOG.debug(%s(" % hint): @@ -128,11 +132,12 @@ @flake8ext def check_assert_called_once_with(logical_line, filename): - # Try to detect unintended calls of nonexistent mock methods like: - # assert_called_once - # assertCalledOnceWith - # assert_has_called - # called_once_with + """N322 - Try to detect unintended calls of nonexistent mock methods like: + assert_called_once + assertCalledOnceWith + assert_has_called + called_once_with + """ if 'neutron/tests/' in filename: if '.assert_called_once_with(' in logical_line: return @@ -152,6 +157,7 @@ @flake8ext def check_no_contextlib_nested(logical_line, filename): + """N324 - Don't use contextlib.nested.""" msg = ("N324: contextlib.nested is deprecated. With Python 2.7 and later " "the with-statement supports multiple nested objects. See https://" "docs.python.org/2/library/contextlib.html#contextlib.nested for " @@ -163,6 +169,7 @@ @flake8ext def check_python3_xrange(logical_line): + """N325 - Do not use xrange.""" if re.search(r"\bxrange\s*\(", logical_line): yield(0, "N325: Do not use xrange. 
Use range, or six.moves.range for " "large loops.") @@ -170,6 +177,7 @@ @flake8ext def check_no_basestring(logical_line): + """N326 - Don't use basestring.""" if re.search(r"\bbasestring\b", logical_line): msg = ("N326: basestring is not Python3-compatible, use " "six.string_types instead.") @@ -178,46 +186,59 @@ @flake8ext def check_python3_no_iteritems(logical_line): + """N327 - Use six.iteritems()""" if re.search(r".*\.iteritems\(\)", logical_line): msg = ("N327: Use six.iteritems() instead of dict.iteritems().") yield(0, msg) @flake8ext -def check_asserttrue(logical_line, filename): +def check_asserttruefalse(logical_line, filename): + """N328 - Don't use assertEqual(True/False, observed).""" if 'neutron/tests/' in filename: - if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?\)", logical_line): + if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) - if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?\)", logical_line): + if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) + if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?", logical_line): + msg = ("N328: Use assertFalse(observed) instead of " + "assertEqual(False, observed)") + yield (0, msg) + if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?", logical_line): + msg = ("N328: Use assertFalse(observed) instead of " + "assertEqual(False, observed)") + yield (0, msg) + + +check_asserttrue = flake8ext( + moves.moved_function( + check_asserttruefalse, 'check_asserttrue', __name__, + version='Newton', removal_version='Ocata')) + + +check_assertfalse = flake8ext( + moves.moved_function( + check_asserttruefalse, 'check_assertfalse', __name__, + version='Newton', removal_version='Ocata')) @flake8ext def no_mutable_default_args(logical_line): + """N329 - Don't use mutable default arguments.""" msg = "N329: Method's default argument shouldn't be mutable!" if checks.mutable_default_args.match(logical_line): yield (0, msg) @flake8ext -def check_assertfalse(logical_line, filename): - if 'neutron/tests/' in filename: - if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?\)", logical_line): - msg = ("N328: Use assertFalse(observed) instead of " - "assertEqual(False, observed)") - yield (0, msg) - if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?\)", logical_line): - msg = ("N328: Use assertFalse(observed) instead of " - "assertEqual(False, observed)") - yield (0, msg) - - -@flake8ext def check_assertempty(logical_line, filename): + """N330 - Enforce using assertEqual parameter ordering in case of empty + objects. + """ if 'neutron/tests/' in filename: msg = ("N330: Use assertEqual(*empty*, observed) instead of " "assertEqual(observed, *empty*). 
*empty* contains "
@@ -230,6 +251,7 @@
 
 @flake8ext
 def check_assertisinstance(logical_line, filename):
+    """N331 - Enforce using assertIsInstance."""
     if 'neutron/tests/' in filename:
         if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)",
                      logical_line):
@@ -240,6 +262,7 @@
 
 @flake8ext
 def check_assertequal_for_httpcode(logical_line, filename):
+    """N332 - Enforce correct ordering for httpcode in assertEqual."""
     msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) "
            "instead of assertEqual(observed_http_code, expected_http_code)")
     if 'neutron/tests/' in filename:
@@ -250,6 +273,7 @@
 
 @flake8ext
 def check_log_warn_deprecated(logical_line, filename):
+    """N333 - Use LOG.warning."""
     msg = "N333: Use LOG.warning due to compatibility with py3"
     if log_warn.match(logical_line):
         yield (0, msg)
@@ -257,7 +281,7 @@
 
 @flake8ext
 def check_oslo_i18n_wrapper(logical_line, filename, noqa):
-    """Check for neutron.i18n usage.
+    """N340 - Check for neutron.i18n usage.
 
     Okay(neutron/foo/bar.py): from neutron._i18n import _
     Okay(neutron_lbaas/foo/bar.py): from neutron_lbaas._i18n import _
@@ -286,7 +310,7 @@
 
 @flake8ext
 def check_builtins_gettext(logical_line, tokens, filename, lines, noqa):
-    """Check usage of builtins gettext _().
+    """N341 - Check usage of builtins gettext _().
 
     Okay(neutron/foo.py): from neutron._i18n import _\n_('foo')
     N341(neutron/foo.py): _('foo')
@@ -327,6 +351,7 @@
 @core.flake8ext
 @core.off_by_default
 def check_unittest_imports(logical_line):
+    """N334 - Use unittest2 instead of unittest."""
     if (re.match(unittest_imports_from, logical_line) or
             re.match(unittest_imports_dot, logical_line)):
         msg = "N334: '%s' must be used instead of '%s'." % (
@@ -334,6 +359,28 @@
         yield (0, msg)
 
 
+@flake8ext
+def check_delayed_string_interpolation(logical_line, filename, noqa):
+    """N342 - String interpolation should be delayed at logging calls.
+
+    N342: LOG.debug('Example: %s' % 'bad')
+    Okay: LOG.debug('Example: %s', 'good')
+    """
+    msg = ("N342 String interpolation should be delayed to be "
+           "handled by the logging code, rather than being done "
+           "at the point of the logging call. "
+           "Use ',' instead of '%'.")
+
+    if noqa:
+        return
+
+    if 'neutron/tests/' in filename:
+        return
+
+    if log_string_interpolation.match(logical_line):
+        yield(0, msg)
+
+
 def factory(register):
     register(validate_log_translations)
     register(use_jsonutils)
@@ -343,9 +390,8 @@
     register(check_python3_xrange)
     register(check_no_basestring)
     register(check_python3_no_iteritems)
-    register(check_asserttrue)
+    register(check_asserttruefalse)
     register(no_mutable_default_args)
-    register(check_assertfalse)
     register(check_assertempty)
     register(check_assertisinstance)
     register(check_assertequal_for_httpcode)
@@ -353,3 +399,4 @@
     register(check_oslo_i18n_wrapper)
     register(check_builtins_gettext)
     register(check_unittest_imports)
+    register(check_delayed_string_interpolation)
diff -Nru neutron-9.0.0~b2~dev280/neutron/ipam/driver.py neutron-9.0.0~b3~dev557/neutron/ipam/driver.py
--- neutron-9.0.0~b2~dev280/neutron/ipam/driver.py	2016-05-12 19:51:41.000000000 +0000
+++ neutron-9.0.0~b3~dev557/neutron/ipam/driver.py	2016-08-29 20:05:49.000000000 +0000
@@ -119,6 +119,18 @@
         :raises: TODO(Carl) What sort of errors do we need to plan for?
         """
 
+    def needs_rollback(self):
+        """Whether driver needs an explicit rollback when operations fail.
+
+        A driver that (de)allocates resources in the same DB transaction passed
+        to it by Neutron will not want explicit rollback. A truly external IPAM
+        system would need to return True.
The default is True since + all drivers were assumed to be designed to need it from the start. + + :returns: True if driver needs to be called on rollback + """ + return True + @six.add_metaclass(abc.ABCMeta) class Subnet(object): diff -Nru neutron-9.0.0~b2~dev280/neutron/ipam/drivers/neutrondb_ipam/driver.py neutron-9.0.0~b3~dev557/neutron/ipam/drivers/neutrondb_ipam/driver.py --- neutron-9.0.0~b2~dev280/neutron/ipam/drivers/neutrondb_ipam/driver.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/ipam/drivers/neutrondb_ipam/driver.py 2016-08-29 20:05:49.000000000 +0000 @@ -291,7 +291,7 @@ raise ipam_exc.InvalidSubnetRequest( reason=_("An identifier must be specified when updating " "a subnet")) - if not subnet_request.allocation_pools: + if subnet_request.allocation_pools is None: LOG.debug("Update subnet request for subnet %s did not specify " "new allocation pools, there is nothing to do", subnet_request.subnet_id) @@ -314,3 +314,6 @@ "Neutron subnet %s does not exist"), subnet_id) raise n_exc.SubnetNotFound(subnet_id=subnet_id) + + def needs_rollback(self): + return False diff -Nru neutron-9.0.0~b2~dev280/neutron/ipam/exceptions.py neutron-9.0.0~b3~dev557/neutron/ipam/exceptions.py --- neutron-9.0.0~b2~dev280/neutron/ipam/exceptions.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/ipam/exceptions.py 2016-08-29 20:05:49.000000000 +0000 @@ -68,6 +68,10 @@ message = _("No more IP addresses available.") +class IpAddressGenerationFailureNoMatchingSubnet(IpAddressGenerationFailure): + message = _("No valid service subnet for the given device owner.") + + class IPAllocationFailed(exceptions.NeutronException): message = _("IP allocation failed. Try again later.") diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/de/LC_MESSAGES/neutron.po neutron-9.0.0~b3~dev557/neutron/locale/de/LC_MESSAGES/neutron.po --- neutron-9.0.0~b2~dev280/neutron/locale/de/LC_MESSAGES/neutron.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/de/LC_MESSAGES/neutron.po 2016-08-29 20:05:49.000000000 +0000 @@ -8,9 +8,9 @@ # Frank Kloeker , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -194,13 +194,6 @@ msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' ist kleiner als 'port_min'" -msgid "" -"(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " -"service to execute the command against." -msgstr "" -"(Veraltet. Verwenden Sie stattdessen '--subproject neutron-SERVICE'.) Der " -"erweiterte Dienst für den der Befehl ausgeführt werden soll." - msgid "0 is not allowed as CIDR prefix length" msgstr "0 ist als Länge für CIDR-Präfix nicht zulässig" @@ -602,9 +595,6 @@ msgstr "" "IPv4- und IPv6-Präfixe können in einem Subnetzpool nicht gemischt werden." -msgid "Cannot specify both --service and --subproject." -msgstr "--service und --subproject können nicht gemeinsam angegeben werden." 
- msgid "Cannot specify both subnet-id and port-id" msgstr "Angabe sowohl von Teilnetz-ID als auch von Port-ID nicht möglich" @@ -1291,9 +1281,6 @@ msgid "End of tunnel range is less than start of tunnel range" msgstr "Ende des Tunnelbereichs ist kleiner als Anfang des Tunnelbereichs" -msgid "Enforce using split branches file structure." -msgstr "Verwendung einer Dateistruktur mit getrennten Zweigen erzwingen." - #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Fehler %(reason)s beim Ausführen der Operation." @@ -1502,14 +1489,6 @@ msgid "Floating IP %(floatingip_id)s could not be found" msgstr "Dynamische IP-Adresse %(floatingip_id)s konnte nicht gefunden werden" -#, python-format -msgid "" -"Floating IP %(floatingip_id)s is associated with non-IPv4 address " -"%s(internal_ip)s and therefore cannot be bound." -msgstr "" -"Dynamische IP-Adresse %(floatingip_id)s wurde der Nicht-IPv4-Adresse " -"%s(internal_ip)s zugeordnet und kann daher nicht gebunden werden." - msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Für TCP/UDP-Protokolle muss 'port_range_min' '<= port_range_max' sein" @@ -1593,9 +1572,6 @@ "Maschine verwendet werden soll. Alle auf dieser Maschine ausgeführten " "Agenten und Services müssen denselben Hostwert verwenden." -msgid "How many times Neutron will retry MAC generation" -msgstr "Wie oft Neutron die MAC-Adressenerstellung erneut versuchen wird" - #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" @@ -1699,13 +1675,6 @@ "Bei 'True' sollen Plugins, die dies unterstützen, VLAN-transparente Netze " "erstellen dürfen." -msgid "" -"If non-empty, the l3 agent can only configure a router that has the matching " -"router ID." -msgstr "" -"Wenn 'namespaces' inaktiviert ist, kann der Agent der Ebene 3 nur einen " -"Router konfigurieren, der über die entsprechende Router-ID verfügt." - msgid "Illegal IP version number" msgstr "Illegale IP-Versionsnummer" @@ -2011,9 +1980,6 @@ "physical_network-Namen zuzulassen. Verwenden Sie eine leere Liste, um " "einfache Netze zu inaktivieren." -msgid "Local IP address of the VXLAN endpoints." -msgstr "Lokale IP-Adresse von VXLAN-Endpunkten." - msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Position für UNIX-Domänensocket von Metadaten-Proxy." @@ -2060,15 +2026,6 @@ "automatisch von diesem Wert ab. Nimmt standardmäßig den Wert 1500 an, dem " "Standardwert für Ethernet." -msgid "" -"MTU setting for device. This option will be removed in Newton. Please use " -"the system-wide segment_mtu setting which the agents will take into account " -"when wiring VIFs." -msgstr "" -"MTU-Einstellung für Gerät. Diese Option wird in Newton entfernt. Verwenden " -"Sie die systemweite Einstellung 'segment_mtu', die von den Agenten beim " -"Verbinden von VIFs berücksichtigt wird. " - msgid "MTU size of veth interfaces" msgstr "MTU-Größe von Veth-Schnittstellen" @@ -2146,9 +2103,6 @@ "Messungsbezeichnungsregel mit remote_ip_prefix %(remote_ip_prefix)s weist " "eine Überschneidung mit einer anderen auf" -msgid "Method cannot be called within a transaction." -msgstr "Die Methode kann nicht aus einer Transaktion heraus aufgerufen werden." - msgid "Migration from distributed router to centralized is not supported" msgstr "" "Die Migration von verteiltem Router zu zentralisiertem Router wird nicht " @@ -2251,18 +2205,6 @@ msgstr "Name der zu verwendenden Open vSwitch-Brücke" msgid "" -"Name of bridge used for external network traffic. 
This should be set to an " -"empty value for the Linux Bridge. When this parameter is set, each L3 agent " -"can be associated with no more than one external network. This option is " -"deprecated and will be removed in the M release." -msgstr "" -"Der Name der für externen Netzverkehr verwendeten Brücke. Für die Linux-" -"Brücke sollte diese Eigenschaft auf einen leeren Wert gesetzt werden. Wenn " -"dieser Parameter gesetzt ist, kann jeder L3-Agent maximal einem externen " -"Netz zugeordnet werden. Diese Option ist veraltet und wird im M-Release " -"entfernt. " - -msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Name der zu verwendenden Nova-Region. Nützlich, wenn Keystone mehrere " @@ -2340,9 +2282,6 @@ msgid "Neutron core_plugin not configured!" msgstr "Neutron-'core_plugin' nicht konfiguriert!" -msgid "Neutron plugin provider module" -msgstr "Provider-Modul für Neutron-Plugin" - msgid "New value for first_ip or last_ip has to be specified." msgstr "Der neue Wert für 'first_ip' oder 'last_ip' muss angegeben werden." @@ -2380,16 +2319,9 @@ msgstr "Keine Offline-Migrationen anstehend." #, python-format -msgid "No providers specified for '%s' service, exiting" -msgstr "Keine Anbieter angegeben für Dienste '%s', wird beendet" - -#, python-format msgid "No shared key in %s fields" msgstr "Kein gemeinsam genutzter Schlüssel in %s-Feldern" -msgid "No versions callback provided in ResourceVersionsManager" -msgstr "Kein Versionscallback im ResourceVersionsManager bereitgestellt." - msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "Es ist nicht zulässig, einem Agenten im Modus 'dvr' manuell einen Router " @@ -2928,13 +2860,6 @@ msgid "Resource body required" msgstr "Ressourcen-Nachrichtentext erforderlich" -msgid "" -"Resource name(s) that are supported in quota features. This option is now " -"deprecated for removal." -msgstr "" -"Resourcenname(n), die in Quotenfunktionen unterstützt werden. Diese Option " -"wird jetzt nicht weiter unterstützt und kann später entfernt werden." - msgid "Resource not found." msgstr "Ressource nicht gefunden." @@ -3591,14 +3516,6 @@ "Diesem Netz sind Router zugeordnet, die für den Zugriff von dieser " "Richtlinie abhängig sind." -msgid "" -"This will choose the web framework in which to run the Neutron API server. " -"'pecan' is a new experiemental rewrite of the API server." -msgstr "" -"Dies legt das Web-Framework fest, in dem der Neutron-API-Server ausgeführt " -"werden soll. 'pecan' ist eine neu programmierte experimentelle Version des " -"API-Servers." - msgid "Timeout" msgstr "Zeitüberschreitung" @@ -4192,14 +4109,6 @@ "Der Konfigurationsparameter max_l3_agents_per_router %(max_agents)s ist " "ungültig. Ermuss größer-gleich min_l3_agents_per_router %(min_agents)s sein." -#, python-format -msgid "" -"min_l3_agents_per_router config parameter is not valid. It has to be equal " -"to or more than %s for HA." -msgstr "" -"Konfigurationsparameter min_l3_agents_per_router ist nicht gültig. Der Wert " -"muss für hohe Verfügbarkeit größer-gleich %s sein." - msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "Es müssen exakt 2 Argumente angegeben werden: cidr und MAC." 
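At runtime, catalogs like the one above are looked up through the standard gettext machinery (neutron wires this up via neutron._i18n and oslo.i18n). A minimal sketch of the lookup these .po files feed, assuming compiled .mo catalogs exist under a hypothetical localedir:

    import gettext

    # 'neutron' is the translation domain; the localedir path is illustrative.
    trans = gettext.translation('neutron', localedir='/usr/share/locale',
                                languages=['de'], fallback=True)
    _ = trans.gettext

    # With the German catalog installed, a msgid maps to its msgstr:
    print(_("Resource not found."))  # -> "Ressource nicht gefunden."
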
diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/es/LC_MESSAGES/neutron.po neutron-9.0.0~b3~dev557/neutron/locale/es/LC_MESSAGES/neutron.po --- neutron-9.0.0~b2~dev280/neutron/locale/es/LC_MESSAGES/neutron.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/es/LC_MESSAGES/neutron.po 2016-08-29 20:05:49.000000000 +0000 @@ -7,9 +7,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -188,13 +188,6 @@ msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' es menor que 'port_min'" -msgid "" -"(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " -"service to execute the command against." -msgstr "" -"(En desuso. Utilice '--subproject neutron-SERVICE' en su lugar). Servicio " -"avanzado contra el que ejecutar el mandato." - msgid "0 is not allowed as CIDR prefix length" msgstr "0 no está permitido como longitud del prefijo de CIDR" @@ -577,9 +570,6 @@ msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "No se pueden mezclar prefijos IPv4 y IPv6 en una agrupación de subred." -msgid "Cannot specify both --service and --subproject." -msgstr "No se pueden especificar los dos: --service y --subproject." - msgid "Cannot specify both subnet-id and port-id" msgstr "No se puede especificar el ID de subred y el ID de puerto" @@ -1203,9 +1193,6 @@ msgid "End of tunnel range is less than start of tunnel range" msgstr "El final del rango de túnel es menor que el inicio del rango de túnel" -msgid "Enforce using split branches file structure." -msgstr "Imponer utilizando la estructura del archivo de ramas de división." - #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Error %(reason)s al intentar realizar la operación." @@ -1412,14 +1399,6 @@ msgid "Floating IP %(floatingip_id)s could not be found" msgstr "No se ha podido encontrar la IP flotante %(floatingip_id)s." -#, python-format -msgid "" -"Floating IP %(floatingip_id)s is associated with non-IPv4 address " -"%s(internal_ip)s and therefore cannot be bound." -msgstr "" -"La IP flotante %(floatingip_id)s está asociada con una dirección no IPv4 " -"%s(internal_ip)s y, por tanto, no se puede enlazar." - msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Para los protocolos TCP/UDP, port_range_min debe ser <= port_range_max" @@ -1504,9 +1483,6 @@ "Neutron que se ejecutan en esta máquina. Todos los agentes y servicios que " "se ejecutan en esta máquina deben utilizar el mismo valor de host." -msgid "How many times Neutron will retry MAC generation" -msgstr "Cuántas veces Neutron intentará de nuevo la generación MAC" - #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" @@ -1612,13 +1588,6 @@ "Si es True, permite a los plug-in que la soportan crear redes VLAN " "transparentes." -msgid "" -"If non-empty, the l3 agent can only configure a router that has the matching " -"router ID." -msgstr "" -"Si no está vacío, el agente l3 solo puede configurar un enrutador que tenga " -"el ID de enrutador correspondiente." 
- msgid "Illegal IP version number" msgstr "Número de versión IP no permitido" @@ -1915,9 +1884,6 @@ "con nombres de physical_network arbitrarios. Utillice una lista vacía para " "deshabilitar las redes simples." -msgid "Local IP address of the VXLAN endpoints." -msgstr "Dirección IP local de puntos finales VXLAN." - msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Ubicación para socket de dominio UNIX de proxy de metadatos." @@ -1965,15 +1931,6 @@ "encima de este valor. El valor predeterminado es 1500, que es el valor " "estándar para Ethernet." -msgid "" -"MTU setting for device. This option will be removed in Newton. Please use " -"the system-wide segment_mtu setting which the agents will take into account " -"when wiring VIFs." -msgstr "" -"Parámetro MTU para el dispositivo. Esta opción se eliminará en Newton. " -"Utilice el parámetro system-wide segment_mtu, que los agentes tendrán en " -"cuenta al conectar las VIF." - msgid "MTU size of veth interfaces" msgstr "Tamaño de MTU de la interfaz de veth" @@ -2054,9 +2011,6 @@ "Regla de etiqueta de medición con remote_ip_prefix %(remote_ip_prefix)s se " "solapa otro" -msgid "Method cannot be called within a transaction." -msgstr "No se puede llamar a este método dentro de una transacción." - msgid "Migration from distributed router to centralized is not supported" msgstr "No se admite migrar de un direccionador distribuido a uno centralizado" @@ -2154,17 +2108,6 @@ msgstr "Nombre de puente de Open vSwitch a utilizar" msgid "" -"Name of bridge used for external network traffic. This should be set to an " -"empty value for the Linux Bridge. When this parameter is set, each L3 agent " -"can be associated with no more than one external network. This option is " -"deprecated and will be removed in the M release." -msgstr "" -"Nombre del puente utilizado para el tráfico de red externa. Se debe " -"establecer a un valor vacío para el puente Linux. Cuando este parámetro está " -"definido, se puede asociar cada uno de los agentes L3 a no más de una red " -"externa. Esta opción está en desuso y se eliminará en el release M. " - -msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Nombre de región de nova a utilizar. Es útil si keystone gestiona más de una " @@ -2241,9 +2184,6 @@ msgid "Neutron core_plugin not configured!" msgstr "¡Neutron core_plugin no está configurado!" -msgid "Neutron plugin provider module" -msgstr "Módulo de proveedor de plugin de Neutron" - msgid "New value for first_ip or last_ip has to be specified." msgstr "Se debe especificar un nuevo valor para first_ip o last_ip." @@ -2280,18 +2220,9 @@ msgstr "No hay migraciones fuera de línea pendientes." #, python-format -msgid "No providers specified for '%s' service, exiting" -msgstr "No hay proveedores especificados para '%s' servicio, salir" - -#, python-format msgid "No shared key in %s fields" msgstr "No hay ninguna clave compartida en los campos de %s" -msgid "No versions callback provided in ResourceVersionsManager" -msgstr "" -"No se proporciona devolución de llamada de versiones en " -"ResourceVersionsManager" - msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "No está permitido asignar manualmente un direccionador a un agente en modo " @@ -2819,13 +2750,6 @@ msgid "Resource body required" msgstr "Se necesita cuerpo de recurso" -msgid "" -"Resource name(s) that are supported in quota features. This option is now " -"deprecated for removal." 
-msgstr "" -"Nombres de recursos soportados en las características de cuota. Esta opción " -"está en desuso para su eliminación." - msgid "Resource not found." msgstr "Recurso no encontrado." @@ -3467,14 +3391,6 @@ "Hay direccionadores conectados a esta red que dependen de esta política para " "su acceso." -msgid "" -"This will choose the web framework in which to run the Neutron API server. " -"'pecan' is a new experiemental rewrite of the API server." -msgstr "" -"Con esta opción se elegirá la infraestructura web en la que ejecutar el " -"servidor de la API Neutron. 'pecan' es una nueva reescritura experimental " -"del servidor de API." - msgid "Timeout" msgstr "Tiempo de espera" @@ -4059,14 +3975,6 @@ "El parámetro de configuración max_l3_agents_per_router %(max_agents)s no es " "válido. Debe ser mayor o igual que min_l3_agents_per_router %(min_agents)s." -#, python-format -msgid "" -"min_l3_agents_per_router config parameter is not valid. It has to be equal " -"to or more than %s for HA." -msgstr "" -"El parámetro de configuración min_l3_agents_per_router no es válido. Tiene " -"que ser igual o mayor que %s para HA." - msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "debe dar exactamente 2 argumentos: cidr y MAC" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/fr/LC_MESSAGES/neutron.po neutron-9.0.0~b3~dev557/neutron/locale/fr/LC_MESSAGES/neutron.po --- neutron-9.0.0~b2~dev280/neutron/locale/fr/LC_MESSAGES/neutron.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/fr/LC_MESSAGES/neutron.po 2016-08-29 20:05:49.000000000 +0000 @@ -10,9 +10,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -196,13 +196,6 @@ msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' est plus petit que 'port_min'" -msgid "" -"(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " -"service to execute the command against." -msgstr "" -"(Obsolète. Utilisez '--subproject neutron-SERVICE' à la place.) Service " -"avancé sur lequel exécuter la commande. " - msgid "0 is not allowed as CIDR prefix length" msgstr "La longueur 0 n'est pas autorisée pour le préfixe CIDR" @@ -589,9 +582,6 @@ msgstr "" "Impossible d'associer les préfixes IPv4 et IPv6 dans un pool de sous-réseau." -msgid "Cannot specify both --service and --subproject." -msgstr "Impossible de spécifier le service et le sous-projet. " - msgid "Cannot specify both subnet-id and port-id" msgstr "Impossible de spécifier l'ID sous-réseau et l'ID port" @@ -1222,10 +1212,6 @@ msgid "End of tunnel range is less than start of tunnel range" msgstr "La fin de la plage de tunnels est inférieure au début" -msgid "Enforce using split branches file structure." -msgstr "" -"Imposer l'utilisation d'une structure de fichier de branches fractionnées. " - #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Erreur %(reason)s lors de la tentative d'exécution de l'opération." 
@@ -1434,14 +1420,6 @@ msgid "Floating IP %(floatingip_id)s could not be found" msgstr "L'adresse IP flottante %(floatingip_id)s est introuvable" -#, python-format -msgid "" -"Floating IP %(floatingip_id)s is associated with non-IPv4 address " -"%s(internal_ip)s and therefore cannot be bound." -msgstr "" -"L'adresse IP flottante %(floatingip_id)s est associée à l'adresse non IPV4 " -"%s(internal_ip)s, par conséquent, elle ne peut pas être liée. " - msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Pour les protocole TCP/UDP, port_range_min doit être <= port_range_max" @@ -1524,11 +1502,6 @@ "Neutron qui s'exécutent sur cette machine. Tous les agents et services qui " "s'exécutent sur cette machine doivent utiliser la même valeur d'hôte." -msgid "How many times Neutron will retry MAC generation" -msgstr "" -"Nombre de nouvelles tentatives de génération MAC ultérieurement effectuées " -"par Neutron" - #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" @@ -1633,13 +1606,6 @@ "Si True, autorisez les plug-in qui les prennent en charge pour créer les " "réseaux VLAN transparents." -msgid "" -"If non-empty, the l3 agent can only configure a router that has the matching " -"router ID." -msgstr "" -"S'il n'est pas vide, l'agent l3 peut uniquement configurer un routeur qui " -"comporte l'ID routeur correspondant." - msgid "Illegal IP version number" msgstr "Numéro de version IP illégal" @@ -1938,9 +1904,6 @@ "centralisés avec des noms de réseau physique arbitraires. Utilisez une " "liste vide pour désactiver les réseaux centralisés." -msgid "Local IP address of the VXLAN endpoints." -msgstr "Adresse IP locale des points de terminaison VXLAN." - msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Emplacement du socket de domaine UNIX du proxy de métadonnées." @@ -1988,15 +1951,6 @@ "automatiquement la surcharge du protocole de superposition de cette valeur. " "La valeur par défaut est 1500, valeur standard pour Ethernet." -msgid "" -"MTU setting for device. This option will be removed in Newton. Please use " -"the system-wide segment_mtu setting which the agents will take into account " -"when wiring VIFs." -msgstr "" -"Paramètre MTU pour l'unité. Cette option va être retirée dans Newton. " -"Utilisez le paramètre segment_mtu de niveau système qui sera pris en compte " -"par les agents lors de la connexion des VIF." - msgid "MTU size of veth interfaces" msgstr "Taille de MTU des interfaces veth" @@ -2076,9 +2030,6 @@ "La règle d'étiquette de mesure avec remote_ip_prefix %(remote_ip_prefix)s " "chevauche un(e) autre" -msgid "Method cannot be called within a transaction." -msgstr "La méthode ne peut pas être appelée au sein d'une transaction." - msgid "Migration from distributed router to centralized is not supported" msgstr "" "La migration du routeur distribué vers le routeur centralisé n'est pas prise " @@ -2181,17 +2132,6 @@ msgstr "Nom du pont Open vSwitch à utiliser" msgid "" -"Name of bridge used for external network traffic. This should be set to an " -"empty value for the Linux Bridge. When this parameter is set, each L3 agent " -"can be associated with no more than one external network. This option is " -"deprecated and will be removed in the M release." -msgstr "" -"Nom du pont utilisé pour le trafic réseau externe. Doit être défini sur une " -"valeur vide pour Linux Bridge. Lorsque ce paramètre est défini, chaque agent " -"L3 peut être associé à un réseau externe au maximum. 
Cette option est " -"obsolète et elle sera retirée dans la version M." - -msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Nom de la région nova à utiliser. Utile si keystone gère plusieurs régions." @@ -2267,9 +2207,6 @@ msgid "Neutron core_plugin not configured!" msgstr "Neutron core_plugin n'est pas configuré ! " -msgid "Neutron plugin provider module" -msgstr "Module du fournisseur de plug-in Neutron" - msgid "New value for first_ip or last_ip has to be specified." msgstr "Une nouvelle valeur doit être spécifiée pour first_ip ou last_ip." @@ -2305,16 +2242,9 @@ msgstr "Aucune migration hors ligne en attente." #, python-format -msgid "No providers specified for '%s' service, exiting" -msgstr "Aucun fournisseur indiqué pour le service de '%s', sortie" - -#, python-format msgid "No shared key in %s fields" msgstr "Aucune clé partagée dans les champs %s" -msgid "No versions callback provided in ResourceVersionsManager" -msgstr "Aucun rappel de versions fourni dans ResourceVersionsManager" - msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "Non autorisé à affecter manuellement un routeur à un agent en mode 'dvr'." @@ -2836,13 +2766,6 @@ msgid "Resource body required" msgstr "Corps de ressource obligatoire" -msgid "" -"Resource name(s) that are supported in quota features. This option is now " -"deprecated for removal." -msgstr "" -"Nom(s) de ressource pris en charge dans les fonctions de quota. Cette option " -"est désormaisobsolète pour retrait." - msgid "Resource not found." msgstr "Ressource non trouvé." @@ -3493,13 +3416,6 @@ "Certains routeurs connectés à ce réseau dépendent de cette stratégie pour " "l'accès." -msgid "" -"This will choose the web framework in which to run the Neutron API server. " -"'pecan' is a new experiemental rewrite of the API server." -msgstr "" -"Permet de choisir l'infrastructure Web dans laquelle exécuter le serveur API " -"de Neutron. 'pecan' est une nouvelle réécriture expérimentale du serveur API." - msgid "Timeout" msgstr "Délai d'expiration" @@ -4088,14 +4004,6 @@ "pas valide. Il doit être supérieur ou égal à min_l3_agents_per_router " "%(min_agents)s." -#, python-format -msgid "" -"min_l3_agents_per_router config parameter is not valid. It has to be equal " -"to or more than %s for HA." -msgstr "" -"Le paramètre de configuration min_l3_agents_per_router n'est pas valide. Il " -"doit être supérieur ou égal à %s pour la haute disponibilité." - msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "doit fournir exactement 2 arguments - cidr et MAC" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/it/LC_MESSAGES/neutron.po neutron-9.0.0~b3~dev557/neutron/locale/it/LC_MESSAGES/neutron.po --- neutron-9.0.0~b2~dev280/neutron/locale/it/LC_MESSAGES/neutron.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/it/LC_MESSAGES/neutron.po 2016-08-29 20:05:49.000000000 +0000 @@ -6,9 +6,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -187,13 +187,6 @@ msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' è più piccolo di 'port_min'" -msgid "" -"(Deprecated. 
Use '--subproject neutron-SERVICE' instead.) The advanced " -"service to execute the command against." -msgstr "" -"(Obsoleto; utilizzare '--subproject neutron-SERVICE'). Il servizio avanzato " -"su cui eseguire il comando." - msgid "0 is not allowed as CIDR prefix length" msgstr "0 non è consentito come lunghezza del prefisso CIDR" @@ -569,9 +562,6 @@ msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "Impossibile combinare i prefissi IPv4 e IPv6 in un pool di sottorete." -msgid "Cannot specify both --service and --subproject." -msgstr "Impossibile specificare entrambi --service e --subproject." - msgid "Cannot specify both subnet-id and port-id" msgstr "Impossibile specificare entrambi subnet_id e port_id" @@ -1189,9 +1179,6 @@ "L'intervallo finale del tunnel è inferiore all'intervallo iniziale del " "tunnel." -msgid "Enforce using split branches file structure." -msgstr "Applicare l'uso di struttura file con rami suddivisi." - #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Errore %(reason)s durante l'operazione." @@ -1397,14 +1384,6 @@ msgid "Floating IP %(floatingip_id)s could not be found" msgstr "Impossibile trovare l'IP mobile %(floatingip_id)s" -#, python-format -msgid "" -"Floating IP %(floatingip_id)s is associated with non-IPv4 address " -"%s(internal_ip)s and therefore cannot be bound." -msgstr "" -"L'IP mobile %(floatingip_id)s è associato all'indirizzo non IPv4 " -"%s(internal_ip)s e pertanto non è possibile collegarlo." - msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Per i protocolli TCP/UDP, port_range_min deve essere <= port_range_max" @@ -1487,9 +1466,6 @@ "esecuzione su questa macchina. Tutti gli agent ed i servizi in esecuzione su " "questa macchina devono utilizzare lo stesso valore host." -msgid "How many times Neutron will retry MAC generation" -msgstr "Quante volte Neutron richiamerà la generazione MAC" - #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" @@ -1592,13 +1568,6 @@ "Se True, consentire ai plugin che lo supportano di creare reti VLAN " "trasparenti." -msgid "" -"If non-empty, the l3 agent can only configure a router that has the matching " -"router ID." -msgstr "" -"Se non è vuoto, l'agent L3 può solo configurare un router che dispone " -"dell'ID router corrispondente." - msgid "Illegal IP version number" msgstr "Numero della versione IP non valido" @@ -1898,9 +1867,6 @@ "physical_network arbitrari. Utilizzare un elenco vuoto per disabilitare le " "reti flat." -msgid "Local IP address of the VXLAN endpoints." -msgstr "Indirizzo IP locale degli endpoint VXLAN." - msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Ubicazione per il socket del dominio UNIX del proxy di metadati." @@ -1946,15 +1912,6 @@ "protocollo di sovrapposizione da questo valore. Il valore predefinito è " "impostato su 1500, il valore standard per Ethernet." -msgid "" -"MTU setting for device. This option will be removed in Newton. Please use " -"the system-wide segment_mtu setting which the agents will take into account " -"when wiring VIFs." -msgstr "" -"L'impostazione MTU per il dispositivo. Questa opzione verrà rimossa in " -"Newton. Utilizzare l'impostazione segment_mtu a livello di sistema che gli " -"agent considerano quando scrivono i VIF." 
- msgid "MTU size of veth interfaces" msgstr "Dimensione MTU delle interfacce veth" @@ -2032,9 +1989,6 @@ "La regola di etichetta di misurazione remote_ip_prefix %(remote_ip_prefix)s " "si sovrappone ad un'altra" -msgid "Method cannot be called within a transaction." -msgstr "Il metodo non può essere richiamato all'interno di una transazione." - msgid "Migration from distributed router to centralized is not supported" msgstr "" "La migrazione dal router distribuito al router centralizzato non è " @@ -2134,17 +2088,6 @@ msgstr "Nome del bridge Open vSwitch da utilizzare" msgid "" -"Name of bridge used for external network traffic. This should be set to an " -"empty value for the Linux Bridge. When this parameter is set, each L3 agent " -"can be associated with no more than one external network. This option is " -"deprecated and will be removed in the M release." -msgstr "" -"Nome del bridge utilizzato per il traffico di rete esterno. Deve essere " -"impostato su un valore vuoto per Linux Bridge. Quando questo parametro è " -"impostato, ciascun agent L3 può essere associato con non più di una rete " -"esterna. Questa opzione è obsoleta e verrà rimossa nella release M." - -msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Nome della regione nova da utilizzare. Utile nel caso in cui keystone " @@ -2221,9 +2164,6 @@ msgid "Neutron core_plugin not configured!" msgstr "Neutron core_plugin non configurato!" -msgid "Neutron plugin provider module" -msgstr "Modulo del provider di plugin Neutron" - msgid "New value for first_ip or last_ip has to be specified." msgstr "È necessario specificare un nuovo valore per first_ip o last_ip." @@ -2259,16 +2199,9 @@ msgstr "Nessuna migrazione offline in sospeso." #, python-format -msgid "No providers specified for '%s' service, exiting" -msgstr "Nessun provider specificato per il servizio '%s', uscita in corso" - -#, python-format msgid "No shared key in %s fields" msgstr "Nessuna chiave condivisa in %s campi" -msgid "No versions callback provided in ResourceVersionsManager" -msgstr "Nessun callback di versioni fornito in ResourceVersionsManager" - msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "Attualmente non è consentito assegnare manualmente un router ad un agent in " @@ -2781,13 +2714,6 @@ msgid "Resource body required" msgstr "Corpo risorsa richiesto" -msgid "" -"Resource name(s) that are supported in quota features. This option is now " -"deprecated for removal." -msgstr "" -"Nomi risorsa supportati nelle funzioni quota. Questa opzione è ora obsoleta " -"per la rimozione." - msgid "Resource not found." msgstr "Risorsa non trovata." @@ -3419,13 +3345,6 @@ "Sono presenti router collegati a questa rete che dipendono da questa " "politica per l'accesso." -msgid "" -"This will choose the web framework in which to run the Neutron API server. " -"'pecan' is a new experiemental rewrite of the API server." -msgstr "" -"Verrà scelto il framework web in cui eseguire il server Neutron API. 'pecan' " -"è una nuova scrittura sperimentale del server API." - msgid "Timeout" msgstr "Timeout" @@ -4007,14 +3926,6 @@ "valido. Deve essere maggiore o uguale a min_l3_agents_per_router " "%(min_agents)s." -#, python-format -msgid "" -"min_l3_agents_per_router config parameter is not valid. It has to be equal " -"to or more than %s for HA." -msgstr "" -"il parametro di configurazione min_l3_agents_per_router non è valido. Deve " -"essere uguale o maggiore di %s per HA." 
- msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "è necessario fornire esattamente 2 argomenti - cidr e MAC" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/ja/LC_MESSAGES/neutron.po neutron-9.0.0~b3~dev557/neutron/locale/ja/LC_MESSAGES/neutron.po --- neutron-9.0.0~b2~dev280/neutron/locale/ja/LC_MESSAGES/neutron.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/ja/LC_MESSAGES/neutron.po 2016-08-29 20:05:49.000000000 +0000 @@ -15,9 +15,9 @@ # 笹原 昌美 , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -197,13 +197,6 @@ msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' が 'port_min' よりも小さくなっています" -msgid "" -"(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " -"service to execute the command against." -msgstr "" -"(提供を終了しています。代わりに '--subproject neutron-SERVICE' を使用してくだ" -"さい。) コマンドの実行の対象となる拡張サービス。" - msgid "0 is not allowed as CIDR prefix length" msgstr "0 は CIDR プレフィックス長として許可されていません" @@ -583,9 +576,6 @@ "1 つのサブネットプールで IPv4 のプレフィックスと IPv6 のプレフィックスを混用" "することはできません。" -msgid "Cannot specify both --service and --subproject." -msgstr "--service と --subproject の両方を指定することはできません。" - msgid "Cannot specify both subnet-id and port-id" msgstr "subnet-id と port-id の両方を指定することはできません" @@ -1179,9 +1169,6 @@ msgid "End of tunnel range is less than start of tunnel range" msgstr "トンネル範囲の終わりが、トンネル範囲の開始より小さくなっています" -msgid "Enforce using split branches file structure." -msgstr "分岐ファイル構造を必ず使用するようにします。" - #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "操作の試行中に発生したエラー %(reason)s。" @@ -1383,14 +1370,6 @@ msgid "Floating IP %(floatingip_id)s could not be found" msgstr "Floating IP %(floatingip_id)s が見つかりませんでした" -#, python-format -msgid "" -"Floating IP %(floatingip_id)s is associated with non-IPv4 address " -"%s(internal_ip)s and therefore cannot be bound." -msgstr "" -"Floating IP %(floatingip_id)s は IPv4 ではないアドレス %s(internal_ip)s と関" -"連付けられているため、バインドできません。" - msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "" "TCP/UDP プロトコルの場合、port_range_min は port_range_max 以下でなければなり" @@ -1470,9 +1449,6 @@ "スト名。このマシン上で稼働するすべてのエージェントとサービスは同じホスト値を" "使用する必要があります。" -msgid "How many times Neutron will retry MAC generation" -msgstr "Neutron が MAC の生成を再試行する回数" - #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" @@ -1576,13 +1552,6 @@ "True の場合、IPAM ドライバーをサポートするプラグインに VLAN トランスペアレン" "トネットワークの作成を許可します。" -msgid "" -"If non-empty, the l3 agent can only configure a router that has the matching " -"router ID." -msgstr "" -"この値が空でない場合、L3 エージェントは合致するルーター ID を持つルーターのみ" -"を設定することができます。" - msgid "Illegal IP version number" msgstr "IP バージョン番号が正しくありません" @@ -1871,9 +1840,6 @@ "の '*' を使用すると、任意の physical_network 名を持つフラットネットワークを作" "成できます。空のリストを使用すると、フラットネットワークが無効化されます。" -msgid "Local IP address of the VXLAN endpoints." -msgstr "VXLAN エンドポイントのローカル IP アドレス。" - msgid "Location for Metadata Proxy UNIX domain socket." msgstr "メタデータプロキシー UNIX ドメインソケットのロケーション。" @@ -1920,15 +1886,6 @@ "レイプロトコルオーバーヘッドの値を自動的に減算します。デフォルト値は " "Ethernet の標準値である 1500 です。" -msgid "" -"MTU setting for device. This option will be removed in Newton. 
Please use " -"the system-wide segment_mtu setting which the agents will take into account " -"when wiring VIFs." -msgstr "" -"デバイスの MTU 設定。このオプションは Newton では削除されます。VIF のワイヤリ" -"ングを行う際にエージェントが考慮に入れる、システム全体に関する segment_mtu 設" -"定を使用してください。" - msgid "MTU size of veth interfaces" msgstr "veth インターフェースの MTU サイズ" @@ -2007,9 +1964,6 @@ "remote_ip_prefix %(remote_ip_prefix)s を持つ計測ラベル規則が他の計測ラベル規" "則と重なり合っています" -msgid "Method cannot be called within a transaction." -msgstr "トランザクション内にメソッドを呼び出すことができません" - msgid "Migration from distributed router to centralized is not supported" msgstr "分散ルーターから集中ルーターへの移行はサポートされません" @@ -2107,17 +2061,6 @@ msgstr "使用する Open vSwitch ブリッジの名前" msgid "" -"Name of bridge used for external network traffic. This should be set to an " -"empty value for the Linux Bridge. When this parameter is set, each L3 agent " -"can be associated with no more than one external network. This option is " -"deprecated and will be removed in the M release." -msgstr "" -"外部のネットワークトラフィックが使用できるブリッジの名前。Linux Bridge に関し" -"ては 空の値を設定する必要があります。このパラメーターを設定すると、各 L3 エー" -"ジェントには 1 つの外部ネットワークしか割り当てることができなくなります。この" -"オプションは、M リリースでは提供を終了する予定です。" - -msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "使用する nova リージョンの名前。Keystone で複数のリージョンを管理する場合に役" @@ -2196,9 +2139,6 @@ msgid "Neutron core_plugin not configured!" msgstr "Neutron の core_plugin が設定されていません。" -msgid "Neutron plugin provider module" -msgstr "Neutron プラグインプロバイダーモジュール" - msgid "New value for first_ip or last_ip has to be specified." msgstr "first_ip または last_ip に対して新規の値を指定する必要があります。" @@ -2234,16 +2174,9 @@ msgstr "オフラインで実行中の移行はありません。" #, python-format -msgid "No providers specified for '%s' service, exiting" -msgstr "'%s' サービスに対して指定されたプロバイダーはありません。終了します" - -#, python-format msgid "No shared key in %s fields" msgstr "%s フィールドに共有鍵が存在しません" -msgid "No versions callback provided in ResourceVersionsManager" -msgstr "ResourceVersionsManager ではバージョンのコールバックは提供されません" - msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "'dvr' モードのエージェントへの手動でのルーター割り当ては許可されません。" @@ -2738,13 +2671,6 @@ msgid "Resource body required" msgstr "リソース本文が必要です" -msgid "" -"Resource name(s) that are supported in quota features. This option is now " -"deprecated for removal." -msgstr "" -"クォータ機能でサポートされるリソースの名前。現在、このオプションは提供を終了" -"しています。" - msgid "Resource not found." msgstr "リソースが見つかりません。" @@ -3375,13 +3301,6 @@ "このネットワークにはルーターが存在し、ルーターはアクセスの際にこのポリシーを" "使用します。" -msgid "" -"This will choose the web framework in which to run the Neutron API server. " -"'pecan' is a new experiemental rewrite of the API server." -msgstr "" -"これにより、Neutron の API サーバーを起動する Web フレームワークを選択しま" -"す。'pecan' とは API サーバーを新規に実験的に再作成したものです。" - msgid "Timeout" msgstr "タイムアウト" @@ -3951,14 +3870,6 @@ "max_l3_agents_per_router %(max_agents)s 構成パラメーターが無効です。" "min_l3_agents_per_router %(min_agents)s 以上でなければなりません。" -#, python-format -msgid "" -"min_l3_agents_per_router config parameter is not valid. It has to be equal " -"to or more than %s for HA." 
-msgstr "" -"min_l3_agents_per_router 構成パラメーターが無効です。HA では、%s 以上でなけれ" -"ばなりません。" - msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "必ず 2 つの引数 (cidr および MAC) を提供する必要があります" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po neutron-9.0.0~b3~dev557/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po --- neutron-9.0.0~b2~dev280/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-error.po 2016-08-29 20:05:49.000000000 +0000 @@ -7,9 +7,9 @@ # Sungjin Kang , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -34,10 +34,6 @@ msgstr "%s 에이전트가 종료됩니다!" #, python-format -msgid "%s failed" -msgstr "%s 실패" - -#, python-format msgid "" "%s used in config as ipv6_gateway is not a valid IPv6 link-local address." msgstr "" @@ -210,17 +206,6 @@ "의 OVS가 필요합니다." msgid "" -"Check for Open vSwitch support of conntrack support failed. OVS/CT firewall " -"will not work. A newer version of OVS (2.5+) and linux kernel (4.3+) are " -"required. See https://github.com/openvswitch/ovs/blob/master/FAQ.mdfor more " -"information." -msgstr "" -"Open vSwitch에서 conntrack을 지원하는지 확인하는 데 실패했습니다. OVS/CT 방화" -"벽이 작동하지 않습니다. 새 버전의 OVS(2.5+) 및 linux 커널(4.3+)이 필요합니" -"다. 자세한 내용은 https://github.com/openvswitch/ovs/blob/master/FAQ.md를 참" -"조하십시오." - -msgid "" "Check for VF management support failed. Please ensure that the version of ip " "link being used has VF support." msgstr "" @@ -423,9 +408,6 @@ msgid "Exception encountered during network rescheduling" msgstr "네트워크 재스케줄링 중에 예외 발생" -msgid "Exception encountered during router rescheduling." -msgstr "라우터 재스케줄링 중에 예외가 발생했습니다." - msgid "Exception loading extension" msgstr "확장을 로드하는 중에 예외 발생" @@ -595,10 +577,6 @@ "네트워크 유형이 지원되지 않으므로 세그먼트 '%s'을(를) 해제하지 못했습니다." #, python-format -msgid "Failed to reschedule router %s" -msgstr "라우터 %s을(를) 재스케줄링하지 못했습니다." - -#, python-format msgid "Failed to schedule network %s" msgstr "네트워크 %s을(를) 스케줄하지 못했습니다." @@ -649,14 +627,6 @@ "IPTablesManager.apply가 다음 iptables 규칙 세트를 적용하지 못함:\n" "%s" -#, python-format -msgid "" -"IPv6 protocol requires a minimum MTU of %(min_mtu)s, while the configured " -"value is %(current_mtu)s" -msgstr "" -"IPv6 프로토콜에는 최소 %(min_mtu)s MTU가 필요한 반면 구성된 값은 " -"%(current_mtu)s입니다." - msgid "" "ImportError exception occurred while loading the external DNS service driver" msgstr "외부 DNS 서비스 드라이버를 로드하는 중에 ImportError 예외 발생" @@ -769,10 +739,6 @@ msgstr "다중 포트가 %s로 시작하는 port_id를 가지고 있습니다" #, python-format -msgid "Network %s has no segments" -msgstr "%s 네트워크에 세그먼트가 없습니다" - -#, python-format msgid "Network %s info call failed." msgstr "네트워크 %s 정보 호출에 실패했습니다." @@ -928,9 +894,6 @@ "서버에서 필수 시간 내에 라우터의 정보를 리턴하지 못하여, 청크 크기가 %s(으)" "로 감소됨" -msgid "Switch connection timeout" -msgstr "스위치 연결 제한시간 초과" - #, python-format msgid "The SNAT namespace %s does not exist for the router." msgstr "라우터에서 SNAT 네임스페이스 %s이(가) 없습니다." @@ -1059,10 +1022,6 @@ "동할 수 있도록 하나를 작성/할당하십시오." 
#, python-format -msgid "Unable to generate mac address after %s attempts" -msgstr "%s 시도 후 MAC 주소를 생성할 수 없음" - -#, python-format msgid "Unable to get port details for %s" msgstr "%s에대한 포트 세부 사항을 가져올 수 없음" @@ -1187,13 +1146,5 @@ msgstr "포트 %s의 mechanism_manager.update_port_postcommit 실패" #, python-format -msgid "ofctl request %(request)s error %(error)s" -msgstr "Ofctl 요청 %(request)s 오류 %(error)s" - -#, python-format -msgid "ofctl request %(request)s timed out" -msgstr "Ofctl 요청 %(request)s 제한시간 초과" - -#, python-format msgid "tunnel_type %s not supported by agent" msgstr "에이전트에서 지원되지 않는 tunnel_type %s" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po neutron-9.0.0~b3~dev557/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po --- neutron-9.0.0~b2~dev280/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po 2016-08-29 20:05:49.000000000 +0000 @@ -1,9 +1,9 @@ # Sungjin Kang , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -749,9 +749,5 @@ msgid "agent_updated by server side %s!" msgstr "서버측 %s!에 의한 agent_updated" -#, python-format -msgid "port_unbound(): net_uuid %s not in local_vlan_map" -msgstr "port_unbound(): net_uuid %s이(가) local_vlan_map에 없음" - msgid "rpc_loop doing a full sync." msgstr "전체 동기화를 수행하는 rpc_loop." diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-warning.po neutron-9.0.0~b3~dev557/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-warning.po --- neutron-9.0.0~b2~dev280/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-warning.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-warning.po 2016-08-03 20:10:33.000000000 +0000 @@ -1,9 +1,9 @@ # Sungjin Kang , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b2.dev272\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-06-26 18:12+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -368,14 +368,6 @@ "%(agent)s에서 네트워크 %(network)s 제거" #, python-format -msgid "" -"Rescheduling router %(router)s from agent %(agent)s because the agent did " -"not report to the server in the last %(dead_time)s seconds." -msgstr "" -"마지막 %(dead_time)s초에 에이전트가 서버에 보고하지 않았으므로 에이전트 " -"%(agent)s에서 라우터 %(router)s 재스케줄링." - -#, python-format msgid "Respawning %(service)s for uuid %(uuid)s" msgstr "uuid %(uuid)s %(service)s 다시 파생" @@ -401,10 +393,6 @@ "이트하십시오." msgid "" -"The input changed_since must be in the following format: YYYY-MM-DDTHH:MM:SS" -msgstr "input changed_since 형식은 YYYY-MM-DDTHH:MM:SS이어야 함" - -msgid "" "The quota driver neutron.quota.ConfDriver is deprecated as of Liberty. 
" "neutron.db.quota.driver.DbQuotaDriver should be used in its place" msgstr "" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/ko_KR/LC_MESSAGES/neutron.po neutron-9.0.0~b3~dev557/neutron/locale/ko_KR/LC_MESSAGES/neutron.po --- neutron-9.0.0~b2~dev280/neutron/locale/ko_KR/LC_MESSAGES/neutron.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/ko_KR/LC_MESSAGES/neutron.po 2016-08-29 20:05:49.000000000 +0000 @@ -9,16 +9,18 @@ # Sungjin Kang , 2013 # Sungjin Kang , 2013 # Andreas Jaeger , 2016. #zanata +# HYUNGBAI PARK , 2016. #zanata +# jtjang , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: 2016-03-29 08:19+0000\n" -"Last-Translator: SeYeon Lee \n" +"PO-Revision-Date: 2016-08-17 10:52+0000\n" +"Last-Translator: HYUNGBAI PARK \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" @@ -193,13 +195,6 @@ msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max'가 'port_min'보다 작음" -msgid "" -"(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " -"service to execute the command against." -msgstr "" -"(더 이상 사용되지 않습니다. '--subproject neutron-SERVICE'를 대신 사용하십시" -"오.) 명령을 실행할 고급 서비스입니다. " - msgid "0 is not allowed as CIDR prefix length" msgstr "0은 CIDR 접두부 길이로 허용되지 않음" @@ -552,9 +547,6 @@ msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "서브넷 풀에서 IPv4 및 IPv6 접두부를 혼합하여 사용할 수 없습니다." -msgid "Cannot specify both --service and --subproject." -msgstr "--service와 --subproject를 모두 지정할 수 없습니다. " - msgid "Cannot specify both subnet-id and port-id" msgstr "subnet-id와 port-id를 둘 다 지정할 수 없음" @@ -651,6 +643,9 @@ msgid "Client certificate for nova metadata api server." msgstr "nova 메타데이터 api 서버에 대한 클라이언트 인증서입니다." +msgid "Column type and condition operand do not match" +msgstr "컬럼 타입과 조건 피연산자가 일치하지 않음" + msgid "" "Comma-separated list of : tuples, mapping " "network_device to the agent's node-specific list of virtual functions that " @@ -1125,9 +1120,6 @@ msgid "End of tunnel range is less than start of tunnel range" msgstr "터널 범위의 끝이 터널 범위의 시작보다 작음" -msgid "Enforce using split branches file structure." -msgstr "분할 분기 파일 구조 사용을 적용하십시오. " - #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "조작 시도 중 오류 %(reason)s이(가) 발생했습니다. " @@ -1285,6 +1277,13 @@ msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "요청을 구문 분석하지 못했습니다. 필수 속성 '%s'이(가) 지정되지 않음" +#, python-format +msgid "" +"Failed to remove provided policy %(policy_id)s because you are not " +"authorized." +msgstr "" +"당신은 권한이 없기 때문에 제공하는 정책 %(policy_id)s을 제거할 수 없습니다." + msgid "Failed to remove supplemental groups" msgstr "보조 그룹을 제거하지 못함" @@ -1325,14 +1324,6 @@ msgid "Floating IP %(floatingip_id)s could not be found" msgstr "%(floatingip_id)s 부동 IP를 찾을 수 없음" -#, python-format -msgid "" -"Floating IP %(floatingip_id)s is associated with non-IPv4 address " -"%s(internal_ip)s and therefore cannot be bound." -msgstr "" -"부동 IP %(floatingip_id)s이(가) 비IPv4 주소 %s(internal_ip)s과(와) 연관되어 " -"있으므로 바인드할 수 없습니다. 
" - msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "TCP/UDP 프로토콜의 경우 port_range_min은 port_range_max 이하여야 함" @@ -1404,9 +1395,6 @@ "름입니다. 이 시스템에서 실행 중인 모든 에이전트 및 서비스는 같은 호스트 값" "을 사용해야 합니다." -msgid "How many times Neutron will retry MAC generation" -msgstr "Neutron이 MAC 생성을 재시도할 횟수" - #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" @@ -1447,6 +1435,9 @@ msgid "IP allocation requires subnet_id or ip_address" msgstr "IP 할당은 subnet_id 또는 ip_address가 필요함" +msgid "IP allocation requires subnets for network" +msgstr "IP 할당은 네트워크의 서브넷을 필요로 합니다." + #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables rules:\n" @@ -1504,13 +1495,6 @@ "True인 경우 이를 지원하는 플러그인을 사용하여 VLAN 투명 네트워크를 작성할 수 " "있습니다." -msgid "" -"If non-empty, the l3 agent can only configure a router that has the matching " -"router ID." -msgstr "" -"비어있지 않으면 l3 에이전트가 일치하는 라우터 ID가 있는 라우터만 구성할 수 있" -"습니다." - msgid "Illegal IP version number" msgstr "올바르지 않은 IP 버전 번호" @@ -1797,9 +1781,6 @@ "에 임의의 physical_network 이름을 사용하려면 기본값 '*'를 사용하십시오. 빈 목" "록을 사용하여 플랫 네트워크를 비활성화합니다." -msgid "Local IP address of the VXLAN endpoints." -msgstr "VXLAN 엔드포인트의 로컬 IP 주소." - msgid "Location for Metadata Proxy UNIX domain socket." msgstr "메타데이터 프록시 UNIX 도메인 소켓의 위치입니다." @@ -1843,14 +1824,6 @@ "neutron이 이 값에서 오버레이 프로토콜 오버헤드를 자동으로 제거합니다. 이더넷" "의 표준 값인 1500으로 기본값이 지정됩니다." -msgid "" -"MTU setting for device. This option will be removed in Newton. Please use " -"the system-wide segment_mtu setting which the agents will take into account " -"when wiring VIFs." -msgstr "" -"장치의 MTU 설정입니다. 이 옵션은 Newton에서 제거됩니다. VIF를 연결할 때 에이" -"전트에서 고려할 시스템 전체 segment_mtu 설정을 사용하십시오." - msgid "MTU size of veth interfaces" msgstr "veth 인터페이스의 MTU 크기" @@ -1927,9 +1900,6 @@ "remote_ip_prefix %(remote_ip_prefix)s을(를) 가진 측정 레이블 규칙이 다른 항목" "과 겹침" -msgid "Method cannot be called within a transaction." -msgstr "트랜잭션 내에서 메소드를 호출할 수 없습니다." - msgid "Migration from distributed router to centralized is not supported" msgstr "분산 라우터에서 중앙으로 마이그레이션하는 작업은 지원되지 않음" @@ -2016,17 +1986,6 @@ msgstr "사용할 열린 vSwitch 브릿지의 이름" msgid "" -"Name of bridge used for external network traffic. This should be set to an " -"empty value for the Linux Bridge. When this parameter is set, each L3 agent " -"can be associated with no more than one external network. This option is " -"deprecated and will be removed in the M release." -msgstr "" -"외부 네트워크 트래픽에 사용되는 브릿지 이름입니다. Linux Bridge의 빈 값으로 " -"설정해야 합니다. 이 매개변수가 설정되면 각 L3 에이전트를 두 개 이상의 외부 네" -"트워크에 연관시킬 수 있습니다. 이 옵션은 더 이상 사용되지 않으므로 M 릴리스에" -"서 제거됩니다." - -msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "사용할 nova 리젼의 이름입니다. 키스톤이 둘 이상의 리젼을 관리할 경우 유용합니" @@ -2099,10 +2058,7 @@ msgstr "Neutron 서비스 유형 관리" msgid "Neutron core_plugin not configured!" -msgstr "Neutron core_plugin이 구성되지 않았습니다!" - -msgid "Neutron plugin provider module" -msgstr "Neutron 플러그인 제공자 모듈" +msgstr "Neutron core_plugin이 구성되지 않았습니다" msgid "New value for first_ip or last_ip has to be specified." msgstr "first_ip 또는 last_ip의 새 값을 지정해야 합니다." @@ -2125,6 +2081,9 @@ msgid "No more IP addresses available for subnet %(subnet_id)s." msgstr "서브넷 %(subnet_id)s에 대해 사용 가능한 IP 주소가 더 이상 없습니다. " +msgid "No more IP addresses available." +msgstr "사용 가능한 IP 주소가 더 이상 없습니다." + #, python-format msgid "" "No more Virtual Router Identifier (VRID) available when creating router " @@ -2137,16 +2096,9 @@ msgstr "보류 중인 오프라인 마이그레이션이 없습니다." 
#, python-format -msgid "No providers specified for '%s' service, exiting" -msgstr "'%s' 서비스에 대해 제공자가 지정되지 않음, 종료하는 중" - -#, python-format msgid "No shared key in %s fields" msgstr "%s 필드의 공유 키가 없음" -msgid "No versions callback provided in ResourceVersionsManager" -msgstr "ResourceVersionsManager에 버전 콜백이 제공되지 않음" - msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "'dvr' 모드에서 수동으로 에이전트에 라우터를 지정할 수 없습니다." @@ -2358,7 +2310,7 @@ #, python-format msgid "Plugin '%s' not found." -msgstr "플러그인 '%s'를 찾을 수 없습니다." +msgstr "플러그인 '%s'를 찾을 수 없습니다" msgid "Plugin does not support updating provider attributes" msgstr "플러그인이 제공자 속성 업데이트를 지원하지 않음" @@ -2613,13 +2565,6 @@ msgid "Resource body required" msgstr "자원 본문 필수" -msgid "" -"Resource name(s) that are supported in quota features. This option is now " -"deprecated for removal." -msgstr "" -"할당량 기능에서 지원되는 자원 이름입니다. 이 옵션은 이제 제거를 위해더 이상 " -"사용되지 않습니다. " - msgid "Resource not found." msgstr "자원을 찾을 수 없습니다." @@ -3224,13 +3169,6 @@ "이 네트워크에 연결된 라우터가 있으며, 해당 라우터에는 액세스를 위해 이 정책" "이 필요합니다." -msgid "" -"This will choose the web framework in which to run the Neutron API server. " -"'pecan' is a new experiemental rewrite of the API server." -msgstr "" -"그러면 Neutron API 서버를 실행할 웹 프레임워크를 선택합니다. 'pecan'은 API 서" -"버를 실험적으로 새로 재작성합니다." - msgid "Timeout" msgstr "제한시간" @@ -3790,14 +3728,6 @@ "min_l3_agents_per_router와 같거나 이보다 커야 합니다.%(min_agents)s과(와) 연" "관되어 있습니다." -#, python-format -msgid "" -"min_l3_agents_per_router config parameter is not valid. It has to be equal " -"to or more than %s for HA." -msgstr "" -"min_l3_agents_per_router 구성 매개변수가 올바르지 않습니다.HA의 %s과(와) 동일" -"하거나 이상이어야 합니다." - msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "정확히 두 개의 인수 - cidr 및 MAC를 제공해야 함" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/pt_BR/LC_MESSAGES/neutron.po neutron-9.0.0~b3~dev557/neutron/locale/pt_BR/LC_MESSAGES/neutron.po --- neutron-9.0.0~b2~dev280/neutron/locale/pt_BR/LC_MESSAGES/neutron.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/pt_BR/LC_MESSAGES/neutron.po 2016-08-29 20:05:49.000000000 +0000 @@ -7,9 +7,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -188,13 +188,6 @@ msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' é menor que 'port_min'" -msgid "" -"(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " -"service to execute the command against." -msgstr "" -"(Descontinuado. Use '--subproject neutron-SERVICE' em seu lugar.) O serviço " -"avançado com relação ao qual executar o comando." - msgid "0 is not allowed as CIDR prefix length" msgstr "0 não é permitido como um comprimento do prefixo CIDR" @@ -572,9 +565,6 @@ msgstr "" "Não é possível combinarprefixos IPv4 e IPv6 em um conjunto de sub-rede." -msgid "Cannot specify both --service and --subproject." -msgstr "Não é possível especificar --serviço e --subprojeto." - msgid "Cannot specify both subnet-id and port-id" msgstr "Não é possível especificar subnet-id e port-id" @@ -1190,9 +1180,6 @@ msgstr "" "O término do intervalo do túnel é inferior ao início do intervalo do túnel" -msgid "Enforce using split branches file structure." 
-msgstr "Impingir usando a estrutura do arquivo de ramificações divididas." - #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Erro %(reason)s ao tentar a operação." @@ -1393,14 +1380,6 @@ msgid "Floating IP %(floatingip_id)s could not be found" msgstr "O IP flutuante %(floatingip_id)s não pôde ser localizado" -#, python-format -msgid "" -"Floating IP %(floatingip_id)s is associated with non-IPv4 address " -"%s(internal_ip)s and therefore cannot be bound." -msgstr "" -"O IP flutuante %(floatingip_id)s está associado ao endereço não IPv4 " -"%s(internal_ip)s e portanto não pode ser vinculado." - msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "Para protocolos TCP/UDP, port_range_min deve ser <= port_range_max" @@ -1481,9 +1460,6 @@ "execução nesta máquina. Todos os agentes e serviços em execução nesta " "máquina devem usar o mesmo valor do host." -msgid "How many times Neutron will retry MAC generation" -msgstr "Quantas vezes o Neutron tentará novamente a geração MAC" - #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" @@ -1586,13 +1562,6 @@ "Se True, então permita que plug-ins que suportam-no criem redes " "transparentes da VLAN." -msgid "" -"If non-empty, the l3 agent can only configure a router that has the matching " -"router ID." -msgstr "" -"Se não estiver vazio, o agente l3 poderá configurar apenas um roteador que " -"tenha o ID de roteador correspondente." - msgid "Illegal IP version number" msgstr "Número de versão de IP ilegal" @@ -1883,9 +1852,6 @@ "padrão '*' para permitir redes simples com nomes physical_network " "arbitrários. Use uma lista vazia para desativar redes simples." -msgid "Local IP address of the VXLAN endpoints." -msgstr "Endereço IP local dos terminais VXLAN." - msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Local para soquete de domínio UNIX de Proxy de Metadados." @@ -1929,15 +1895,6 @@ "o Neutron subtrai automaticamente a sobrecarga de protocolo sobreposta desse " "valor. Padronizado para 1500, o valor padrão para Ethernet." -msgid "" -"MTU setting for device. This option will be removed in Newton. Please use " -"the system-wide segment_mtu setting which the agents will take into account " -"when wiring VIFs." -msgstr "" -"Configuração de MTU para o dispositivo. Esta opção será removida na versão " -"Newton. Por favor use a configuração system-wide_segment_mtu que os agentes " -"considerarão ao conectar as VIFs." - msgid "MTU size of veth interfaces" msgstr "Tamanho MTU de interfaces vEth" @@ -2014,9 +1971,6 @@ msgstr "" "Regra de marcação com remote_ip_prefix %(remote_ip_prefix)s sobrepõe outra" -msgid "Method cannot be called within a transaction." -msgstr "O método não pode ser chamado dentro de uma transação." - msgid "Migration from distributed router to centralized is not supported" msgstr "A migração do roteador distribuído para centralizado não é suportada" @@ -2112,17 +2066,6 @@ msgstr "Nome da ponte Open vSwitch a ser usado" msgid "" -"Name of bridge used for external network traffic. This should be set to an " -"empty value for the Linux Bridge. When this parameter is set, each L3 agent " -"can be associated with no more than one external network. This option is " -"deprecated and will be removed in the M release." -msgstr "" -"Nome da bridge utilizado para o tráfego de redes externas. Este valor deve " -"ser vazio para a Linux Bridge. 
Quando este parâmetro é setado, cada agente " -"L3 pode ser associado a não mais de uma rede externa. Esta opção está " -"obsoleta e será removida na versão M." - -msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Nome da região do nova para utilização. Útil se keystone gerencia mais de " @@ -2199,9 +2142,6 @@ msgid "Neutron core_plugin not configured!" msgstr "Neutron core_plugin não configurado!" -msgid "Neutron plugin provider module" -msgstr "Módulo do provedor de plug-in Neutron" - msgid "New value for first_ip or last_ip has to be specified." msgstr "Novo valor para first_ip ur last_ip deve ser especificado." @@ -2236,17 +2176,9 @@ msgstr "Nenhuma migração off-line pendente." #, python-format -msgid "No providers specified for '%s' service, exiting" -msgstr "Nenhum provedor especificado para o serviço '%s', saindo" - -#, python-format msgid "No shared key in %s fields" msgstr "Nenhuma chave compartilhada nos campos %s" -msgid "No versions callback provided in ResourceVersionsManager" -msgstr "" -"Nenhum retorno de chamada de versões fornecido no ResourceVersionsManager" - msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "" "Não é permitido designar manualmente um roteador para um agente no modo " @@ -2761,13 +2693,6 @@ msgid "Resource body required" msgstr "Corpo do recurso necessário" -msgid "" -"Resource name(s) that are supported in quota features. This option is now " -"deprecated for removal." -msgstr "" -"Nomes de recursos que não são suportados em recursos de cota. Esta opção " -"agora foi descontinuada para remoção." - msgid "Resource not found." msgstr "Recurso não encontrado." @@ -3387,13 +3312,6 @@ msgstr "" "Há roteadores conectados a essa rede que dependem dessa política para acesso." -msgid "" -"This will choose the web framework in which to run the Neutron API server. " -"'pecan' is a new experiemental rewrite of the API server." -msgstr "" -"Isso escolherá a estrutura da web na qual executar o servidor da API " -"Neutron. O 'pecan' é uma nova regravação experimental do servidor da API." - msgid "Timeout" msgstr "Tempo limite" @@ -3970,14 +3888,6 @@ "válido. Ele deve ser maior ou igual a min_l3_agents_per_router " "%(min_agents)s." -#, python-format -msgid "" -"min_l3_agents_per_router config parameter is not valid. It has to be equal " -"to or more than %s for HA." -msgstr "" -"O parâmetro de configuração min_l3_agents_per_router não é válido. Ele deve " -"ser igual ou superior a %s para alta disponibilidade." - msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "Deve-se fornece exatamente 2 argumentos - cidr e MAC" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/ru/LC_MESSAGES/neutron.po neutron-9.0.0~b3~dev557/neutron/locale/ru/LC_MESSAGES/neutron.po --- neutron-9.0.0~b2~dev280/neutron/locale/ru/LC_MESSAGES/neutron.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/ru/LC_MESSAGES/neutron.po 2016-08-29 20:05:49.000000000 +0000 @@ -6,9 +6,9 @@ # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -189,13 +189,6 @@ msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' меньше чем 'port_min'" -msgid "" -"(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " -"service to execute the command against." -msgstr "" -"(Устарело. Используйте параметр '--subproject neutron-SERVICE'). " -"Дополнительная служба для выполнения команды." - msgid "0 is not allowed as CIDR prefix length" msgstr "Нулевое значение запрещено в качестве длины префикса CIDR" @@ -567,9 +560,6 @@ msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "Нельзя смешивать префиксы IPv4 и IPv6 в пуле подсетей." -msgid "Cannot specify both --service and --subproject." -msgstr "Не удается указать обе опции --service и --subproject." - msgid "Cannot specify both subnet-id and port-id" msgstr "subnet-id и port-id нельзя указывать одновременно" @@ -1177,10 +1167,6 @@ msgid "End of tunnel range is less than start of tunnel range" msgstr "Конечное значение диапазона туннелей меньше его начального значения" -msgid "Enforce using split branches file structure." -msgstr "" -"Выполнить принудительно с помощью разделения ветвей файловой структуры." - #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "Ошибка %(reason)s во время выполнения операции." @@ -1381,15 +1367,6 @@ msgid "Floating IP %(floatingip_id)s could not be found" msgstr "Не найден нефиксированный IP-адрес %(floatingip_id)s" -#, python-format -msgid "" -"Floating IP %(floatingip_id)s is associated with non-IPv4 address " -"%s(internal_ip)s and therefore cannot be bound." -msgstr "" -"Нефиксированный IP-адрес %(floatingip_id)s связан с адресом, " -"%s(internal_ip)s, отличным от IPv4, и поэтому для него нельзя задать " -"ограничение." - msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "" "Для протоколов TCP/UDP значение port_range_min должно быть <= port_range_max" @@ -1468,9 +1445,6 @@ "запущенными в этой системе. Все агенты и службы, запущенные в этой системе, " "должны использовать одно и то же значение хоста." -msgid "How many times Neutron will retry MAC generation" -msgstr "Число повторов генерации MAC для Neutron" - #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" @@ -1574,13 +1548,6 @@ msgstr "" "Если True, разрешаются модули, поддерживающие создание прозрачных сетей VLAN." -msgid "" -"If non-empty, the l3 agent can only configure a router that has the matching " -"router ID." -msgstr "" -"Если опция задана, то агент l3 может настроить только маршрутизатор с " -"совпадающим ИД маршрутизатора." - msgid "Illegal IP version number" msgstr "Запрещенный номер версии IP" @@ -1874,9 +1841,6 @@ "Для создания одноуровневых сетей с произвольными именами физических сетей " "используйте символ *. Пустой список запрещает создание одноуровневых сетей." -msgid "Local IP address of the VXLAN endpoints." -msgstr "Локальный IP-адрес конечных точек VXLAN." - msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Расположение сокета домена UNIX прокси метаданных. 
" @@ -1920,14 +1884,6 @@ "автоматически вычитает байты, необходимые для протокола перекрытия, из этого " "значения. Значение по умолчанию: 1500, стандартное для Ethernet." -msgid "" -"MTU setting for device. This option will be removed in Newton. Please use " -"the system-wide segment_mtu setting which the agents will take into account " -"when wiring VIFs." -msgstr "" -"Параметр MTU для устройства. Будет удален в Newton. Используйте системное " -"значение segment_mtu для агентов, работающих с VIF." - msgid "MTU size of veth interfaces" msgstr "Размер MTU интерфейсов veth" @@ -2007,9 +1963,6 @@ "Правило метки измерения с remote_ip_prefix %(remote_ip_prefix)s " "перекрывается другим правилом" -msgid "Method cannot be called within a transaction." -msgstr "Метод нельзя вызывать внутри транзакции." - msgid "Migration from distributed router to centralized is not supported" msgstr "" "Миграция с распределенных маршрутизаторов на централизованный не " @@ -2108,17 +2061,6 @@ msgstr "Имя используемого моста Open vSwitch" msgid "" -"Name of bridge used for external network traffic. This should be set to an " -"empty value for the Linux Bridge. When this parameter is set, each L3 agent " -"can be associated with no more than one external network. This option is " -"deprecated and will be removed in the M release." -msgstr "" -"Имя моста, используемого для передачи данных по внешней сети. Для Linux " -"Bridge должно быть задано пустое значение. Если задан этот параметр, то " -"каждый агент L3 может быть связан не более чем с одной внешней сетью. Эта " -"опция устарела и будет удалена в выпуске M." - -msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "" "Имя используемого региона nova. Необходимо, если keystone управляет " @@ -2194,9 +2136,6 @@ msgid "Neutron core_plugin not configured!" msgstr "Не настроен core_plugin Neutron!" -msgid "Neutron plugin provider module" -msgstr "Модуль провайдера модулей Neutron" - msgid "New value for first_ip or last_ip has to be specified." msgstr "Необходимо указать значение для first_ip или last_ip." @@ -2231,16 +2170,9 @@ msgstr "Нет ожидающих миграций с выключением." #, python-format -msgid "No providers specified for '%s' service, exiting" -msgstr "Не заданы поставщики для службы '%s', выход" - -#, python-format msgid "No shared key in %s fields" msgstr "Нет общего ключа в полях %s" -msgid "No versions callback provided in ResourceVersionsManager" -msgstr "В ResourceVersionsManager нет обратного вызова для версий" - msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "Невозможно вручную присвоить маршрутизатор агенту в режиме 'dvr'." @@ -2742,13 +2674,6 @@ msgid "Resource body required" msgstr "Требуется тело ресурса" -msgid "" -"Resource name(s) that are supported in quota features. This option is now " -"deprecated for removal." -msgstr "" -"Имена ресурсов, которые поддерживаются в функциях квоты. Эта функция " -"устарела и будет удалена." - msgid "Resource not found." msgstr "Ресурс не найден." @@ -3367,13 +3292,6 @@ msgstr "" "К сети подключены маршрутизаторы, доступ к которым зависит от этой стратегии." -msgid "" -"This will choose the web framework in which to run the Neutron API server. " -"'pecan' is a new experiemental rewrite of the API server." -msgstr "" -"Укажите веб-среду, а которой работает сервер API Neutron. 'pecan' - это " -"новый экспериментальный вариант сервера API." 
- msgid "Timeout" msgstr "Таймаут" @@ -3947,14 +3865,6 @@ "Недопустимый параметр конфигурации max_l3_agents_per_router %(max_agents)s. " "Он должен быть больше либо равен min_l3_agents_per_router %(min_agents)s." -#, python-format -msgid "" -"min_l3_agents_per_router config parameter is not valid. It has to be equal " -"to or more than %s for HA." -msgstr "" -"Недопустимый параметр конфигурации min_l3_agents_per_router. Он должен быть " -"не меньше %s для высокой готовности." - msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "Необходимо задать ровно 2 аргумента - cidr и MAC" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-error.po neutron-9.0.0~b3~dev557/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-error.po --- neutron-9.0.0~b2~dev280/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-error.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-error.po 2016-08-03 20:10:33.000000000 +0000 @@ -10,9 +10,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev21\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-07-15 19:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -37,10 +37,6 @@ msgstr "%s Ajanı sonlandırıldı!" #, python-format -msgid "%s failed" -msgstr "%s başarısız" - -#, python-format msgid "" "%s used in config as ipv6_gateway is not a valid IPv6 link-local address." msgstr "" @@ -281,9 +277,6 @@ msgid "Exception encountered during network rescheduling" msgstr "Ağ yeniden zamanlama sırasında istisna oluştu" -msgid "Exception encountered during router rescheduling." -msgstr "Yönlendirici yeniden zamanlama sırasında istisna oluştu." - msgid "Exception loading extension" msgstr "Eklenti yüklenirken istisna" @@ -419,10 +412,6 @@ msgstr "'%s' dilimi bırakılamadı çünkü ağ türü desteklenmiyor." #, python-format -msgid "Failed to reschedule router %s" -msgstr "Yönlendirici %s yeniden zamanlama başarısız" - -#, python-format msgid "Failed to schedule network %s" msgstr "Ağ %s zamanlama başarısız" @@ -499,10 +488,6 @@ msgstr "Birden çok bağlantı noktası %s port_id ile başlıyor" #, python-format -msgid "Network %s has no segments" -msgstr "%s ağının dilimi yok" - -#, python-format msgid "Network %s info call failed." msgstr " %s ağ bilgi çağırısı yapılamıyor." @@ -659,10 +644,6 @@ msgstr "%s ajanı bulunamıyor." #, python-format -msgid "Unable to generate mac address after %s attempts" -msgstr "%s denemeden sonra mac adresi üretilemedi" - -#, python-format msgid "Unable to listen on %(host)s:%(port)s" msgstr "%(host)s:%(port)s dinlenemiyor" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-info.po neutron-9.0.0~b3~dev557/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-info.po --- neutron-9.0.0~b2~dev280/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-info.po 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-info.po 2016-08-29 20:05:49.000000000 +0000 @@ -1,14 +1,16 @@ # OpenStack Infra , 2015. #zanata +# Alex Eng , 2016. #zanata +# KATO Tomoyuki , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: neutron 8.0.1.dev189\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-04-18 05:07+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"PO-Revision-Date: \n" -"Last-Translator: \n" +"PO-Revision-Date: 2016-07-02 08:18+0000\n" +"Last-Translator: KATO Tomoyuki \n" "Language-Team: Turkish (Turkey)\n" "Language: tr-TR\n" "X-Generator: Zanata 3.7.3\n" @@ -75,6 +77,9 @@ msgid "Agent out of sync with plugin!" msgstr "Ajan ve eklenti uyumsuz!" +msgid "All active networks have been fetched through RPC." +msgstr "All active networks have been fetched through RPC." + msgid "" "Allow sorting is enabled because native pagination requires native sorting" msgstr "" @@ -287,6 +292,10 @@ msgstr "L3 ajanı başlatıldı" #, python-format +msgid "Loaded agent extensions: %s" +msgstr "Yüklü ajan eklentiler: %s" + +#, python-format msgid "Loaded extension driver names: %s" msgstr "Yüklenen eklenti sürücü isimleri: %s" @@ -553,7 +562,3 @@ #, python-format msgid "agent_updated by server side %s!" msgstr "ajan sunucu tarafında güncellendi %s!" - -#, python-format -msgid "port_unbound(): net_uuid %s not in local_vlan_map" -msgstr "port_unbound(): net_uuid %s local_vlan_map içinde değil" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-warning.po neutron-9.0.0~b3~dev557/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-warning.po --- neutron-9.0.0~b2~dev280/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-warning.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/tr_TR/LC_MESSAGES/neutron-log-warning.po 2016-08-03 20:10:33.000000000 +0000 @@ -2,9 +2,9 @@ # Alex Eng , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b2.dev272\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-06-26 18:12+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -264,14 +264,6 @@ "%(network)s ağı %(agent)s ajanından çıkarılıyor çünkü ajan sunucuya son " "%(dead_time)s saniye rapor vermedi." -#, python-format -msgid "" -"Rescheduling router %(router)s from agent %(agent)s because the agent did " -"not report to the server in the last %(dead_time)s seconds." -msgstr "" -"Yönlendirici %(router)s %(agent)s ajanından yeniden zamanlanıyor çünkü ajan " -"sunucuya son %(dead_time)s saniye rapor vermedi." - msgid "" "Security group agent binding currently not set. This should be set by the " "end of the init process." diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/tr_TR/LC_MESSAGES/neutron.po neutron-9.0.0~b3~dev557/neutron/locale/tr_TR/LC_MESSAGES/neutron.po --- neutron-9.0.0~b2~dev280/neutron/locale/tr_TR/LC_MESSAGES/neutron.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/tr_TR/LC_MESSAGES/neutron.po 2016-08-03 20:10:34.000000000 +0000 @@ -9,9 +9,9 @@ # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev21\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-07-15 19:53+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -805,9 +805,6 @@ msgid "Group (gid or name) running this process after its initialization" msgstr "İlklendirilmesinden sonra bu süreci çalıştıran grup (gid veya isim)" -msgid "How many times Neutron will retry MAC generation" -msgstr "Neutron kaç kere MAC üretmeyi deneyecek" - #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" @@ -1013,9 +1010,6 @@ "neutron.ml2.type_drivers isim uzayından yüklenecek ağ türü sürücü giriş " "noktaları listesi." -msgid "Local IP address of the VXLAN endpoints." -msgstr "VXLAN son uçlarının yerel IP adresi." - msgid "Location for Metadata Proxy UNIX domain socket." msgstr "Metadata Vekil UNIX alan soketi için konum." @@ -1164,9 +1158,6 @@ msgid "Neutron core_plugin not configured!" msgstr "Neutron core_plugin yapılandırılmamış!" -msgid "Neutron plugin provider module" -msgstr "Neutron eklenti sağlayıcı modülü" - #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "%s harici ağıyla ilişkilendirilmiş uygun l3 ajanı bulunamadı" @@ -1185,10 +1176,6 @@ "sınırı 254." #, python-format -msgid "No providers specified for '%s' service, exiting" -msgstr "'%s' servisi için sağlayıcı belirtilmemiş, çıkılıyor" - -#, python-format msgid "" "Not enough l3 agents available to ensure HA. Minimum required " "%(min_agents)s, available %(num_agents)s." @@ -1459,13 +1446,6 @@ msgid "Resource body required" msgstr "Kaynak gövdesi gerekiyor" -msgid "" -"Resource name(s) that are supported in quota features. This option is now " -"deprecated for removal." -msgstr "" -"Kota özelliklerinde desteklenen kaynak isim(ler)i. Bu seçenek artık " -"kaldırılmak üzere kullanılmıyor." - msgid "Resource not found." msgstr "Kaynak bulunamadı." @@ -2164,14 +2144,6 @@ "değil. min_l3_agents_per_router %(min_agents)s den büyük ya da ona eşit " "olmalı." -#, python-format -msgid "" -"min_l3_agents_per_router config parameter is not valid. It has to be equal " -"to or more than %s for HA." -msgstr "" -"min_l3_agents_per_router yapılandırma parametresi geçerli değil. HA için %s " -"den büyük ya da eşit olmalı." - msgid "network_type required" msgstr "network_type gerekli" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/zh_CN/LC_MESSAGES/neutron.po neutron-9.0.0~b3~dev557/neutron/locale/zh_CN/LC_MESSAGES/neutron.po --- neutron-9.0.0~b2~dev280/neutron/locale/zh_CN/LC_MESSAGES/neutron.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/zh_CN/LC_MESSAGES/neutron.po 2016-08-29 20:05:49.000000000 +0000 @@ -15,9 +15,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -190,13 +190,6 @@ msgid "'port_max' is smaller than 'port_min'" msgstr "“port_max”小于“port_min”" -msgid "" -"(Deprecated. Use '--subproject neutron-SERVICE' instead.) 
The advanced " -"service to execute the command against." -msgstr "" -"(建议不要使用。请改为使用“--subproject neutron-SERVICE”。)要对其执行该命令" -"的高级服务。" - msgid "0 is not allowed as CIDR prefix length" msgstr "0不允许作为CIDR前缀长度" @@ -534,9 +527,6 @@ msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "不能在子网池中同时使用 IPv4 前缀和 IPv6 前缀。" -msgid "Cannot specify both --service and --subproject." -msgstr "无法同时指定 --service 和 --subproject。" - msgid "Cannot specify both subnet-id and port-id" msgstr "无法同时指定 subnet-id 和 port-id" @@ -1078,9 +1068,6 @@ msgid "End of tunnel range is less than start of tunnel range" msgstr "隧道范围的结束小于隧道范围的起始" -msgid "Enforce using split branches file structure." -msgstr "强制使用拆分分支文件结构。" - #, python-format msgid "Error %(reason)s while attempting the operation." msgstr "尝试执行该操作时发生错误 %(reason)s。" @@ -1268,14 +1255,6 @@ msgid "Floating IP %(floatingip_id)s could not be found" msgstr "找不到浮动 IP %(floatingip_id)s" -#, python-format -msgid "" -"Floating IP %(floatingip_id)s is associated with non-IPv4 address " -"%s(internal_ip)s and therefore cannot be bound." -msgstr "" -"浮动 IP %(floatingip_id)s 与非 IPv4 地址%s(internal_ip)s 相关联,因此无法绑" -"定。" - msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "对于 TCP/UDP 协议,port_range_min 必须小于等于 port_range_max" @@ -1345,9 +1324,6 @@ "Neutron 服务器以及此机器上运行的代理程序和服务要使用的主机名。此机器上运行的" "所有代理程序和服务必须使用同一主机值。" -msgid "How many times Neutron will retry MAC generation" -msgstr "Neutron 将重试 MAC 生成的次数" - #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" @@ -1440,11 +1416,6 @@ "networks." msgstr "如果为 True,那么允许那些支持它的插件创建 VLAN 透明网络。" -msgid "" -"If non-empty, the l3 agent can only configure a router that has the matching " -"router ID." -msgstr "如果非空,那么 L3 代理程序只能配置具有匹配路由器标识的路由器。" - msgid "Illegal IP version number" msgstr "IP 版本号不合法" @@ -1712,9 +1683,6 @@ "可通过其创建平面网络的 physical_network 名称的列表。使用缺省值“*”将允许平面网" "络使用任意 physical_network 名称。使用空列表将禁用平面网络。" -msgid "Local IP address of the VXLAN endpoints." -msgstr "VXLAN 端点的本地 IP 地址。" - msgid "Location for Metadata Proxy UNIX domain socket." msgstr "元数据代理 UNIX 域套接字的位置。" @@ -1756,14 +1724,6 @@ "和 VLAN 网络,neutron 使用此值而不做修改。对于 VXLAN 之类的覆盖网络,neutron " "自动从此值减去覆盖协议开销。缺省为 1500(这是以太网的标准值)。" -msgid "" -"MTU setting for device. This option will be removed in Newton. Please use " -"the system-wide segment_mtu setting which the agents will take into account " -"when wiring VIFs." -msgstr "" -"设备的 MTU 设置。此选项将在 Newton 中移除。请使用系统范围的 segment_mtu 设" -"置,连线 VIF 时,代理程序将考虑这些设置。" - msgid "MTU size of veth interfaces" msgstr "veth 接口的 MTU 大小" @@ -1839,9 +1799,6 @@ "带有 remote_ip_prefix %(remote_ip_prefix)s 的测量标签规则与另一测量标签规则重" "叠" -msgid "Method cannot be called within a transaction." -msgstr "不能在事务中调用方法。" - msgid "Migration from distributed router to centralized is not supported" msgstr "不支持从分布式路由器迁移至集中路由器" @@ -1927,16 +1884,6 @@ msgstr "要使用的已打开 vSwitch 网桥的名称" msgid "" -"Name of bridge used for external network traffic. This should be set to an " -"empty value for the Linux Bridge. When this parameter is set, each L3 agent " -"can be associated with no more than one external network. This option is " -"deprecated and will be removed in the M release." -msgstr "" -"用于外部网络流量的网桥的名称。对于 Linux 网桥,它应设置为空值。如果设置了此参" -"数,那么每个 L3 代理程序最多可与一个外部网络相关联。此选项已不推荐使用,将在 " -"M 发行版中移除。" - -msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "要使用的 nova 区域的名称。如果 keystone 管理多个区域,那么这很有用。" @@ -2009,9 +1956,6 @@ msgid "Neutron core_plugin not configured!" 
msgstr "未配置 Neutron core_plugin!" -msgid "Neutron plugin provider module" -msgstr "Neutron 插件提供程序模块" - msgid "New value for first_ip or last_ip has to be specified." msgstr "必须对 first_ip 或 last_ip 指定新值。" @@ -2045,16 +1989,9 @@ msgstr "没有脱机迁移处于暂挂状态。" #, python-format -msgid "No providers specified for '%s' service, exiting" -msgstr "没有为“%s”服务指定任何提供程序,正在退出" - -#, python-format msgid "No shared key in %s fields" msgstr "%s 字段中没有共享键" -msgid "No versions callback provided in ResourceVersionsManager" -msgstr "ResourceVersionsManager 中未提供版本回调" - msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." msgstr "不允许以“dvr”方式将路由器手动分配给代理程序。" @@ -2490,12 +2427,6 @@ msgid "Resource body required" msgstr "需要资源主体" -msgid "" -"Resource name(s) that are supported in quota features. This option is now " -"deprecated for removal." -msgstr "" -"存在配额功能部件中受支持的资源名称。现在,已建议不要使用此选项,会将其除去。" - msgid "Resource not found." msgstr "找不到资源。" @@ -3055,13 +2986,6 @@ "access." msgstr "根据此策略,有一些路由器附加至此网络以用于访问。" -msgid "" -"This will choose the web framework in which to run the Neutron API server. " -"'pecan' is a new experiemental rewrite of the API server." -msgstr "" -"这将选择要运行 Neutron API 服务器的 Web 框架。“pecan”是 API 服务器的新的试验" -"性改写。" - msgid "Timeout" msgstr "超时" @@ -3584,13 +3508,6 @@ "max_l3_agents_per_router %(max_agents)s 配置参数无效。它必须大于或等于 " "min_l3_agents_per_router %(min_agents)s。" -#, python-format -msgid "" -"min_l3_agents_per_router config parameter is not valid. It has to be equal " -"to or more than %s for HA." -msgstr "" -"min_l3_agents_per_router 配置参数无效。它必须等于或大于 %s,才能确保 HA。" - msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "必须提供正好 2 个自变量:cidr 和 MAC" diff -Nru neutron-9.0.0~b2~dev280/neutron/locale/zh_TW/LC_MESSAGES/neutron.po neutron-9.0.0~b3~dev557/neutron/locale/zh_TW/LC_MESSAGES/neutron.po --- neutron-9.0.0~b2~dev280/neutron/locale/zh_TW/LC_MESSAGES/neutron.po 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/locale/zh_TW/LC_MESSAGES/neutron.po 2016-08-29 20:05:49.000000000 +0000 @@ -6,9 +6,9 @@ # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" -"Project-Id-Version: neutron 9.0.0.0b2.dev47\n" +"Project-Id-Version: neutron 9.0.0.0b3.dev409\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2016-06-06 07:43+0000\n" +"POT-Creation-Date: 2016-08-18 18:59+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" @@ -183,13 +183,6 @@ msgid "'port_max' is smaller than 'port_min'" msgstr "'port_max' 小於 'port_min'" -msgid "" -"(Deprecated. Use '--subproject neutron-SERVICE' instead.) The advanced " -"service to execute the command against." -msgstr "" -"(已遭到淘汰。請改用 '--subproject neutron-SERVICE')要對其執行指令的進階服" -"務。" - msgid "0 is not allowed as CIDR prefix length" msgstr "不接受 0 作為 CIDR 字首長度" @@ -528,9 +521,6 @@ msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool." msgstr "不能在一個子網路儲存區中混合 IPv4 與 IPv6 字首。" -msgid "Cannot specify both --service and --subproject." -msgstr "無法同時指定 --service 和 --subproject。" - msgid "Cannot specify both subnet-id and port-id" msgstr "無法同時指定 subnet-id 及 port-id" @@ -1073,9 +1063,6 @@ msgid "End of tunnel range is less than start of tunnel range" msgstr "通道範圍的終止值小於通道範圍的起始值" -msgid "Enforce using split branches file structure." -msgstr "使用分割分支檔案結構來施行。" - #, python-format msgid "Error %(reason)s while attempting the operation." 
msgstr "嘗試執行作業時發生錯誤 %(reason)s。" @@ -1263,14 +1250,6 @@ msgid "Floating IP %(floatingip_id)s could not be found" msgstr "找不到浮動 IP %(floatingip_id)s" -#, python-format -msgid "" -"Floating IP %(floatingip_id)s is associated with non-IPv4 address " -"%s(internal_ip)s and therefore cannot be bound." -msgstr "" -"浮動 IP %(floatingip_id)s 與非 IPv4 位址 %s(internal_ip)s 相關聯,因此無法將" -"其連結。" - msgid "For TCP/UDP protocols, port_range_min must be <= port_range_max" msgstr "對於 TCP/UDP 通訊協定,port_range_min 必須 <= port_range_max" @@ -1341,9 +1320,6 @@ "在此機器上執行之 Neutron 伺服器、代理程式及服務要使用的主機名稱。在此機器上執" "行的所有代理程式及服務都必須使用相同的主機值。" -msgid "How many times Neutron will retry MAC generation" -msgstr "Neutron 將重試 MAC 產生作業的次數" - #, python-format msgid "" "ICMP code (port-range-max) %(value)s is provided but ICMP type (port-range-" @@ -1436,11 +1412,6 @@ "networks." msgstr "如果為 True,則容許支援它的外掛程式建立 VLAN 透通網路。" -msgid "" -"If non-empty, the l3 agent can only configure a router that has the matching " -"router ID." -msgstr "如果不是空的,則 L3 代理程式只能配置一個具有相符路由器 ID 的路由器。" - msgid "Illegal IP version number" msgstr "無效的 IP 版本號碼" @@ -1710,9 +1681,6 @@ "可用來建立平面網路的 physical_network 名稱清單。使用預設值 '*' 可容許含有任" "意 physical_network 名稱的平面網路。使用空白清單可停用平面網路。" -msgid "Local IP address of the VXLAN endpoints." -msgstr "VXLAN 端點的本端 IP 位址。" - msgid "Location for Metadata Proxy UNIX domain socket." msgstr "meta 資料 Proxy UNIX 網域 Socket 的位置" @@ -1755,14 +1723,6 @@ "Neutron 會自動從此值中扣除套版通訊協定額外負擔。預設值為 1500,這是乙太網路的" "標準值。" -msgid "" -"MTU setting for device. This option will be removed in Newton. Please use " -"the system-wide segment_mtu setting which the agents will take into account " -"when wiring VIFs." -msgstr "" -"裝置的 MTU 設定。這個選項將在 Newton 中予以移除。請使用系統範圍的 " -"segment_mtu 設定,代理程式在連接 VIF 時將考量此設定。" - msgid "MTU size of veth interfaces" msgstr "veth 介面的 MTU 大小" @@ -1838,9 +1798,6 @@ "計量標籤規則 (remote_ip_prefix = %(remote_ip_prefix)s),與另一個計量標籤規則" "重疊" -msgid "Method cannot be called within a transaction." -msgstr "無法在交易內呼叫方法。" - msgid "Migration from distributed router to centralized is not supported" msgstr "不支援從分散式路由器移轉至集中式" @@ -1925,16 +1882,6 @@ msgstr "要使用的 Open vSwitch 橋接器名稱" msgid "" -"Name of bridge used for external network traffic. This should be set to an " -"empty value for the Linux Bridge. When this parameter is set, each L3 agent " -"can be associated with no more than one external network. This option is " -"deprecated and will be removed in the M release." -msgstr "" -"用於外部網路資料流量的橋接器名稱。針對 Linux 橋接器,應該將此參數設為空值。設" -"定此參數時,每一個 L3 代理程式都可與不超過 1 個外部網路建立關聯。這個選項已淘" -"汰,並且將在 M 發行版中予以移除。" - -msgid "" "Name of nova region to use. Useful if keystone manages more than one region." msgstr "要使用的 Nova 區域名稱。如果 Keystone 管理多個區域,則很有用。" @@ -2007,9 +1954,6 @@ msgid "Neutron core_plugin not configured!" msgstr "未配置 Neutron core_plugin!" -msgid "Neutron plugin provider module" -msgstr "Neutron 外掛程式提供者模組" - msgid "New value for first_ip or last_ip has to be specified." msgstr "必須指定 first_ip 或 last_ip 的新值。" @@ -2043,16 +1987,9 @@ msgstr "沒有擱置中的離線移轉。" #, python-format -msgid "No providers specified for '%s' service, exiting" -msgstr "未給 '%s' 服務指定提供者,正在結束" - -#, python-format msgid "No shared key in %s fields" msgstr "%s 欄位中沒有共用金鑰" -msgid "No versions callback provided in ResourceVersionsManager" -msgstr "ResourceVersionsManager 中未提供版本回呼" - msgid "Not allowed to manually assign a router to an agent in 'dvr' mode." 
msgstr "不容許將路由器手動指派給處於 'dvr' 模式的代理程式。" @@ -2489,11 +2426,6 @@ msgid "Resource body required" msgstr "需要資源主體" -msgid "" -"Resource name(s) that are supported in quota features. This option is now " -"deprecated for removal." -msgstr "配額功能中支援的資源名稱。現在,這個選項已遭到淘汰,將予以移除。" - msgid "Resource not found." msgstr "找不到資源。" @@ -3062,13 +2994,6 @@ "access." msgstr "有依賴於此存取原則的路由器已連接至此網路。" -msgid "" -"This will choose the web framework in which to run the Neutron API server. " -"'pecan' is a new experiemental rewrite of the API server." -msgstr "" -"這將選擇要在其中執行 Neutron API 伺服器的 Web 架構。'pecan' 是 API 伺服器的新" -"試驗性重新撰寫。" - msgid "Timeout" msgstr "逾時" @@ -3598,13 +3523,6 @@ "max_l3_agents_per_router %(max_agents)s 配置參數無效。它必須大於或等於 " "min_l3_agents_per_router %(min_agents)s。" -#, python-format -msgid "" -"min_l3_agents_per_router config parameter is not valid. It has to be equal " -"to or more than %s for HA." -msgstr "" -"min_l3_agents_per_router 配置參數無效。該配置參數必須等於或大於 HA 的 %s。" - msgid "must provide exactly 2 arguments - cidr and MAC" msgstr "必須提供 2 個確切引數 - cidr 和 MAC" diff -Nru neutron-9.0.0~b2~dev280/neutron/notifiers/nova.py neutron-9.0.0~b3~dev557/neutron/notifiers/nova.py --- neutron-9.0.0~b2~dev280/neutron/notifiers/nova.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/notifiers/nova.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,6 +15,7 @@ from keystoneauth1 import loading as ks_loading from neutron_lib import constants +from neutron_lib import exceptions as exc from novaclient import client as nova_client from novaclient import exceptions as nova_exceptions from oslo_config import cfg @@ -23,6 +24,9 @@ from sqlalchemy.orm import attributes as sql_attr from neutron._i18n import _LE, _LI, _LW +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.callbacks import resources from neutron import context from neutron import manager from neutron.notifiers import batch_notifier @@ -68,11 +72,21 @@ self.batch_notifier = batch_notifier.BatchNotifier( cfg.CONF.send_events_interval, self.send_events) + # register callbacks for events pertaining resources affecting Nova + callback_resources = ( + resources.FLOATING_IP, + resources.PORT, + ) + for resource in callback_resources: + registry.subscribe(self._send_nova_notification, + resource, events.BEFORE_RESPONSE) + def _is_compute_port(self, port): try: if (port['device_id'] and uuidutils.is_uuid_like(port['device_id']) - and port['device_owner'].startswith( - constants.DEVICE_OWNER_COMPUTE_PREFIX)): + and port['device_owner'].startswith(( + constants.DEVICE_OWNER_COMPUTE_PREFIX, + constants.DEVICE_OWNER_BAREMETAL_PREFIX))): return True except (KeyError, AttributeError): pass @@ -96,6 +110,11 @@ self._plugin_ref = manager.NeutronManager.get_plugin() return self._plugin_ref + def _send_nova_notification(self, resource, event, trigger, + action=None, original=None, data=None, + **kwargs): + self.send_network_change(action, original, data) + def send_network_change(self, action, original_obj, returned_obj): """Called when a network change is made that nova cares about. 
@@ -141,7 +160,12 @@
                 return
 
             ctx = context.get_admin_context()
-            port = self._plugin.get_port(ctx, port_id)
+            try:
+                port = self._plugin.get_port(ctx, port_id)
+            except exc.PortNotFound:
+                LOG.debug("Port %s was deleted, no need to send any "
+                          "notification", port_id)
+                return
 
         if port and self._is_compute_port(port):
             if action == 'delete_port':
@@ -149,26 +173,31 @@
             else:
                 return self._get_network_changed_event(port['device_id'])
 
-    def record_port_status_changed(self, port, current_port_status,
-                                   previous_port_status, initiator):
-        """Determine if nova needs to be notified due to port status change.
-        """
-        # clear out previous _notify_event
-        port._notify_event = None
-        # If there is no device_id set there is nothing we can do here.
-        if not port.device_id:
-            LOG.debug("device_id is not set on port yet.")
-            return
-
+    def _can_notify(self, port):
         if not port.id:
             LOG.warning(_LW("Port ID not set! Nova will not be notified of "
                             "port status change."))
-            return
+            return False
+
+        # If there is no device_id set there is nothing we can do here.
+        if not port.device_id:
+            LOG.debug("device_id is not set on port %s yet.", port.id)
+            return False
 
         # We only want to notify about nova ports.
         if not self._is_compute_port(port):
-            return
+            return False
+
+        return True
 
+    def record_port_status_changed(self, port, current_port_status,
+                                   previous_port_status, initiator):
+        """Determine if nova needs to be notified due to port status change.
+        """
+        # clear out previous _notify_event
+        port._notify_event = None
+        if not self._can_notify(port):
+            return
         # We notify nova when a vif is unplugged which only occurs when
         # the status goes from ACTIVE to DOWN.
         if (previous_port_status == constants.PORT_STATUS_ACTIVE and
@@ -205,6 +234,24 @@
             self.batch_notifier.queue_event(event)
             port._notify_event = None
 
+    def notify_port_active_direct(self, port):
+        """Notify nova about active port
+
+        Used when port was wired on the host other than port's current host
+        according to port binding. This happens during live migration.
+        In this case ml2 plugin skips port status update but we still need
+ """ + if not self._can_notify(port): + return + + port._notify_event = ( + {'server_uuid': port.device_id, + 'name': VIF_PLUGGED, + 'status': 'completed', + 'tag': port.id}) + self.send_port_status(None, None, port) + def send_events(self, batched_events): LOG.debug("Sending events: %s", batched_events) try: diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/address_scope.py neutron-9.0.0~b3~dev557/neutron/objects/address_scope.py --- neutron-9.0.0~b2~dev280/neutron/objects/address_scope.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/address_scope.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,7 +15,7 @@ from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import fields as obj_fields -from neutron.db import address_scope_db as models +from neutron.db.models import address_scope as models from neutron.objects import base from neutron.objects import common_types diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/base.py neutron-9.0.0~b3~dev557/neutron/objects/base.py --- neutron-9.0.0~b2~dev280/neutron/objects/base.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/base.py 2016-08-29 20:05:49.000000000 +0000 @@ -24,6 +24,7 @@ from neutron._i18n import _ from neutron.db import api as db_api from neutron.db import model_base +from neutron.db import standard_attr from neutron.objects.db import api as obj_db_api from neutron.objects.extensions import standardattributes @@ -68,6 +69,10 @@ "foreign key") +class NeutronSyntheticFieldsForeignKeysNotFound(exceptions.NeutronException): + message = _("%(child)s does not define a foreign key for %(parent)s") + + def get_updatable_fields(cls, fields): fields = fields.copy() for field in cls.fields_no_update: @@ -111,6 +116,12 @@ context, model, id=self.marker) return res + def __str__(self): + return str(self.__dict__) + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + @six.add_metaclass(abc.ABCMeta) class NeutronObject(obj_base.VersionedObject, @@ -196,21 +207,53 @@ def delete(self): raise NotImplementedError() + @classmethod + def count(cls, context, **kwargs): + '''Count the number of objects matching filtering criteria.''' + return len(cls.get_objects(context, **kwargs)) + class DeclarativeObject(abc.ABCMeta): def __init__(cls, name, bases, dct): super(DeclarativeObject, cls).__init__(name, bases, dct) + if 'project_id' in cls.fields: + obj_extra_fields_set = set(cls.obj_extra_fields) + obj_extra_fields_set.add('tenant_id') + cls.obj_extra_fields = list(obj_extra_fields_set) + setattr(cls, 'tenant_id', property(lambda x: x.project_id)) + + fields_no_update_set = set(cls.fields_no_update) for base in itertools.chain([cls], bases): + keys_set = set() if hasattr(base, 'primary_keys'): - cls.fields_no_update += base.primary_keys - # avoid duplicate entries - cls.fields_no_update = list(set(cls.fields_no_update)) + keys_set.update(base.primary_keys) + if hasattr(base, 'obj_extra_fields'): + keys_set.update(base.obj_extra_fields) + for key in keys_set: + if key in cls.fields or key in cls.obj_extra_fields: + fields_no_update_set.add(key) + cls.fields_no_update = list(fields_no_update_set) + + # generate unique_keys from the model + model = getattr(cls, 'db_model', None) + if model and not getattr(cls, 'unique_keys', None): + cls.unique_keys = [] + obj_field_names = set(cls.fields.keys()) + model_to_obj_translation = { + v: k for (k, v) in cls.fields_need_translation.items()} + + for model_unique_key in model_base.get_unique_keys(model): + 
obj_unique_key = [model_to_obj_translation.get(key, key) + for key in model_unique_key] + if obj_field_names.issuperset(obj_unique_key): + cls.unique_keys.append(obj_unique_key) + if (hasattr(cls, 'has_standard_attributes') and cls.has_standard_attributes()): standardattributes.add_standard_attributes(cls) # Instantiate extra filters per class - cls.extra_filter_names = set() + cls.extra_filter_names = set(cls.extra_filter_names) @six.add_metaclass(DeclarativeObject) @@ -221,6 +264,11 @@ primary_keys = ['id'] + # 'unique_keys' is a list of unique keys that can be used with get_object + # instead of 'primary_keys' (e.g. [['key1'], ['key2a', 'key2b']]). + # By default 'unique_keys' will be inherited from the 'db_model' + unique_keys = [] + # this is a dict to store the association between the foreign key and the # corresponding key in the main table, e.g. port extension have 'port_id' # as foreign key, that is associated with the key 'id' of the table Port, @@ -232,8 +280,20 @@ fields_no_update = [] # dict with name mapping: {'field_name_in_object': 'field_name_in_db'} + # It can be used also as DB relationship mapping to synthetic fields name. + # It is needed to load synthetic fields with one SQL query using side + # loaded entities. + # Examples: {'synthetic_field_name': 'relationship_name_in_model'} + # {'field_name_in_object': 'field_name_in_db'} fields_need_translation = {} + # obj_extra_fields defines properties that are not part of the model + # but we want to expose them for easier usage of the object. + # Handling of obj_extra_fields is in oslo.versionedobjects. + # The extra fields can be accessed as read only property and are exposed + # in to_dict() + # obj_extra_fields = [] + def from_db_object(self, *objs): db_objs = [self.modify_fields_from_db(db_obj) for db_obj in objs] for field in self.fields: @@ -241,13 +301,15 @@ if field in db_obj and not self.is_synthetic(field): setattr(self, field, db_obj[field]) break - self.load_synthetic_db_fields() + for obj in objs: + self.load_synthetic_db_fields(obj) self.obj_reset_changes() @classmethod def has_standard_attributes(cls): return bool(cls.db_model and - issubclass(cls.db_model, model_base.HasStandardAttributes)) + issubclass(cls.db_model, + standard_attr.HasStandardAttributes)) @classmethod def modify_fields_to_db(cls, fields): @@ -321,8 +383,10 @@ :param kwargs: multiple keys defined by key=value pairs :return: single object of NeutronDbObject class """ - missing_keys = set(cls.primary_keys).difference(kwargs.keys()) - if missing_keys: + lookup_keys = set(kwargs.keys()) + all_keys = itertools.chain([cls.primary_keys], cls.unique_keys) + if not any(lookup_keys.issuperset(keys) for keys in all_keys): + missing_keys = set(cls.primary_keys).difference(lookup_keys) raise NeutronPrimaryKeyMissing(object_class=cls.__class__, missing_keys=missing_keys) @@ -358,6 +422,12 @@ return (context.is_admin or context.tenant_id == db_obj.tenant_id) + @staticmethod + def filter_to_str(value): + if isinstance(value, list): + return [str(val) for val in value] + return str(value) + def _get_changed_persistent_fields(self): fields = self.obj_get_changes() for field in self.synthetic_fields: @@ -373,7 +443,7 @@ return fields - def load_synthetic_db_fields(self): + def load_synthetic_db_fields(self, db_obj=None): """ Load the synthetic fields that are stored in a different table from the main object. @@ -381,6 +451,7 @@ This method doesn't take care of loading synthetic fields that aren't stored in the DB, e.g. 'shared' in RBAC policy. 
""" + clsname = self.__class__.__name__ # TODO(rossella_s) Find a way to handle ObjectFields with # subclasses=True @@ -398,16 +469,35 @@ # QosRule continue objclass = objclasses[0] - if len(objclass.foreign_keys.keys()) > 1: + foreign_keys = objclass.foreign_keys.get(clsname) + if not foreign_keys: + raise NeutronSyntheticFieldsForeignKeysNotFound( + parent=clsname, child=objclass.__name__) + if len(foreign_keys.keys()) > 1: raise NeutronSyntheticFieldMultipleForeignKeys(field=field) - objs = objclass.get_objects( - self.obj_context, **{ - k: getattr( - self, v) for k, v in objclass.foreign_keys.items()}) + + synthetic_field_db_name = ( + self.fields_need_translation.get(field, field)) + synth_db_objs = (db_obj.get(synthetic_field_db_name, None) + if db_obj else None) + + # synth_db_objs can be list, empty list or None, that is why + # we need 'is not None', because [] is valid case for 'True' + if synth_db_objs is not None: + if not isinstance(synth_db_objs, list): + synth_db_objs = [synth_db_objs] + synth_objs = [objclass._load_object(self.obj_context, + objclass.modify_fields_from_db(obj)) + for obj in synth_db_objs] + else: + synth_objs = objclass.get_objects( + self.obj_context, **{ + k: getattr(self, v) + for k, v in foreign_keys.items()}) if isinstance(self.fields[field], obj_fields.ObjectField): - setattr(self, field, objs[0] if objs else None) + setattr(self, field, synth_objs[0] if synth_objs else None) else: - setattr(self, field, objs) + setattr(self, field, synth_objs) self.obj_reset_changes([field]) def create(self): @@ -429,8 +519,8 @@ keys[key] = getattr(self, key) return keys - def update_nonidentifying_fields(self, obj_data, reset_changes=False): - """Updates non-identifying fields of an object. + def update_fields(self, obj_data, reset_changes=False): + """Updates fields of an object that are not forbidden to be updated. :param obj_data: the full set of object data :type obj_data: dict @@ -440,11 +530,10 @@ :returns: None """ - if reset_changes: self.obj_reset_changes() for k, v in obj_data.items(): - if k not in self.primary_keys: + if k not in self.fields_no_update: setattr(self, k, v) def update(self): @@ -464,3 +553,17 @@ obj_db_api.delete_object(self.obj_context, self.db_model, **self.modify_fields_to_db( self._get_composite_keys())) + + @classmethod + def count(cls, context, **kwargs): + """ + Count the number of objects matching filtering criteria. + + :param context: + :param kwargs: multiple keys defined by key=value pairs + :return: number of matching objects + """ + cls.validate_filters(**kwargs) + return obj_db_api.count( + context, cls.db_model, **cls.modify_fields_to_db(kwargs) + ) diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/common_types.py neutron-9.0.0~b3~dev557/neutron/objects/common_types.py --- neutron-9.0.0~b2~dev280/neutron/objects/common_types.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/common_types.py 2016-08-29 20:05:49.000000000 +0000 @@ -11,14 +11,16 @@ # License for the specific language governing permissions and limitations # under the License. 
+import itertools + import netaddr -from neutron_lib import constants as n_const +from neutron_lib import constants as lib_constants +from neutron_lib import exceptions from oslo_versionedobjects import fields as obj_fields import six from neutron._i18n import _ from neutron.common import constants -from neutron.common import exceptions class NeutronRangeConstrainedIntegerInvalidLimit(exceptions.NeutronException): @@ -27,7 +29,7 @@ class IPV6ModeEnumField(obj_fields.AutoTypedField): - AUTO_TYPE = obj_fields.Enum(valid_values=constants.IPV6_MODES) + AUTO_TYPE = obj_fields.Enum(valid_values=lib_constants.IPV6_MODES) class RangeConstrainedInteger(obj_fields.Integer): @@ -54,7 +56,7 @@ """IP network (CIDR) prefix length custom Enum""" def __init__(self, **kwargs): super(IPNetworkPrefixLen, self).__init__( - start=0, end=n_const.IPv6_BITS, + start=0, end=lib_constants.IPv6_BITS, **kwargs) @@ -62,6 +64,16 @@ AUTO_TYPE = IPNetworkPrefixLen() +class PortRange(RangeConstrainedInteger): + def __init__(self, **kwargs): + super(PortRange, self).__init__(start=constants.PORT_RANGE_MIN, + end=constants.PORT_RANGE_MAX, **kwargs) + + +class PortRangeField(obj_fields.AutoTypedField): + AUTO_TYPE = PortRange() + + class ListOfIPNetworksField(obj_fields.AutoTypedField): AUTO_TYPE = obj_fields.List(obj_fields.IPNetwork()) @@ -121,9 +133,21 @@ AUTO_TYPE = obj_fields.Enum(valid_values=constants.VALID_ETHERTYPES) +class IpProtocolEnum(obj_fields.Enum): + """IP protocol number Enum""" + def __init__(self, **kwargs): + super(IpProtocolEnum, self).__init__( + valid_values=list( + itertools.chain( + lib_constants.IP_PROTOCOL_MAP.keys(), + [str(v) for v in lib_constants.IP_PROTOCOL_MAP.values()] + ) + ), + **kwargs) + + class IpProtocolEnumField(obj_fields.AutoTypedField): - AUTO_TYPE = obj_fields.Enum( - valid_values=list(n_const.IP_PROTOCOL_MAP.keys())) + AUTO_TYPE = IpProtocolEnum() class MACAddress(obj_fields.FieldType): diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/db/api.py neutron-9.0.0~b3~dev557/neutron/objects/db/api.py --- neutron-9.0.0~b2~dev280/neutron/objects/db/api.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/db/api.py 2016-08-29 20:05:49.000000000 +0000 @@ -17,12 +17,21 @@ # Common database operation implementations -def get_object(context, model, **kwargs): - # TODO(jlibosva): decompose _model_query from plugin instance +def _get_filter_query(context, model, **kwargs): + # TODO(jlibosva): decompose _get_collection_query from plugin instance plugin = manager.NeutronManager.get_plugin() with context.session.begin(subtransactions=True): - return plugin._model_query(context, model).filter_by( - **kwargs).first() + filters = _kwargs_to_filters(**kwargs) + query = plugin._get_collection_query(context, model, filters) + return query + + +def get_object(context, model, **kwargs): + return _get_filter_query(context, model, **kwargs).first() + + +def count(context, model, **kwargs): + return _get_filter_query(context, model, **kwargs).count() def _kwargs_to_filters(**kwargs): diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/extensions/port_security.py neutron-9.0.0~b3~dev557/neutron/objects/extensions/port_security.py --- neutron-9.0.0~b2~dev280/neutron/objects/extensions/port_security.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/extensions/port_security.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,24 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with 
the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_versionedobjects import fields as obj_fields + +from neutron.extensions import portsecurity +from neutron.objects import base + + +class _PortSecurity(base.NeutronDbObject): + fields = { + 'id': obj_fields.UUIDField(), + 'port_security_enabled': obj_fields.BooleanField( + default=portsecurity.DEFAULT_PORT_SECURITY), + } diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/extensions/standardattributes.py neutron-9.0.0~b3~dev557/neutron/objects/extensions/standardattributes.py --- neutron-9.0.0~b2~dev280/neutron/objects/extensions/standardattributes.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/extensions/standardattributes.py 2016-08-03 20:10:34.000000000 +0000 @@ -13,6 +13,7 @@ from oslo_versionedobjects import fields as obj_fields STANDARD_ATTRIBUTES = { + 'revision_number': obj_fields.IntegerField(), 'description': obj_fields.StringField(nullable=True), 'created_at': obj_fields.DateTimeField(nullable=True, tzinfo_aware=False), 'updated_at': obj_fields.DateTimeField(nullable=True, tzinfo_aware=False), diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/network/extensions/port_security.py neutron-9.0.0~b3~dev557/neutron/objects/network/extensions/port_security.py --- neutron-9.0.0~b2~dev280/neutron/objects/network/extensions/port_security.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/network/extensions/port_security.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,26 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_versionedobjects import base as obj_base + +from neutron.db.port_security import models +from neutron.objects.extensions import port_security as base_ps + + +@obj_base.VersionedObjectRegistry.register +class NetworkPortSecurity(base_ps._PortSecurity): + # Version 1.0: Initial version + VERSION = "1.0" + + fields_need_translation = {'id': 'network_id'} + + db_model = models.NetworkSecurityBinding diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/network/network_segment.py neutron-9.0.0~b3~dev557/neutron/objects/network/network_segment.py --- neutron-9.0.0~b2~dev280/neutron/objects/network/network_segment.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/network/network_segment.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_versionedobjects import base as obj_base +from oslo_versionedobjects import fields as obj_fields + +from neutron.db import segments_db as segment_model +from neutron.objects import base + + +@obj_base.VersionedObjectRegistry.register +class NetworkSegment(base.NeutronDbObject): + # Version 1.0: Initial version + VERSION = '1.0' + + db_model = segment_model.NetworkSegment + + fields = { + 'id': obj_fields.UUIDField(), + 'network_id': obj_fields.UUIDField(), + 'network_type': obj_fields.StringField(), + 'physical_network': obj_fields.StringField(nullable=True), + 'segmentation_id': obj_fields.IntegerField(nullable=True), + 'is_dynamic': obj_fields.BooleanField(default=False), + 'segment_index': obj_fields.IntegerField(default=0) + } + + foreign_keys = {'Network': {'network_id': 'id'}} diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/port/extensions/allowedaddresspairs.py neutron-9.0.0~b3~dev557/neutron/objects/port/extensions/allowedaddresspairs.py --- neutron-9.0.0~b2~dev280/neutron/objects/port/extensions/allowedaddresspairs.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/port/extensions/allowedaddresspairs.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,7 +14,7 @@ from oslo_versionedobjects import fields as obj_fields from neutron.common import utils -from neutron.db.allowed_address_pairs import models +from neutron.db.models import allowed_address_pair as models from neutron.objects import base from neutron.objects import common_types @@ -40,9 +40,9 @@ def modify_fields_to_db(cls, fields): result = super(AllowedAddressPair, cls).modify_fields_to_db(fields) if 'ip_address' in result: - result['ip_address'] = str(result['ip_address']) + result['ip_address'] = cls.filter_to_str(result['ip_address']) if 'mac_address' in result: - result['mac_address'] = str(result['mac_address']) + result['mac_address'] = cls.filter_to_str(result['mac_address']) return result # TODO(mhickey): get rid of it once we switch the db model to using diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/port/extensions/port_security.py neutron-9.0.0~b3~dev557/neutron/objects/port/extensions/port_security.py --- neutron-9.0.0~b2~dev280/neutron/objects/port/extensions/port_security.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/port/extensions/port_security.py 2016-08-03 20:10:34.000000000 +0000 @@ -1,5 +1,3 @@ -# Copyright 2013 VMware, Inc. All rights reserved. -# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -13,22 +11,16 @@ # under the License. 
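The new NetworkSegment object defined above can be queried like any other NeutronDbObject; a sketch, not part of the patch, where ctx is an admin context as in the earlier sketch and the UUID is a placeholder:

from neutron.objects.network.network_segment import NetworkSegment

# fetch all segments of one network via the declared network_id field
segments = NetworkSegment.get_objects(ctx, network_id='<network-uuid>')
dynamic_segments = [s for s in segments if s.is_dynamic]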
from oslo_versionedobjects import base as obj_base -from oslo_versionedobjects import fields as obj_fields -from neutron.db import portsecurity_db_common as models -from neutron.objects import base +from neutron.db.port_security import models +from neutron.objects.extensions import port_security as base_ps @obj_base.VersionedObjectRegistry.register -class PortSecurity(base.NeutronDbObject): +class PortSecurity(base_ps._PortSecurity): # Version 1.0: Initial version VERSION = "1.0" - db_model = models.PortSecurityBinding - - primary_keys = ['port_id'] + fields_need_translation = {'id': 'port_id'} - fields = { - 'port_id': obj_fields.UUIDField(), - 'port_security_enabled': obj_fields.BooleanField(default=True), - } + db_model = models.PortSecurityBinding diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/qos/rule.py neutron-9.0.0~b3~dev557/neutron/objects/qos/rule.py --- neutron-9.0.0~b2~dev280/neutron/objects/qos/rule.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/qos/rule.py 2016-08-03 20:10:34.000000000 +0000 @@ -17,7 +17,9 @@ import sys from neutron_lib import constants +from oslo_utils import versionutils from oslo_versionedobjects import base as obj_base +from oslo_versionedobjects import exception from oslo_versionedobjects import fields as obj_fields import six @@ -107,4 +109,11 @@ DSCP_MARK: common_types.DscpMarkField(), } - rule_type = qos_consts.RULE_TYPE_DSCP_MARK + rule_type = qos_consts.RULE_TYPE_DSCP_MARKING + + def obj_make_compatible(self, primitive, target_version): + _target_version = versionutils.convert_version_to_tuple(target_version) + if _target_version < (1, 1): + raise exception.IncompatibleObjectVersion( + objver=target_version, + objname="QosDscpMarkingRule") diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/qos/rule_type.py neutron-9.0.0~b3~dev557/neutron/objects/qos/rule_type.py --- neutron-9.0.0~b2~dev280/neutron/objects/qos/rule_type.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/qos/rule_type.py 2016-08-03 20:10:34.000000000 +0000 @@ -32,7 +32,6 @@ # Version 1.1: Added DscpMarkingRule VERSION = '1.1' - #TODO(davidsha) add obj_make_compatible and associated tests. fields = { 'type': RuleTypeField(), } diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/rbac_db.py neutron-9.0.0~b3~dev557/neutron/objects/rbac_db.py --- neutron-9.0.0~b2~dev280/neutron/objects/rbac_db.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/rbac_db.py 2016-08-03 20:10:34.000000000 +0000 @@ -51,16 +51,35 @@ :returns: set -- a set of tenants' ids dependent on this object. """ + @staticmethod + def is_network_shared(context, rbac_entries): + # NOTE(korzen) this method is copied from db_base_plugin_common. + # The shared attribute for a network now reflects if the network + # is shared to the calling tenant via an RBAC entry. 
+ matches = ('*',) + ((context.tenant_id,) if context else ()) + for entry in rbac_entries: + if (entry.action == models.ACCESS_SHARED and + entry.target_tenant in matches): + return True + return False + + @staticmethod + def get_shared_with_tenant(context, rbac_db_model, obj_id, tenant_id): + # NOTE(korzen) This method makes it possible to query within an + # already started session + return (common_db_mixin.model_query(context, rbac_db_model).filter( + and_(rbac_db_model.object_id == obj_id, + rbac_db_model.action == models.ACCESS_SHARED, + rbac_db_model.target_tenant.in_( + ['*', tenant_id]))).count() != 0) + @classmethod def is_shared_with_tenant(cls, context, obj_id, tenant_id): ctx = context.elevated() rbac_db_model = cls.rbac_db_model with ctx.session.begin(subtransactions=True): - return (common_db_mixin.model_query(ctx, rbac_db_model).filter( - and_(rbac_db_model.object_id == obj_id, - rbac_db_model.action == models.ACCESS_SHARED, - rbac_db_model.target_tenant.in_( - ['*', tenant_id]))).count() != 0) + return cls.get_shared_with_tenant(ctx, rbac_db_model, + obj_id, tenant_id) @classmethod def is_accessible(cls, context, db_obj): diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/securitygroup.py neutron-9.0.0~b3~dev557/neutron/objects/securitygroup.py --- neutron-9.0.0~b2~dev280/neutron/objects/securitygroup.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/securitygroup.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,131 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
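The membership test at the heart of is_network_shared() above can be isolated into a tiny standalone sketch; this is not part of the patch, the ACCESS_SHARED value mirrors neutron.db.rbac_db_models, and the entry objects are stand-ins:

ACCESS_SHARED = 'access_as_shared'

class FakeRbacEntry(object):
    def __init__(self, action, target_tenant):
        self.action = action
        self.target_tenant = target_tenant

def is_shared_with(tenant_id, rbac_entries):
    # same logic: a wildcard '*' entry or one naming the calling tenant
    # grants shared access
    matches = ('*', tenant_id)
    return any(entry.action == ACCESS_SHARED and
               entry.target_tenant in matches for entry in rbac_entries)

assert is_shared_with('tenant-a', [FakeRbacEntry(ACCESS_SHARED, '*')])
assert not is_shared_with('tenant-b', [FakeRbacEntry(ACCESS_SHARED, 'tenant-a')])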
+ +from oslo_versionedobjects import base as obj_base +from oslo_versionedobjects import fields as obj_fields + +from neutron.common import utils +from neutron.db import api as db_api +from neutron.db.models import securitygroup as sg_models +from neutron.objects import base +from neutron.objects import common_types + + +@obj_base.VersionedObjectRegistry.register +class SecurityGroup(base.NeutronDbObject): + # Version 1.0: Initial version + VERSION = '1.0' + + db_model = sg_models.SecurityGroup + + fields = { + 'id': obj_fields.UUIDField(), + 'name': obj_fields.StringField(nullable=True), + 'project_id': obj_fields.StringField(nullable=True), + 'is_default': obj_fields.BooleanField(default=False), + 'rules': obj_fields.ListOfObjectsField( + 'SecurityGroupRule', nullable=True + ), + # NOTE(ihrachys): we don't include source_rules that is present in the + # model until we realize it's actually needed + } + + fields_no_update = ['project_id', 'is_default'] + + synthetic_fields = ['is_default', 'rules'] + + extra_filter_names = {'is_default'} + + def create(self): + # save is_default before super() resets it to False + is_default = self.is_default + with db_api.autonested_transaction(self.obj_context.session): + super(SecurityGroup, self).create() + if is_default: + default_group = _DefaultSecurityGroup( + self.obj_context, + project_id=self.project_id, + security_group_id=self.id) + default_group.create() + self.is_default = True + self.obj_reset_changes(['is_default']) + + def from_db_object(self, *objs): + super(SecurityGroup, self).from_db_object(*objs) + for obj in objs: + self._load_is_default(obj) + + def _load_is_default(self, db_obj): + setattr(self, 'is_default', bool(db_obj.get('default_security_group'))) + self.obj_reset_changes(['is_default']) + + +@obj_base.VersionedObjectRegistry.register +class _DefaultSecurityGroup(base.NeutronDbObject): + # Version 1.0: Initial version + VERSION = '1.0' + + db_model = sg_models.DefaultSecurityGroup + + fields = { + 'project_id': obj_fields.StringField(), + 'security_group_id': obj_fields.UUIDField(), + } + + fields_no_update = ['security_group_id'] + + primary_keys = ['project_id'] + + +@obj_base.VersionedObjectRegistry.register +class SecurityGroupRule(base.NeutronDbObject): + # Version 1.0: Initial version + VERSION = '1.0' + + db_model = sg_models.SecurityGroupRule + + fields = { + 'id': obj_fields.UUIDField(), + 'project_id': obj_fields.StringField(nullable=True), + 'security_group_id': obj_fields.UUIDField(), + 'remote_group_id': obj_fields.UUIDField(nullable=True), + 'direction': common_types.FlowDirectionEnumField(nullable=True), + 'ethertype': common_types.EtherTypeEnumField(nullable=True), + 'protocol': common_types.IpProtocolEnumField(nullable=True), + 'port_range_min': common_types.PortRangeField(nullable=True), + 'port_range_max': common_types.PortRangeField(nullable=True), + 'remote_ip_prefix': obj_fields.IPNetworkField(nullable=True), + } + + foreign_keys = {'SecurityGroup': {'security_group_id': 'id'}} + + fields_no_update = ['project_id', 'security_group_id'] + + # TODO(sayalilunkad): get rid of it once we switch the db model to using + # custom types. + @classmethod + def modify_fields_to_db(cls, fields): + result = super(SecurityGroupRule, cls).modify_fields_to_db(fields) + remote_ip_prefix = result.get('remote_ip_prefix') + if remote_ip_prefix: + result['remote_ip_prefix'] = cls.filter_to_str(remote_ip_prefix) + return result + + # TODO(sayalilunkad): get rid of it once we switch the db model to using + # custom types. 
+ @classmethod + def modify_fields_from_db(cls, db_obj): + fields = super(SecurityGroupRule, cls).modify_fields_from_db(db_obj) + if 'remote_ip_prefix' in fields: + fields['remote_ip_prefix'] = ( + utils.AuthenticIPNetwork(fields['remote_ip_prefix'])) + return fields diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/subnetpool.py neutron-9.0.0~b3~dev557/neutron/objects/subnetpool.py --- neutron-9.0.0~b2~dev280/neutron/objects/subnetpool.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/subnetpool.py 2016-08-03 20:10:34.000000000 +0000 @@ -14,7 +14,6 @@ # under the License. import netaddr -from oslo_log import log from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import fields as obj_fields @@ -23,8 +22,6 @@ from neutron.objects import base from neutron.objects import common_types -LOG = log.getLogger(__name__) - @obj_base.VersionedObjectRegistry.register class SubnetPool(base.NeutronDbObject): @@ -157,7 +154,7 @@ def modify_fields_to_db(cls, fields): result = super(SubnetPoolPrefix, cls).modify_fields_to_db(fields) if 'cidr' in result: - result['cidr'] = str(result['cidr']) + result['cidr'] = cls.filter_to_str(result['cidr']) return result # TODO(ihrachys): get rid of it once we switch the db model to using CIDR diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/subnet.py neutron-9.0.0~b3~dev557/neutron/objects/subnet.py --- neutron-9.0.0~b2~dev280/neutron/objects/subnet.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/subnet.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,9 +15,12 @@ from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import fields as obj_fields +from neutron.common import utils from neutron.db import models_v2 +from neutron.db import rbac_db_models from neutron.objects import base from neutron.objects import common_types +from neutron.objects import rbac_db @obj_base.VersionedObjectRegistry.register @@ -29,7 +32,7 @@ primary_keys = ['address', 'subnet_id'] - foreign_keys = {'subnet_id': 'id'} + foreign_keys = {'Subnet': {'subnet_id': 'id'}} fields = { 'address': obj_fields.StringField(), @@ -37,6 +40,18 @@ 'order': obj_fields.IntegerField() } + @classmethod + def get_objects(cls, context, _pager=None, **kwargs): + """Fetch DNSNameServer objects with default sort by 'order' field. 
+ """ + if not _pager: + _pager = base.Pager() + if not _pager.sorts: + # (NOTE) True means ASC, False is DESC + _pager.sorts = [('order', True)] + return super(DNSNameServer, cls).get_objects(context, _pager, + **kwargs) + @obj_base.VersionedObjectRegistry.register class Route(base.NeutronDbObject): @@ -47,11 +62,11 @@ primary_keys = ['destination', 'nexthop', 'subnet_id'] - foreign_keys = {'subnet_id': 'id'} + foreign_keys = {'Subnet': {'subnet_id': 'id'}} fields = { 'subnet_id': obj_fields.UUIDField(), - 'destination': obj_fields.IPNetworkField(), + 'destination': common_types.IPNetworkField(), 'nexthop': obj_fields.IPAddressField() } @@ -60,7 +75,8 @@ # TODO(korzen) remove this method when IP and CIDR decorator ready result = super(Route, cls).modify_fields_from_db(db_obj) if 'destination' in result: - result['destination'] = netaddr.IPNetwork(result['destination']) + result['destination'] = utils.AuthenticIPNetwork( + result['destination']) if 'nexthop' in result: result['nexthop'] = netaddr.IPAddress(result['nexthop']) return result @@ -70,9 +86,9 @@ # TODO(korzen) remove this method when IP and CIDR decorator ready result = super(Route, cls).modify_fields_to_db(fields) if 'destination' in result: - result['destination'] = str(result['destination']) + result['destination'] = cls.filter_to_str(result['destination']) if 'nexthop' in fields: - result['nexthop'] = str(result['nexthop']) + result['nexthop'] = cls.filter_to_str(result['nexthop']) return result @@ -83,7 +99,7 @@ db_model = models_v2.IPAllocationPool - foreign_keys = {'subnet_id': 'id'} + foreign_keys = {'Subnet': {'subnet_id': 'id'}} fields_need_translation = { 'start': 'first_ip', @@ -112,12 +128,19 @@ # TODO(korzen) remove this method when IP and CIDR decorator ready result = super(IPAllocationPool, cls).modify_fields_to_db(fields) if 'first_ip' in result: - result['first_ip'] = str(result['first_ip']) + result['first_ip'] = cls.filter_to_str(result['first_ip']) if 'last_ip' in result: - result['last_ip'] = str(result['last_ip']) + result['last_ip'] = cls.filter_to_str(result['last_ip']) return result +# RBAC metaclass is not applied here because 'shared' attribute of Subnet +# is dependent on Network 'shared' state, and in Subnet object +# it can be read-only. 
The necessary changes are applied manually: +# - defined 'shared' attribute in 'fields' +# - added 'shared' to synthetic_fields +# - registered extra_filter_name for 'shared' attribute +# - added loading shared attribute based on network 'rbac_entries' @obj_base.VersionedObjectRegistry.register class Subnet(base.NeutronDbObject): # Version 1.0: Initial version @@ -127,16 +150,18 @@ fields = { 'id': obj_fields.UUIDField(), - 'project_id': obj_fields.UUIDField(), - 'name': obj_fields.StringField(), + 'project_id': obj_fields.StringField(nullable=True), + 'name': obj_fields.StringField(nullable=True), 'network_id': obj_fields.UUIDField(), + 'segment_id': obj_fields.UUIDField(nullable=True), 'subnetpool_id': obj_fields.UUIDField(nullable=True), 'ip_version': common_types.IPVersionEnumField(), - 'cidr': obj_fields.IPNetworkField(), + 'cidr': common_types.IPNetworkField(), 'gateway_ip': obj_fields.IPAddressField(nullable=True), 'allocation_pools': obj_fields.ListOfObjectsField('IPAllocationPool', nullable=True), - 'enable_dhcp': obj_fields.BooleanField(), + 'enable_dhcp': obj_fields.BooleanField(nullable=True), + 'shared': obj_fields.BooleanField(nullable=True), 'dns_nameservers': obj_fields.ListOfObjectsField('DNSNameServer', nullable=True), 'host_routes': obj_fields.ListOfObjectsField('Route', nullable=True), @@ -144,20 +169,57 @@ 'ipv6_address_mode': common_types.IPV6ModeEnumField(nullable=True) } - synthetic_fields = ['allocation_pools', 'dns_nameservers', 'host_routes'] + synthetic_fields = ['allocation_pools', 'dns_nameservers', 'host_routes', + 'shared'] - foreign_keys = {'network_id': 'id'} + foreign_keys = {'Network': {'network_id': 'id'}} + + fields_no_update = ['project_id'] fields_need_translation = { - 'project_id': 'tenant_id' + 'project_id': 'tenant_id', + 'host_routes': 'routes' } + def __init__(self, context=None, **kwargs): + super(Subnet, self).__init__(context, **kwargs) + self.add_extra_filter_name('shared') + + def obj_load_attr(self, attrname): + if attrname == 'shared': + return self._load_shared() + super(Subnet, self).obj_load_attr(attrname) + + def _load_shared(self, db_obj=None): + if db_obj: + # NOTE(korzen) db_obj is passed when Subnet object is loaded + # from DB + rbac_entries = db_obj.get('rbac_entries') or {} + shared = (rbac_db.RbacNeutronDbObjectMixin. + is_network_shared(self.obj_context, rbac_entries)) + else: + # NOTE(korzen) this case is used when Subnet object was + # instantiated and without DB interaction (get_object(s), update, + # create), it should be rare case to load 'shared' by that method + shared = (rbac_db.RbacNeutronDbObjectMixin. 
+ get_shared_with_tenant(self.obj_context.elevated(), + rbac_db_models.NetworkRBAC, + self.network_id, + self.project_id)) + setattr(self, 'shared', shared) + self.obj_reset_changes(['shared']) + + def from_db_object(self, *objs): + super(Subnet, self).from_db_object(*objs) + for obj in objs: + self._load_shared(obj) + @classmethod def modify_fields_from_db(cls, db_obj): # TODO(korzen) remove this method when IP and CIDR decorator ready result = super(Subnet, cls).modify_fields_from_db(db_obj) if 'cidr' in result: - result['cidr'] = netaddr.IPNetwork(result['cidr']) + result['cidr'] = utils.AuthenticIPNetwork(result['cidr']) if 'gateway_ip' in result and result['gateway_ip'] is not None: result['gateway_ip'] = netaddr.IPAddress(result['gateway_ip']) return result @@ -167,7 +229,7 @@ # TODO(korzen) remove this method when IP and CIDR decorator ready result = super(Subnet, cls).modify_fields_to_db(fields) if 'cidr' in result: - result['cidr'] = str(result['cidr']) + result['cidr'] = cls.filter_to_str(result['cidr']) if 'gateway_ip' in result and result['gateway_ip'] is not None: - result['gateway_ip'] = str(result['gateway_ip']) + result['gateway_ip'] = cls.filter_to_str(result['gateway_ip']) return result diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/trunk.py neutron-9.0.0~b3~dev557/neutron/objects/trunk.py --- neutron-9.0.0~b2~dev280/neutron/objects/trunk.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/trunk.py 2016-08-29 20:05:49.000000000 +0000 @@ -32,7 +32,7 @@ db_model = models.SubPort primary_keys = ['port_id'] - foreign_keys = {'trunk_id': 'id'} + foreign_keys = {'Trunk': {'trunk_id': 'id'}} fields = { 'port_id': obj_fields.UUIDField(), @@ -43,6 +43,12 @@ fields_no_update = ['segmentation_type', 'segmentation_id'] + def to_dict(self): + _dict = super(SubPort, self).to_dict() + # trunk_id is redundant in the subport dict. 
+ _dict.pop('trunk_id') + return _dict + def create(self): with db_api.autonested_transaction(self.obj_context.session): try: @@ -66,6 +72,11 @@ raise t_exc.TrunkNotFound(trunk_id=self.trunk_id) raise n_exc.PortNotFound(port_id=self.port_id) + except base.NeutronDbObjectDuplicateEntry: + raise t_exc.DuplicateSubPort( + segmentation_type=self.segmentation_type, + segmentation_id=self.segmentation_id, + trunk_id=self.trunk_id) @obj_base.VersionedObjectRegistry.register @@ -76,9 +87,12 @@ db_model = models.Trunk fields = { + 'admin_state_up': obj_fields.BooleanField(), 'id': obj_fields.UUIDField(), 'tenant_id': obj_fields.StringField(), + 'name': obj_fields.StringField(), 'port_id': obj_fields.UUIDField(), + 'status': obj_fields.StringField(), 'sub_ports': obj_fields.ListOfObjectsField(SubPort.__name__), } @@ -97,10 +111,12 @@ except o_db_exc.DBReferenceError: raise n_exc.PortNotFound(port_id=self.port_id) - for sub_port in sub_ports: - sub_port.trunk_id = self.id - sub_port.create() - self.load_synthetic_db_fields() + if sub_ports: + for sub_port in sub_ports: + sub_port.trunk_id = self.id + sub_port.create() + self.sub_ports.append(sub_port) + self.obj_reset_changes(['sub_ports']) # TODO(ivc): add support for 'sub_ports' in 'Trunk.update' for # consistency with 'Trunk.create' diff -Nru neutron-9.0.0~b2~dev280/neutron/objects/utils.py neutron-9.0.0~b3~dev557/neutron/objects/utils.py --- neutron-9.0.0~b2~dev280/neutron/objects/utils.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/objects/utils.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
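The to_dict() override above simply drops the redundant key; a sketch of the observable behavior, not part of the patch, where ctx is an admin context as in the earlier sketches and the UUIDs are placeholders:

from neutron.objects import trunk as trunk_obj

sp = trunk_obj.SubPort(ctx, port_id='<port-uuid>', trunk_id='<trunk-uuid>',
                       segmentation_type='vlan', segmentation_id=100)
d = sp.to_dict()
assert 'trunk_id' not in d  # dropped as redundant in the subport dict
assert d['segmentation_id'] == 100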
+ +import copy + +from neutron.common import exceptions + + +def convert_filters(**kwargs): + result = copy.deepcopy(kwargs) + if 'tenant_id' in result: + if 'project_id' in result: + raise exceptions.TenantIdProjectIdFilterConflict() + + result['project_id'] = result.pop('tenant_id') + return result diff -Nru neutron-9.0.0~b2~dev280/neutron/opts.py neutron-9.0.0~b3~dev557/neutron/opts.py --- neutron-9.0.0~b2~dev280/neutron/opts.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/opts.py 2016-08-29 20:05:49.000000000 +0000 @@ -17,10 +17,8 @@ from keystoneauth1 import loading as ks_loading from oslo_config import cfg +import neutron.agent.agent_extensions_manager import neutron.agent.common.config -import neutron.agent.common.ovs_lib -import neutron.agent.dhcp.config -import neutron.agent.l2.extensions.manager import neutron.agent.l3.config import neutron.agent.l3.ha import neutron.agent.linux.interface @@ -30,8 +28,18 @@ import neutron.agent.ovsdb.api import neutron.agent.securitygroups_rpc import neutron.common.cache_utils +import neutron.conf.agent.dhcp +import neutron.conf.agent.l3.config +import neutron.conf.agent.ovs_conf +import neutron.conf.common +import neutron.conf.extensions.allowedaddresspairs +import neutron.conf.plugins.ml2.drivers.agent +import neutron.conf.plugins.ml2.drivers.linuxbridge import neutron.conf.quota import neutron.conf.service +import neutron.conf.services.metering_agent +import neutron.conf.services.qos_driver_manager +import neutron.conf.wsgi import neutron.db.agents_db import neutron.db.agentschedulers_db import neutron.db.dvr_mac_db @@ -41,12 +49,9 @@ import neutron.db.l3_gwmode_db import neutron.db.l3_hamode_db import neutron.db.migration.cli -import neutron.extensions.allowedaddresspairs import neutron.extensions.l3 import neutron.extensions.securitygroup import neutron.plugins.ml2.config -import neutron.plugins.ml2.drivers.agent.config -import neutron.plugins.ml2.drivers.linuxbridge.agent.common.config import neutron.plugins.ml2.drivers.macvtap.agent.config import neutron.plugins.ml2.drivers.mech_sriov.agent.common.config import neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver @@ -56,8 +61,6 @@ import neutron.plugins.ml2.drivers.type_gre import neutron.plugins.ml2.drivers.type_vlan import neutron.plugins.ml2.drivers.type_vxlan -import neutron.services.metering.agents.metering_agent -import neutron.services.qos.notification_drivers.manager import neutron.wsgi @@ -98,7 +101,8 @@ def list_extension_opts(): return [ ('DEFAULT', - neutron.extensions.allowedaddresspairs.allowed_address_pair_opts), + neutron.conf.extensions.allowedaddresspairs + .allowed_address_pair_opts), ('quotas', itertools.chain( neutron.conf.quota.l3_quota_opts, @@ -129,14 +133,14 @@ return [ ('DEFAULT', itertools.chain( - neutron.common.config.core_cli_opts, - neutron.common.config.core_opts, - neutron.wsgi.socket_opts, + neutron.conf.common.core_cli_opts, + neutron.conf.common.core_opts, + neutron.conf.wsgi.socket_opts, neutron.conf.service.service_opts) ), - (neutron.common.config.NOVA_CONF_SECTION, + (neutron.conf.common.NOVA_CONF_SECTION, itertools.chain( - neutron.common.config.nova_opts) + neutron.conf.common.nova_opts) ), ('quotas', neutron.conf.quota.core_quota_opts) ] @@ -145,7 +149,7 @@ def list_qos_opts(): return [ ('qos', - neutron.services.qos.notification_drivers.manager.QOS_PLUGIN_OPTS) + neutron.conf.services.qos_driver_manager.QOS_PLUGIN_OPTS) ] @@ -155,9 +159,13 @@ itertools.chain( neutron.agent.linux.interface.OPTS, 
neutron.agent.common.config.INTERFACE_DRIVER_OPTS, - neutron.agent.common.ovs_lib.OPTS) + neutron.conf.agent.ovs_conf.OPTS) ), - ('AGENT', neutron.agent.common.config.AGENT_STATE_OPTS) + ('AGENT', + itertools.chain( + neutron.agent.common.config.AGENT_STATE_OPTS, + neutron.agent.common.config.AVAILABILITY_ZONE_OPTS) + ) ] @@ -165,9 +173,9 @@ return [ ('DEFAULT', itertools.chain( - neutron.agent.dhcp.config.DHCP_AGENT_OPTS, - neutron.agent.dhcp.config.DHCP_OPTS, - neutron.agent.dhcp.config.DNSMASQ_OPTS) + neutron.conf.agent.dhcp.DHCP_AGENT_OPTS, + neutron.conf.agent.dhcp.DHCP_OPTS, + neutron.conf.agent.dhcp.DNSMASQ_OPTS) ) ] @@ -175,15 +183,16 @@ def list_linux_bridge_opts(): return [ ('linux_bridge', - neutron.plugins.ml2.drivers.linuxbridge.agent.common.config. - bridge_opts), + neutron.conf.plugins.ml2.drivers.linuxbridge.bridge_opts), ('vxlan', - neutron.plugins.ml2.drivers.linuxbridge.agent.common.config. - vxlan_opts), + neutron.conf.plugins.ml2.drivers.linuxbridge.vxlan_opts), ('agent', - neutron.plugins.ml2.drivers.agent.config.agent_opts), + itertools.chain( + neutron.conf.plugins.ml2.drivers.agent.agent_opts, + neutron.agent.agent_extensions_manager.AGENT_EXT_MANAGER_OPTS) + ), ('securitygroup', - neutron.agent.securitygroups_rpc.security_group_opts) + neutron.conf.agent.securitygroups_rpc.security_group_opts) ] @@ -191,7 +200,7 @@ return [ ('DEFAULT', itertools.chain( - neutron.agent.l3.config.OPTS, + neutron.conf.agent.l3.config.OPTS, neutron.conf.service.service_opts, neutron.agent.l3.ha.OPTS, neutron.agent.linux.pd.OPTS, @@ -205,9 +214,9 @@ ('macvtap', neutron.plugins.ml2.drivers.macvtap.agent.config.macvtap_opts), ('agent', - neutron.plugins.ml2.drivers.agent.config.agent_opts), + neutron.conf.plugins.ml2.drivers.agent.agent_opts), ('securitygroup', - neutron.agent.securitygroups_rpc.security_group_opts) + neutron.conf.agent.securitygroups_rpc.security_group_opts) ] @@ -228,8 +237,7 @@ return [ ('DEFAULT', itertools.chain( - neutron.services.metering.agents.metering_agent.MeteringAgent. - Opts, + neutron.conf.services.metering_agent.metering_agent_opts, neutron.agent.common.config.INTERFACE_DRIVER_OPTS) ) ] @@ -250,7 +258,7 @@ ('ml2_type_geneve', neutron.plugins.ml2.drivers.type_geneve.geneve_opts), ('securitygroup', - neutron.agent.securitygroups_rpc.security_group_opts) + neutron.conf.agent.securitygroups_rpc.security_group_opts) ] @@ -271,10 +279,13 @@ neutron.agent.ovsdb.api.OPTS) ), ('agent', - neutron.plugins.ml2.drivers.openvswitch.agent.common.config. - agent_opts), + itertools.chain( + neutron.plugins.ml2.drivers.openvswitch.agent.common.config. + agent_opts, + neutron.agent.agent_extensions_manager.AGENT_EXT_MANAGER_OPTS) + ), ('securitygroup', - neutron.agent.securitygroups_rpc.security_group_opts) + neutron.conf.agent.securitygroups_rpc.security_group_opts) ] @@ -284,7 +295,7 @@ neutron.plugins.ml2.drivers.mech_sriov.agent.common.config. 
sriov_nic_opts), ('agent', - neutron.agent.l2.extensions.manager.L2_AGENT_EXT_MANAGER_OPTS) + neutron.agent.agent_extensions_manager.AGENT_EXT_MANAGER_OPTS) ] diff -Nru neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/app.py neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/app.py --- neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/app.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/app.py 2016-08-03 20:10:34.000000000 +0000 @@ -25,8 +25,8 @@ from neutron.pecan_wsgi import startup CONF = cfg.CONF -CONF.import_opt('bind_host', 'neutron.common.config') -CONF.import_opt('bind_port', 'neutron.common.config') +CONF.import_opt('bind_host', 'neutron.conf.common') +CONF.import_opt('bind_port', 'neutron.conf.common') def setup_app(*args, **kwargs): @@ -50,8 +50,8 @@ hooks.OwnershipValidationHook(), # priority 125 hooks.QuotaEnforcementHook(), # priority 130 hooks.NotifierHook(), # priority 135 + hooks.QueryParametersHook(), # priority 139 hooks.PolicyHook(), # priority 140 - hooks.QueryParametersHook(), # priority 145 ] app = pecan.make_app( diff -Nru neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/controllers/quota.py neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/controllers/quota.py --- neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/controllers/quota.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/controllers/quota.py 2016-08-03 20:10:34.000000000 +0000 @@ -16,7 +16,6 @@ from neutron_lib.api import converters from neutron_lib import exceptions as n_exc from oslo_config import cfg -from oslo_log import log from oslo_utils import importutils import pecan from pecan import request @@ -28,7 +27,6 @@ from neutron.pecan_wsgi.controllers import utils from neutron.quota import resource_registry -LOG = log.getLogger(__name__) RESOURCE_NAME = "quota" TENANT_ID_ATTR = {'tenant_id': {'allow_post': False, diff -Nru neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/controllers/resource.py neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/controllers/resource.py --- neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/controllers/resource.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/controllers/resource.py 2016-08-03 20:10:34.000000000 +0000 @@ -26,9 +26,11 @@ class ItemController(utils.NeutronPecanController): - def __init__(self, resource, item, plugin=None, resource_info=None): + def __init__(self, resource, item, plugin=None, resource_info=None, + parent_resource=None): super(ItemController, self).__init__(None, resource, plugin=plugin, - resource_info=resource_info) + resource_info=resource_info, + parent_resource=parent_resource) self.item = item @utils.expose(generic=True) @@ -36,11 +38,15 @@ return self.get(*args, **kwargs) def get(self, *args, **kwargs): - getter = getattr(self.plugin, 'get_%s' % self.resource) neutron_context = request.context['neutron_context'] + getter_args = [neutron_context, self.item] + # NOTE(tonytan4ever): This implicitly forces the getter method + # to use the parent_id as the last argument, which makes future + # refactoring easier + if 'parent_id' in request.context: + getter_args.append(request.context['parent_id']) fields = request.context['query_params'].get('fields') - return {self.resource: getter(neutron_context, self.item, - fields=fields)} + return {self.resource: self.plugin_shower(*getter_args, fields=fields)} @utils.when(index, method='HEAD') @utils.when(index, method='POST') @@ -53,19 +59,24 @@ neutron_context = request.context['neutron_context'] resources = request.context['resources'] # TODO(kevinbenton):
bulk? - updater = getattr(self.plugin, 'update_%s' % self.resource) # Bulk update is not supported, 'resources' always contains a single # element data = {self.resource: resources[0]} - return {self.resource: updater(neutron_context, self.item, data)} + updater_args = [neutron_context, self.item] + if 'parent_id' in request.context: + updater_args.append(request.context['parent_id']) + updater_args.append(data) + return {self.resource: self.plugin_updater(*updater_args)} @utils.when(index, method='DELETE') def delete(self): # TODO(kevinbenton): setting code could be in a decorator pecan.response.status = 204 neutron_context = request.context['neutron_context'] - deleter = getattr(self.plugin, 'delete_%s' % self.resource) - return deleter(neutron_context, self.item) + deleter_args = [neutron_context, self.item] + if 'parent_id' in request.context: + deleter_args.append(request.context['parent_id']) + return self.plugin_deleter(*deleter_args) @utils.expose() def _lookup(self, collection, *remainder): @@ -74,8 +85,10 @@ collection) if not controller: LOG.warning(_LW("No controller found for: %s - returning response " - "code 404"), collection) + "code 404"), collection) pecan.abort(404) + request.context['resource'] = controller.resource + request.context['parent_id'] = request.context['resource_id'] return controller, remainder @@ -90,7 +103,11 @@ uri_identifier = '%s_id' % self.resource request.context['uri_identifiers'][uri_identifier] = item return (self.item_controller_class( - self.resource, item, resource_info=self.resource_info), + self.resource, item, resource_info=self.resource_info, + # NOTE(tonytan4ever): the item needs to share the same + # parent as the collection + parent_resource=self.parent + ), remainder) @utils.expose(generic=True) @@ -99,12 +116,13 @@ def get(self, *args, **kwargs): # NOTE(blogan): these are set in the FieldsAndFiltersHook - fields = request.context['query_params'].get('fields') - filters = request.context['query_params'].get('filters') - lister = getattr(self.plugin, 'get_%s' % self.collection) + query_params = request.context['query_params'] neutron_context = request.context['neutron_context'] - return {self.collection: lister(neutron_context, - fields=fields, filters=filters)} + lister_args = [neutron_context] + if 'parent_id' in request.context: + lister_args.append(request.context['parent_id']) + return {self.collection: self.plugin_lister(*lister_args, + **query_params)} @utils.when(index, method='HEAD') @utils.when(index, method='PATCH') @@ -123,13 +141,16 @@ def create(self, resources): if len(resources) > 1: # Bulk!
- method = 'create_%s_bulk' % self.resource + creator = self.plugin_bulk_creator key = self.collection data = {key: [{self.resource: res} for res in resources]} else: - method = 'create_%s' % self.resource + creator = self.plugin_creator key = self.resource data = {key: resources[0]} - creator = getattr(self.plugin, method) neutron_context = request.context['neutron_context'] - return {key: creator(neutron_context, data)} + creator_args = [neutron_context] + if 'parent_id' in request.context: + creator_args.append(request.context['parent_id']) + creator_args.append(data) + return {key: creator(*creator_args)} diff -Nru neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/controllers/utils.py neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/controllers/utils.py --- neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/controllers/utils.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/controllers/utils.py 2016-08-03 20:10:34.000000000 +0000 @@ -17,9 +17,12 @@ import functools from neutron_lib import constants +from oslo_config import cfg import pecan from pecan import request +import six +from neutron.api import api_common from neutron.api.v2 import attributes as api_attributes from neutron.db import api as db_api from neutron import manager @@ -87,7 +90,15 @@ class NeutronPecanController(object): - def __init__(self, collection, resource, plugin=None, resource_info=None): + LIST = 'list' + SHOW = 'show' + CREATE = 'create' + UPDATE = 'update' + DELETE = 'delete' + + def __init__(self, collection, resource, plugin=None, resource_info=None, + allow_pagination=None, allow_sorting=None, + parent_resource=None): # Ensure dashes are always replaced with underscores self.collection = collection and collection.replace('-', '_') self.resource = resource and resource.replace('-', '_') @@ -101,11 +112,38 @@ data.get('required_by_policy')]) else: self._mandatory_fields = set() + self.allow_pagination = allow_pagination + if self.allow_pagination is None: + self.allow_pagination = cfg.CONF.allow_pagination + self.allow_sorting = allow_sorting + if self.allow_sorting is None: + self.allow_sorting = cfg.CONF.allow_sorting + self.native_pagination = api_common.is_native_pagination_supported( + self.plugin) + self.native_sorting = api_common.is_native_sorting_supported( + self.plugin) + self.primary_key = self._get_primary_key() + + self.parent = parent_resource + parent_resource = '_%s' % parent_resource if parent_resource else '' + self._parent_id_name = ('%s_id' % parent_resource + if parent_resource else None) + self._plugin_handlers = { + self.LIST: 'get%s_%s' % (parent_resource, self.collection), + self.SHOW: 'get%s_%s' % (parent_resource, self.resource) + } + for action in [self.CREATE, self.UPDATE, self.DELETE]: + self._plugin_handlers[action] = '%s%s_%s' % ( + action, parent_resource, self.resource) def build_field_list(self, request_fields): + added_fields = [] + combined_fields = [] if request_fields: - return set(request_fields) | self._mandatory_fields - return [] + req_fields_set = set(request_fields) + added_fields = self._mandatory_fields - req_fields_set + combined_fields = req_fields_set | self._mandatory_fields + return list(combined_fields), list(added_fields) @property def plugin(self): @@ -121,6 +159,43 @@ self.collection) return self._resource_info + def _get_primary_key(self, default_primary_key='id'): + if not self.resource_info: + return default_primary_key + for key, value in six.iteritems(self.resource_info): + if value.get('primary_key', False): + return key + return 
default_primary_key + + @property + def plugin_handlers(self): + return self._plugin_handlers + + @property + def plugin_lister(self): + return getattr(self.plugin, self._plugin_handlers[self.LIST]) + + @property + def plugin_shower(self): + return getattr(self.plugin, self._plugin_handlers[self.SHOW]) + + @property + def plugin_creator(self): + return getattr(self.plugin, self._plugin_handlers[self.CREATE]) + + @property + def plugin_bulk_creator(self): + return getattr(self.plugin, + '%s_bulk' % self._plugin_handlers[self.CREATE]) + + @property + def plugin_deleter(self): + return getattr(self.plugin, self._plugin_handlers[self.DELETE]) + + @property + def plugin_updater(self): + return getattr(self.plugin, self._plugin_handlers[self.UPDATE]) + class ShimRequest(object): diff -Nru neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/hooks/context.py neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/hooks/context.py --- neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/hooks/context.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/hooks/context.py 2016-08-29 20:05:49.000000000 +0000 @@ -39,22 +39,15 @@ priority = 95 def before(self, state): - user_id = state.request.headers.get('X-User-Id') - user_id = state.request.headers.get('X-User', user_id) user_name = state.request.headers.get('X-User-Name', '') - tenant_id = state.request.headers.get('X-Project-Id') tenant_name = state.request.headers.get('X-Project-Name') - auth_token = state.request.headers.get('X-Auth-Token') - roles = state.request.headers.get('X-Roles', '').split(',') - roles = [r.strip() for r in roles] - creds = {'roles': roles} req_id = state.request.headers.get(request_id.ENV_REQUEST_ID) # TODO(kevinbenton): is_admin logic # Create a context with the authentication data - ctx = context.Context(user_id, tenant_id=tenant_id, - roles=creds['roles'], - user_name=user_name, tenant_name=tenant_name, - request_id=req_id, auth_token=auth_token) + ctx = context.Context.from_environ(state.request.environ, + user_name=user_name, + tenant_name=tenant_name, + request_id=req_id) # Inject the context... state.request.context['neutron_context'] = ctx diff -Nru neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/hooks/policy_enforcement.py neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/hooks/policy_enforcement.py --- neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/hooks/policy_enforcement.py 2016-06-06 16:54:53.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/hooks/policy_enforcement.py 2016-08-29 20:05:49.000000000 +0000 @@ -35,7 +35,8 @@ return quota.get_tenant_quotas(resource_id)[quotasv2.RESOURCE_NAME] -def fetch_resource(neutron_context, collection, resource, resource_id): +def fetch_resource(neutron_context, collection, resource, resource_id, + parent_id=None): controller = manager.NeutronManager.get_controller_for_resource( collection) attrs = controller.resource_info @@ -50,9 +51,11 @@ value.get('primary_key') or 'default' not in value)] plugin = manager.NeutronManager.get_plugin_for_resource(resource) if plugin: - getter = getattr(plugin, 'get_%s' % resource) - # TODO(kevinbenton): the parent_id logic currently in base.py - return getter(neutron_context, resource_id, fields=field_list) + getter = controller.plugin_shower + getter_args = [neutron_context, resource_id] + if parent_id: + getter_args.append(parent_id) + return getter(*getter_args, fields=field_list) else: # Some legit resources, like quota, do not have a plugin yet. 
# Retrieving the original object is nevertheless important @@ -81,8 +84,13 @@ needs_prefetch = (state.request.method == 'PUT' or state.request.method == 'DELETE') policy.init() - action = '%s_%s' % (pecan_constants.ACTION_MAP[state.request.method], - resource) + + # NOTE(tonytan4ever): needs to get the actual action from controller's + # _plugin_handlers + controller = manager.NeutronManager.get_controller_for_resource( + collection) + action = controller.plugin_handlers[ + pecan_constants.ACTION_MAP[state.request.method]] # NOTE(salv-orlando): As bulk updates are not supported, in case of PUT # requests there will be only a single item to process, and its @@ -97,8 +105,10 @@ # Oops... this was a delete after all! item = {} resource_id = state.request.context.get('resource_id') + parent_id = state.request.context.get('parent_id') resource_obj = fetch_resource(neutron_context, collection, - resource, resource_id) + resource, resource_id, + parent_id=parent_id) if resource_obj: original_resources.append(resource_obj) obj = copy.copy(resource_obj) @@ -166,7 +176,7 @@ # This exception must be explicitly caught as the exception # translation hook won't be called if an error occurs in the # 'after' handler. - raise webob.exc.HTTPForbidden(e.message) + raise webob.exc.HTTPForbidden(str(e)) if is_single: resp = resp[0] diff -Nru neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/hooks/query_parameters.py neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/hooks/query_parameters.py --- neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/hooks/query_parameters.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/hooks/query_parameters.py 2016-08-03 20:10:34.000000000 +0000 @@ -14,6 +14,44 @@ from neutron.api import api_common from neutron import manager +from neutron.pecan_wsgi.hooks import policy_enforcement + + +# TODO(blogan): ideally it'd be nice to get the pagination and sorting +# helpers from the controller, but since controllers are instantiated at +# startup and not per request, sharing them would cause race conditions; +# a new pagination and sorting helper is needed per request/response flow. +# As a result, we're forced to pass them through the request context.
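Because pecan runs before() hooks in ascending priority order and after() hooks in reverse, the priority chosen below (PolicyHook.priority - 1, i.e. 139 against PolicyHook's 140, per the app.py hunk earlier in this patch) makes this hook's after() run last. A standalone sketch of that ordering, not part of the patch:

hooks_by_priority = {139: 'QueryParametersHook', 140: 'PolicyHook'}
# before() methods run in ascending priority order
before_order = [hooks_by_priority[p] for p in sorted(hooks_by_priority)]
# after() methods run in reverse priority order
after_order = list(reversed(before_order))
assert before_order == ['QueryParametersHook', 'PolicyHook']
assert after_order == ['PolicyHook', 'QueryParametersHook']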
+ +def _get_pagination_helper(request, controller): + if 'pagination_helper' in request.context: + return request.context['pagination_helper'] + if not controller.allow_pagination: + helper = api_common.NoPaginationHelper(request, controller.primary_key) + elif controller.native_pagination: + helper = api_common.PaginationNativeHelper(request, + controller.primary_key) + else: + helper = api_common.PaginationEmulatedHelper(request, + controller.primary_key) + request.context['pagination_helper'] = helper + return helper + + +def _get_sorting_helper(request, controller): + if 'sorting_helper' in request.context: + return request.context['sorting_helper'] + if not controller.allow_sorting: + helper = api_common.NoSortingHelper(request, controller.resource_info) + elif controller.native_sorting: + helper = api_common.SortingNativeHelper(request, + controller.resource_info) + else: + helper = api_common.SortingEmulatedHelper(request, + controller.resource_info) + request.context['sorting_helper'] = helper + return helper def _listify(thing): @@ -26,8 +64,10 @@ # if only one fields query parameter is passed, pecan will not put # that parameter in a list, so we need to convert it into a list fields = _listify(fields) - combined_fields = controller.build_field_list(fields) - return combined_fields + combined_fields, added_fields = controller.build_field_list(fields) + state.request.context['query_params']['fields'] = combined_fields + state.request.context['added_fields'] = added_fields + return combined_fields, added_fields def _set_filters(state, controller): @@ -42,7 +82,9 @@ class QueryParametersHook(hooks.PecanHook): - priority = 145 + # NOTE(blogan): needs to run after the policy hook; 'after' methods + # are run in reverse priority order. + priority = policy_enforcement.PolicyHook.priority - 1 def before(self, state): state.request.context['query_params'] = {} @@ -53,7 +95,41 @@ return controller = manager.NeutronManager.get_controller_for_resource( collection) - combined_fields = _set_fields(state, controller) + combined_fields, added_fields = _set_fields(state, controller) filters = _set_filters(state, controller) query_params = {'fields': combined_fields, 'filters': filters} + pagination_helper = _get_pagination_helper(state.request, controller) + sorting_helper = _get_sorting_helper(state.request, controller) + sorting_helper.update_args(query_params) + sorting_helper.update_fields(query_params.get('fields', []), + added_fields) + pagination_helper.update_args(query_params) + pagination_helper.update_fields(query_params.get('fields', []), + added_fields) state.request.context['query_params'] = query_params + + def after(self, state): + resource = state.request.context.get('resource') + collection = state.request.context.get('collection') + # NOTE(blogan): don't paginate extension list or non-GET requests + if (not resource or resource == 'extension' or + state.request.method != 'GET'): + return + try: + data = state.response.json + except ValueError: + return + # Do not attempt to paginate if the body is not a list of entities + if not data or resource in data or collection not in data: + return + controller = manager.NeutronManager.get_controller_for_resource( + collection) + sorting_helper = _get_sorting_helper(state.request, controller) + pagination_helper = _get_pagination_helper(state.request, controller) + obj_list = sorting_helper.sort(data[collection]) + obj_list = pagination_helper.paginate(obj_list) + resp_body = {collection: obj_list} + pagination_links =
pagination_helper.get_links(obj_list) + if pagination_links: + resp_body['_'.join([collection, 'links'])] = pagination_links + state.response.json = resp_body diff -Nru neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/startup.py neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/startup.py --- neutron-9.0.0~b2~dev280/neutron/pecan_wsgi/startup.py 2016-06-06 16:54:53.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/pecan_wsgi/startup.py 2016-08-03 20:10:34.000000000 +0000 @@ -38,7 +38,8 @@ for resource, collection in router.RESOURCES.items(): resource_registry.register_resource_by_name(resource) plugin = manager.NeutronManager.get_plugin() - new_controller = res_ctrl.CollectionsController(collection, resource) + new_controller = res_ctrl.CollectionsController(collection, resource, + plugin=plugin) manager.NeutronManager.set_controller_for_resource( collection, new_controller) manager.NeutronManager.set_plugin_for_resource(resource, plugin) @@ -69,8 +70,17 @@ resource = legacy_controller.resource plugin = legacy_controller.plugin attr_info = legacy_controller.attr_info + # Retrieve the parent resource. The parent resource is expected + # to have the format: + # {'collection_name': 'name-of-collection', + # 'member_name': 'name-of-resource'} + # collection_name does not appear to be used in the legacy code + # inside the controller logic, so we can assume we do not need it. + parent = legacy_controller.parent or {} + parent_resource = parent.get('member_name') new_controller = res_ctrl.CollectionsController( - collection, resource, resource_info=attr_info) + collection, resource, resource_info=attr_info, + parent_resource=parent_resource) manager.NeutronManager.set_plugin_for_resource(resource, plugin) if path_prefix: manager.NeutronManager.add_resource_for_path_prefix( diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/common/constants.py neutron-9.0.0~b3~dev557/neutron/plugins/common/constants.py --- neutron-9.0.0~b2~dev280/neutron/plugins/common/constants.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/common/constants.py 2016-08-03 20:10:34.000000000 +0000 @@ -45,6 +45,7 @@ 'timestamp_core': 'timestamp_core', 'network_ip_availability': 'network-ip-availability', 'flavors': 'flavors', + 'revisions': 'revisions', } # Service operation status constants @@ -91,7 +92,13 @@ MAX_VXLAN_VNI = 2 ** 24 - 1 VXLAN_UDP_PORT = 4789 -# Network Type MTU overhead -GENEVE_ENCAP_MIN_OVERHEAD = 50 -GRE_ENCAP_OVERHEAD = 42 -VXLAN_ENCAP_OVERHEAD = 50 +# Overlay (tunnel) protocol overhead +GENEVE_ENCAP_MIN_OVERHEAD = 30 +GRE_ENCAP_OVERHEAD = 22 +VXLAN_ENCAP_OVERHEAD = 30 + +# IP header length +IP_HEADER_LENGTH = { + 4: 20, + 6: 40, +} diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/common/utils.py neutron-9.0.0~b3~dev557/neutron/plugins/common/utils.py --- neutron-9.0.0~b2~dev280/neutron/plugins/common/utils.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/common/utils.py 2016-08-03 20:10:34.000000000 +0000 @@ -16,6 +16,7 @@ Common utilities and helper functions for OpenStack Networking Plugins.
""" +import contextlib import hashlib from neutron_lib import constants as n_const @@ -23,9 +24,10 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils +from oslo_utils import excutils import webob.exc -from neutron._i18n import _, _LI +from neutron._i18n import _, _LE, _LI from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import registry @@ -83,13 +85,18 @@ "than start of tunnel range")) +def raise_invalid_tag(vlan_str, vlan_range): + """Raise an exception for invalid tag.""" + raise n_exc.NetworkVlanRangeError( + vlan_range=vlan_range, + error=_("%s is not a valid VLAN tag") % vlan_str) + + def verify_vlan_range(vlan_range): """Raise an exception for invalid tags or malformed range.""" for vlan_tag in vlan_range: if not is_valid_vlan_tag(vlan_tag): - raise n_exc.NetworkVlanRangeError( - vlan_range=vlan_range, - error=_("%s is not a valid VLAN tag") % vlan_tag) + raise_invalid_tag(str(vlan_tag), vlan_range) if vlan_range[1] < vlan_range[0]: raise n_exc.NetworkVlanRangeError( vlan_range=vlan_range, @@ -100,13 +107,25 @@ """Interpret a string as network[:vlan_begin:vlan_end].""" entry = network_vlan_range.strip() if ':' in entry: - try: - network, vlan_min, vlan_max = entry.split(':') - vlan_range = (int(vlan_min), int(vlan_max)) - except ValueError as ex: - raise n_exc.NetworkVlanRangeError(vlan_range=entry, error=ex) + if entry.count(':') != 2: + raise n_exc.NetworkVlanRangeError( + vlan_range=entry, + error=_("Need exactly two values for VLAN range")) + network, vlan_min, vlan_max = entry.split(':') if not network: raise n_exc.PhysicalNetworkNameError() + + try: + vlan_min = int(vlan_min) + except ValueError: + raise_invalid_tag(vlan_min, entry) + + try: + vlan_max = int(vlan_max) + except ValueError: + raise_invalid_tag(vlan_max, entry) + + vlan_range = (vlan_min, vlan_max) verify_vlan_range(vlan_range) return network, vlan_range else: @@ -178,6 +197,26 @@ return core_plugin.create_port(context, {'port': port_data}) +class _DelManager(object): + def __init__(self): + self.delete_on_error = True + + +@contextlib.contextmanager +def delete_port_on_error(core_plugin, context, port_id): + mgr = _DelManager() + try: + yield mgr + except Exception: + with excutils.save_and_reraise_exception(): + try: + if mgr.delete_on_error: + core_plugin.delete_port(context, port_id, + l3_port_check=False) + except Exception: + LOG.exception(_LE("Failed to cleanup port: %s"), port_id) + + def get_interface_name(name, prefix='', max_len=n_const.DEVICE_NAME_MAX_LEN): """Construct an interface name based on the prefix and name. diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/common/exceptions.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/common/exceptions.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/common/exceptions.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/common/exceptions.py 2016-08-29 20:05:49.000000000 +0000 @@ -20,9 +20,15 @@ from neutron._i18n import _ -class MechanismDriverError(exceptions.NeutronException): +class MechanismDriverError(exceptions.MultipleExceptions): """Mechanism driver call failed.""" - message = _("%(method)s failed.") + + def __init__(self, method, errors=None): + # The message is not used by api, because api will unwrap + # MultipleExceptions and return inner exceptions. Keep it + # for backward-compatibility, in case other code use it. 
+ self.message = _("%s failed.") % method + super(MechanismDriverError, self).__init__(errors) class ExtensionDriverError(exceptions.InvalidInput): diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/config.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/config.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/config.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/config.py 2016-08-03 20:10:34.000000000 +0000 @@ -61,7 +61,11 @@ "will have the same type as tenant networks. Allowed " "values for external_network_type config option depend " "on the network type values configured in type_drivers " - "config option.")) + "config option.")), + cfg.IntOpt('overlay_ip_version', + default=4, + help=_("IP version of all overlay (tunnel) network endpoints. " + "Use a value of 4 for IPv4 or 6 for IPv6.")) ] diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/db.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/db.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/db.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/db.py 2016-08-29 20:05:49.000000000 +0000 @@ -22,13 +22,17 @@ from sqlalchemy import or_ from sqlalchemy.orm import exc -from neutron._i18n import _LE +from neutron._i18n import _, _LE +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.callbacks import resources +from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 -from neutron.db import securitygroups_db as sg_db from neutron.db import segments_db from neutron.extensions import portbindings from neutron import manager from neutron.plugins.ml2 import models +from neutron.services.segments import exceptions as seg_exc LOG = log.getLogger(__name__) @@ -131,7 +135,7 @@ 'host': host}) -def ensure_dvr_port_binding(session, port_id, host, router_id=None): +def ensure_distributed_port_binding(session, port_id, host, router_id=None): record = (session.query(models.DistributedPortBinding). filter_by(port_id=port_id, host=host).first()) if record: @@ -149,15 +153,15 @@ session.add(record) return record except db_exc.DBDuplicateEntry: - LOG.debug("DVR Port %s already bound", port_id) + LOG.debug("Distributed Port %s already bound", port_id) return (session.query(models.DistributedPortBinding). filter_by(port_id=port_id, host=host).one()) -def delete_dvr_port_binding_if_stale(session, binding): +def delete_distributed_port_binding_if_stale(session, binding): if not binding.router_id and binding.status == n_const.PORT_STATUS_DOWN: with session.begin(subtransactions=True): - LOG.debug("DVR: Deleting binding %s", binding) + LOG.debug("Distributed port: Deleting binding %s", binding) session.delete(binding) @@ -209,7 +213,7 @@ def get_sg_ids_grouped_by_port(context, port_ids): sg_ids_grouped_by_port = {} - sg_binding_port = sg_db.SecurityGroupPortBinding.port_id + sg_binding_port = sg_models.SecurityGroupPortBinding.port_id with context.session.begin(subtransactions=True): # partial UUIDs must be individually matched with startswith. 
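The delete_port_on_error() context manager added to plugins/common/utils.py above gives callers a transactional wrapper around post-creation port wiring: if the body raises, the port is deleted again (with l3_port_check=False) and the original exception is re-raised. A minimal usage sketch; wire_up_port() is hypothetical and stands in for any follow-up work that may fail after the port row exists:

from neutron.plugins.common import utils as p_utils

def build_port(core_plugin, context, port_data):
    port = core_plugin.create_port(context, {'port': port_data})
    with p_utils.delete_port_on_error(core_plugin, context, port['id']) as mgr:
        wire_up_port(port)            # hypothetical follow-up that may raise
        mgr.delete_on_error = False   # commit point: keep the port afterwards
    return port

Flipping mgr.delete_on_error lets a caller mark the point past which cleanup should no longer happen.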
@@ -223,8 +227,9 @@ or_criteria.append(models_v2.Port.id.in_(full_uuids)) query = context.session.query( - models_v2.Port, sg_db.SecurityGroupPortBinding.security_group_id) - query = query.outerjoin(sg_db.SecurityGroupPortBinding, + models_v2.Port, + sg_models.SecurityGroupPortBinding.security_group_id) + query = query.outerjoin(sg_models.SecurityGroupPortBinding, models_v2.Port.id == sg_binding_port) query = query.filter(or_(*or_criteria)) @@ -264,9 +269,9 @@ return query.host -def generate_dvr_port_status(session, port_id): +def generate_distributed_port_status(session, port_id): # an OR'ed value of status assigned to parent port from the - # dvrportbinding bucket + # distributedportbinding bucket query = session.query(models.DistributedPortBinding) final_status = n_const.PORT_STATUS_BUILD for bind in query.filter(models.DistributedPortBinding.port_id == port_id): @@ -277,24 +282,24 @@ return final_status -def get_dvr_port_binding_by_host(session, port_id, host): +def get_distributed_port_binding_by_host(session, port_id, host): with session.begin(subtransactions=True): binding = (session.query(models.DistributedPortBinding). filter(models.DistributedPortBinding.port_id.startswith(port_id), models.DistributedPortBinding.host == host).first()) if not binding: - LOG.debug("No binding for DVR port %(port_id)s with host " + LOG.debug("No binding for distributed port %(port_id)s with host " "%(host)s", {'port_id': port_id, 'host': host}) return binding -def get_dvr_port_bindings(session, port_id): +def get_distributed_port_bindings(session, port_id): with session.begin(subtransactions=True): bindings = (session.query(models.DistributedPortBinding). filter(models.DistributedPortBinding.port_id.startswith( port_id)).all()) if not bindings: - LOG.debug("No bindings for DVR port %s", port_id) + LOG.debug("No bindings for distributed port %s", port_id) return bindings @@ -304,3 +309,30 @@ return bool(context.session.query(models_v2.Subnet). enable_eagerloads(False).filter_by(enable_dhcp=True). filter(models_v2.Subnet.id.in_(subnet_ids)).count()) + + +def _prevent_segment_delete_with_port_bound(resource, event, trigger, + context, segment): + """Raise an exception if any ports are bound to the segment.""" + segment_id = segment['id'] + query = context.session.query(models_v2.Port) + query = query.join( + models.PortBindingLevel, + models.PortBindingLevel.port_id == models_v2.Port.id) + query = query.filter(models.PortBindingLevel.segment_id == segment_id) + port_ids = [p.id for p in query] + + # There are still ports in the segment, so it should not be deleted + # TODO(xiaohhui): Should we delete the dhcp port automatically here?
+ if port_ids: + reason = _("The segment is still bound with port(s) " + "%s") % ", ".join(port_ids) + raise seg_exc.SegmentInUse(segment_id=segment_id, reason=reason) + + +def subscribe(): + registry.subscribe(_prevent_segment_delete_with_port_bound, + resources.SEGMENT, + events.BEFORE_DELETE) + +subscribe() diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/driver_context.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/driver_context.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/driver_context.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/driver_context.py 2016-08-03 20:10:34.000000000 +0000 @@ -259,7 +259,7 @@ network_id = self._network_context.current['id'] return self._plugin.type_manager.allocate_dynamic_segment( - self._plugin_context.session, network_id, segment) + self._plugin_context, network_id, segment) def release_dynamic_segment(self, segment_id): return self._plugin.type_manager.release_dynamic_segment( diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/agent/_agent_manager_base.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/agent/_agent_manager_base.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/agent/_agent_manager_base.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/agent/_agent_manager_base.py 2016-08-03 20:10:34.000000000 +0000 @@ -16,11 +16,8 @@ import abc -from oslo_log import log as logging import six -LOG = logging.getLogger(__name__) - class NetworkSegment(object): """Represents a Neutron network segment""" diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/agent/capabilities.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/agent/capabilities.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/agent/capabilities.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/agent/capabilities.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,29 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.callbacks import events +from neutron.callbacks import registry + + +def notify_init_event(agent_type, agent): + """Notify init event for the specified agent.""" + registry.notify(agent_type, events.AFTER_INIT, agent, agent=agent) + + +def register(callback, agent_type): + """Subscribe callback to init event for the specified agent. + + :param agent_type: an agent type as defined in neutron_lib.constants. + :param callback: a callback that can process the agent init event. 
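The new capabilities module above is a thin init-event bus for agents; the OVS trunk support later in this patch is wired through exactly this pair of calls. A minimal sketch (the callback body is hypothetical):

from neutron_lib import constants

from neutron.plugins.ml2.drivers.agent import capabilities

def init_handler(resource, event, trigger, agent=None, **kwargs):
    # runs once the agent instance exists; enable optional features here
    pass

capabilities.register(init_handler, constants.AGENT_TYPE_OVS)
# later, from the agent entry point, once the agent object is built:
# capabilities.notify_init_event(constants.AGENT_TYPE_OVS, agent)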
+ """ + registry.subscribe(callback, agent_type, events.AFTER_INIT) diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/agent/_common_agent.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/agent/_common_agent.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/agent/_common_agent.py 2016-05-23 21:19:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/agent/_common_agent.py 2016-08-03 20:10:34.000000000 +0000 @@ -15,6 +15,7 @@ # under the License. import collections +import contextlib import sys import time @@ -23,13 +24,17 @@ from oslo_log import log as logging from oslo_service import loopingcall from oslo_service import service +from oslo_utils import excutils from osprofiler import profiler from neutron._i18n import _LE, _LI -from neutron.agent.l2.extensions import manager as ext_manager +from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.callbacks import resources +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.callbacks import resources as local_resources from neutron.common import config as common_config from neutron.common import constants as n_const from neutron.common import topics @@ -167,7 +172,7 @@ def init_extension_manager(self, connection): ext_manager.register_opts(cfg.CONF) self.ext_manager = ( - ext_manager.AgentExtensionsManager(cfg.CONF)) + ext_manager.L2AgentExtensionsManager(cfg.CONF)) self.ext_manager.initialize( connection, self.mgr.get_extension_driver_type()) @@ -216,7 +221,15 @@ return True for device_details in devices_details_list: - device = device_details['device'] + self._process_device_if_exists(device_details) + # no resync is needed + return False + + def _process_device_if_exists(self, device_details): + # ignore exceptions from devices that disappear because they will + # be handled as removed in the next iteration + device = device_details['device'] + with self._ignore_missing_device_exceptions(device): LOG.debug("Port %s added", device) if 'port_id' in device_details: @@ -293,10 +306,22 @@ device_details['port_id'], device_details['device']) self.ext_manager.handle_port(self.context, device_details) + registry.notify(local_resources.PORT_DEVICE, + events.AFTER_UPDATE, self, + context=self.context, + device_details=device_details) else: LOG.info(_LI("Device %s not defined on plugin"), device) - # no resync is needed - return False + + @contextlib.contextmanager + def _ignore_missing_device_exceptions(self, device): + try: + yield + except Exception: + with excutils.save_and_reraise_exception() as ectx: + if device not in self.mgr.get_all_devices(): + ectx.reraise = False + LOG.debug("%s was removed during processing.", device) def treat_devices_removed(self, devices): resync = False @@ -321,6 +346,9 @@ self.ext_manager.delete_port(self.context, {'device': device, 'port_id': port_id}) + registry.notify(local_resources.PORT_DEVICE, events.AFTER_DELETE, + self, context=self.context, device=device, + port_id=port_id) if self.prevent_arp_spoofing: self.mgr.delete_arp_spoofing_protection(devices) return resync diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/agent/config.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/agent/config.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/agent/config.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/agent/config.py 
2016-08-29 20:05:49.000000000 +0000 @@ -16,37 +16,8 @@ from oslo_config import cfg -from neutron._i18n import _ from neutron.agent.common import config +from neutron.conf.plugins.ml2.drivers import agent -agent_opts = [ - cfg.IntOpt('polling_interval', default=2, - help=_("The number of seconds the agent will wait between " - "polling for local device changes.")), - cfg.IntOpt('quitting_rpc_timeout', default=10, - help=_("Set new timeout in seconds for new rpc calls after " - "agent receives SIGTERM. If value is set to 0, rpc " - "timeout won't be changed")), - # TODO(kevinbenton): The following opt is duplicated between the OVS agent - # and the Linuxbridge agent to make it easy to back-port. These shared opts - # should be moved into a common agent config options location as part of - # the deduplication work. - cfg.BoolOpt('prevent_arp_spoofing', default=True, - deprecated_for_removal=True, - help=_("Enable suppression of ARP responses that don't match " - "an IP address that belongs to the port from which " - "they originate. Note: This prevents the VMs attached " - "to this agent from spoofing, it doesn't protect them " - "from other devices which have the capability to spoof " - "(e.g. bare metal or VMs attached to agents without " - "this flag set to True). Spoofing rules will not be " - "added to any ports that have port security disabled. " - "For LinuxBridge, this requires ebtables. For OVS, it " - "requires a version that supports matching ARP " - "headers. This option will be removed in Ocata so " - "the only way to disable protection will be via the " - "port security extension.")) -] - -cfg.CONF.register_opts(agent_opts, "AGENT") +agent.register_agent_opts() config.register_agent_state_opts_helper(cfg.CONF) diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/l2pop/db.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/l2pop/db.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/l2pop/db.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/l2pop/db.py 2016-08-03 20:10:34.000000000 +0000 @@ -70,7 +70,7 @@ return query -def get_nondvr_active_network_ports(session, network_id): +def get_nondistributed_active_network_ports(session, network_id): query = _get_active_network_ports(session, network_id) query = query.filter(models_v2.Port.device_owner != const.DEVICE_OWNER_DVR_INTERFACE) @@ -78,7 +78,7 @@ if get_agent_ip(agent)] -def get_dvr_active_network_ports(session, network_id): +def get_distributed_active_network_ports(session, network_id): with session.begin(subtransactions=True): query = session.query(ml2_models.DistributedPortBinding, agents_db.Agent) diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/l2pop/mech_driver.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/l2pop/mech_driver.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/l2pop/mech_driver.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/l2pop/mech_driver.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,13 +14,13 @@ # under the License. 
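The agent config module rewritten above now simply delegates to the new neutron.conf tree instead of registering its own options at import time. A minimal consumer sketch, assuming the relocated helper keeps the option names shown in the removed block (polling_interval, quitting_rpc_timeout):

from oslo_config import cfg

from neutron.conf.plugins.ml2.drivers import agent

agent.register_agent_opts()                 # registers the [AGENT] group once
interval = cfg.CONF.AGENT.polling_interval  # default: 2 seconds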
from neutron_lib import constants as const +from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging -from neutron._i18n import _LW +from neutron._i18n import _, _LW from neutron import context as n_context from neutron.db import api as db_api -from neutron.plugins.ml2.common import exceptions as ml2_exc from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers.l2pop import config # noqa from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db @@ -40,10 +40,18 @@ self.rpc_ctx = n_context.get_admin_context_without_session() def _get_port_fdb_entries(self, port): + # the port might be concurrently deleted + if not port or not port.get('fixed_ips'): + return [] + return [l2pop_rpc.PortInfo(mac_address=port['mac_address'], ip_address=ip['ip_address']) for ip in port['fixed_ips']] + def check_vlan_transparency(self, context): + """L2population driver vlan transparency support.""" + return True + def delete_port_postcommit(self, context): port = context.current agent_host = context.host @@ -110,9 +118,9 @@ if (orig['mac_address'] != port['mac_address'] and context.status == const.PORT_STATUS_ACTIVE): - LOG.warning(_LW("unable to modify mac_address of ACTIVE port " - "%s"), port['id']) - raise ml2_exc.MechanismDriverError(method='update_port_precommit') + msg = _("unable to modify mac_address of ACTIVE port " + "%s") % port['id'] + raise exceptions.InvalidInput(error_message=msg) def update_port_postcommit(self, context): port = context.current @@ -169,9 +177,10 @@ 'network_type': segment['network_type'], 'ports': {}}} tunnel_network_ports = ( - l2pop_db.get_dvr_active_network_ports(session, network_id)) + l2pop_db.get_distributed_active_network_ports(session, network_id)) fdb_network_ports = ( - l2pop_db.get_nondvr_active_network_ports(session, network_id)) + l2pop_db.get_nondistributed_active_network_ports(session, + network_id)) ports = agent_fdb_entries[network_id]['ports'] ports.update(self._get_tunnels( fdb_network_ports + tunnel_network_ports, diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py 2016-08-29 20:05:49.000000000 +0000 @@ -22,6 +22,7 @@ import six from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc +from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager @six.add_metaclass(abc.ABCMeta) @@ -222,19 +223,41 @@ ''' pass - def get_agent_ports(self, fdb_entries, local_vlan_map): + def _get_lvm_getter(self, local_vlan_map): + def get_lvm_from_mapping(net_id, local_vlan_map): + """This preserves backward compatibility with local_vlan_map; it + will be removed in Ocata. + """ + try: + return local_vlan_map[net_id] + except KeyError: + raise vlanmanager.MappingNotFound(net_id=net_id) + + def get_lvm_from_manager(net_id, local_vlan_map): + vlan_manager = vlanmanager.LocalVlanManager() + return vlan_manager.get(net_id) + + if local_vlan_map is not None: + vlanmanager.deprecate_local_vlan_map_in_object( + "%s.get_agent_ports()" % self.__class__.__name__, + stacklevel_extra=1) + return get_lvm_from_mapping + return get_lvm_from_manager + + def get_agent_ports(self, fdb_entries, local_vlan_map=None): """Generator to yield port info.
- For each known (i.e found in local_vlan_map) network in + For each known (i.e. found in the VLAN manager) network in fdb_entries, yield (lvm, fdb_entries[network_id]['ports']) pair. :param fdb_entries: l2pop fdb entries - :param local_vlan_map: A dict to map network_id to - the corresponding lvm entry. + :param local_vlan_map: Deprecated. """ + lvm_getter = self._get_lvm_getter(local_vlan_map) for network_id, values in fdb_entries.items(): - lvm = local_vlan_map.get(network_id) - if lvm is None: + try: + lvm = lvm_getter(network_id, local_vlan_map) + except vlanmanager.MappingNotFound: continue agent_ports = values.get('ports') yield (lvm, agent_ports) @@ -281,7 +304,7 @@ @log_helpers.log_method_call def fdb_chg_ip_tun(self, context, br, fdb_entries, local_ip, - local_vlan_map): + local_vlan_map=None): '''fdb update when an IP of a port is updated. The ML2 l2-pop mechanism driver sends an fdb update rpc message when an @@ -305,13 +328,13 @@ PortInfo has .mac_address and .ip_address attrs. :param local_ip: local IP address of this agent. - :param local_vlan_map: A dict to map network_id to - the corresponding lvm entry. + :param local_vlan_map: Deprecated. ''' - + lvm_getter = self._get_lvm_getter(local_vlan_map) for network_id, agent_ports in fdb_entries.items(): - lvm = local_vlan_map.get(network_id) - if not lvm: + try: + lvm = lvm_getter(network_id, local_vlan_map) + except vlanmanager.MappingNotFound: continue for agent_ip, state in agent_ports.items(): diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py 2016-08-29 20:05:49.000000000 +0000 @@ -12,77 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. -from oslo_config import cfg +from neutron.conf.plugins.ml2.drivers import linuxbridge -from neutron._i18n import _ -DEFAULT_BRIDGE_MAPPINGS = [] -DEFAULT_INTERFACE_MAPPINGS = [] -DEFAULT_VXLAN_GROUP = '224.0.0.1' -DEFAULT_KERNEL_HZ_VALUE = 250 # [Hz] -DEFAULT_TC_TBF_LATENCY = 50 # [ms] -vxlan_opts = [ - cfg.BoolOpt('enable_vxlan', default=True, - help=_("Enable VXLAN on the agent. Can be enabled when " - "agent is managed by ml2 plugin using linuxbridge " - "mechanism driver")), - cfg.IntOpt('ttl', - help=_("TTL for vxlan interface protocol packets.")), - cfg.IntOpt('tos', - help=_("TOS for vxlan interface protocol packets.")), - cfg.StrOpt('vxlan_group', default=DEFAULT_VXLAN_GROUP, - help=_("Multicast group(s) for vxlan interface. A range of " - "group addresses may be specified by using CIDR " - "notation. Specifying a range allows different VNIs to " - "use different group addresses, reducing or eliminating " - "spurious broadcast traffic to the tunnel endpoints. " - "To reserve a unique group for each possible " - "(24-bit) VNI, use a /8 such as 239.0.0.0/8. This " - "setting must be the same on all the agents.")), - cfg.IPOpt('local_ip', help=_("Local IP address of the VXLAN endpoints.")), - cfg.BoolOpt('l2_population', default=False, - help=_("Extension to use alongside ml2 plugin's l2population " - "mechanism driver. 
It enables the plugin to populate " - "VXLAN forwarding table.")), - cfg.BoolOpt('arp_responder', default=False, - help=_("Enable local ARP responder which provides local " - "responses instead of performing ARP broadcast into " - "the overlay. Enabling local ARP responder is not " - "fully compatible with the allowed-address-pairs " - "extension.") - ), -] - -bridge_opts = [ - cfg.ListOpt('physical_interface_mappings', - default=DEFAULT_INTERFACE_MAPPINGS, - help=_("Comma-separated list of " - ": tuples " - "mapping physical network names to the agent's " - "node-specific physical network interfaces to be used " - "for flat and VLAN networks. All physical networks " - "listed in network_vlan_ranges on the server should " - "have mappings to appropriate interfaces on each " - "agent.")), - cfg.ListOpt('bridge_mappings', - default=DEFAULT_BRIDGE_MAPPINGS, - help=_("List of :")), -] - -qos_options = [ - cfg.IntOpt('kernel_hz', default=DEFAULT_KERNEL_HZ_VALUE, - help=_("Value of host kernel tick rate (hz) for calculating " - "minimum burst value in bandwidth limit rules for " - "a port with QoS. See kernel configuration file for " - "HZ value and tc-tbf manual for more information.")), - cfg.IntOpt('tbf_latency', default=DEFAULT_TC_TBF_LATENCY, - help=_("Value of latency (ms) for calculating size of queue " - "for a port with QoS. See tc-tbf manual for more " - "information.")) -] - - -cfg.CONF.register_opts(vxlan_opts, "VXLAN") -cfg.CONF.register_opts(bridge_opts, "LINUX_BRIDGE") -cfg.CONF.register_opts(qos_options, "QOS") +linuxbridge.register_linuxbridge_opts() diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py 2016-05-23 21:19:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -439,7 +439,7 @@ def add_tap_interface(self, network_id, network_type, physical_network, segmentation_id, tap_device_name, device_owner): - """Add tap interface and handle interface missing exeptions.""" + """Add tap interface and handle interface missing exceptions.""" try: return self._add_tap_interface(network_id, network_type, physical_network, segmentation_id, @@ -559,19 +559,30 @@ def remove_interface(self, bridge_name, interface_name): bridge_device = bridge_lib.BridgeDevice(bridge_name) if bridge_device.exists(): - if not bridge_lib.is_bridged_interface(interface_name): + if not bridge_device.owns_interface(interface_name): return True LOG.debug("Removing device %(interface_name)s from bridge " "%(bridge_name)s", {'interface_name': interface_name, 'bridge_name': bridge_name}) - if bridge_device.delif(interface_name): - return False - LOG.debug("Done removing device %(interface_name)s from bridge " - "%(bridge_name)s", - {'interface_name': interface_name, - 'bridge_name': bridge_name}) - return True + try: + bridge_device.delif(interface_name) + LOG.debug("Done removing device %(interface_name)s from " + "bridge %(bridge_name)s", + {'interface_name': interface_name, + 'bridge_name': bridge_name}) + return True + except RuntimeError: + with excutils.save_and_reraise_exception() as ctxt: + if not bridge_device.owns_interface(interface_name): + # the exception was likely a side effect of the tap + # being deleted by some other agent during handling 
+ ctxt.reraise = False + LOG.debug("Cannot remove %(interface_name)s from " + "%(bridge_name)s. It is not on the bridge.", + {'interface_name': interface_name, + 'bridge_name': bridge_name}) + return False else: LOG.debug("Cannot remove device %(interface_name)s bridge " "%(bridge_name)s does not exist", @@ -675,10 +686,12 @@ return (agent_ip in entries and mac in entries) def add_fdb_ip_entry(self, mac, ip, interface): - ip_lib.IPDevice(interface).neigh.add(ip, mac) + if cfg.CONF.VXLAN.arp_responder: + ip_lib.IPDevice(interface).neigh.add(ip, mac) def remove_fdb_ip_entry(self, mac, ip, interface): - ip_lib.IPDevice(interface).neigh.delete(ip, mac) + if cfg.CONF.VXLAN.arp_responder: + ip_lib.IPDevice(interface).neigh.delete(ip, mac) def add_fdb_bridge_entry(self, mac, agent_ip, interface, operation="add"): utils.execute(['bridge', 'fdb', operation, mac, 'dev', interface, diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py 2016-08-03 20:10:34.000000000 +0000 @@ -34,7 +34,7 @@ MAC_PATTERN = r"MAC\s+(?P[a-fA-F0-9:]+)," STATE_PATTERN = r"\s+link-state\s+(?P\w+)" ANY_PATTERN = ".*," - MACVTAP_PATTERN = r".*macvtap[0-9]+@(?P[a-zA-Z0-9]+):" + MACVTAP_PATTERN = r".*macvtap[0-9]+@(?P[a-zA-Z0-9_]+):" VF_LINE_FORMAT = VF_PATTERN + MAC_PATTERN + ANY_PATTERN + STATE_PATTERN VF_DETAILS_REG_EX = re.compile(VF_LINE_FORMAT) diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py 2016-05-23 21:19:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py 2016-08-03 20:10:34.000000000 +0000 @@ -29,7 +29,7 @@ import six from neutron._i18n import _, _LE, _LI, _LW -from neutron.agent.l2.extensions import manager as ext_manager +from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.callbacks import resources @@ -186,7 +186,7 @@ def _create_agent_extension_manager(self, connection): ext_manager.register_opts(self.conf) - mgr = ext_manager.AgentExtensionsManager(self.conf) + mgr = ext_manager.L2AgentExtensionsManager(self.conf) mgr.initialize(connection, 'sriov') return mgr diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/mech_sriov/mech_driver/mech_driver.py 2016-08-29 20:05:49.000000000 +0000 @@ -34,11 +34,16 @@ sriov_opts = [ cfg.ListOpt('supported_pci_vendor_devs', - default=['15b3:1004', '8086:10ca'], help=_("Comma-separated list of supported PCI vendor devices, " "as defined by vendor_id:product_id according to the " - "PCI ID Repository. Default enables support for Intel " - "and Mellanox SR-IOV capable NICs.")), + "PCI ID Repository. 
A default of None accepts all PCI vendor " + "devices. " + "DEPRECATED: This option is deprecated in the Newton " + "release and will be removed in the Ocata release. " + "Starting from Ocata the mechanism driver will accept " + "all PCI vendor devices."), + deprecated_for_removal=True), + ] cfg.CONF.register_opts(sriov_opts, "ml2_sriov") @@ -64,14 +69,12 @@ vif_details={portbindings.CAP_PORT_FILTER: False}, supported_vnic_types=[portbindings.VNIC_DIRECT, portbindings.VNIC_MACVTAP, - portbindings.VNIC_DIRECT_PHYSICAL], - supported_pci_vendor_info=None): + portbindings.VNIC_DIRECT_PHYSICAL]): """Initialize base class for SriovNicSwitch L2 agent type. :param agent_type: Constant identifying agent type in agents_db :param vif_details: Dictionary with details for VIF driver when bound :param supported_vnic_types: The binding:vnic_type values we can bind - :param supported_pci_vendor_info: The pci_vendor_info values to bind """ self.agent_type = agent_type self.supported_vnic_types = supported_vnic_types @@ -92,7 +95,8 @@ def initialize(self): try: self.pci_vendor_info = cfg.CONF.ml2_sriov.supported_pci_vendor_devs - self._check_pci_vendor_config(self.pci_vendor_info) + if self.pci_vendor_info is not None: + self._check_pci_vendor_config(self.pci_vendor_info) except ValueError: LOG.exception(_LE("Failed to parse supported PCI vendor devices")) raise cfg.Error(_("Parsing supported pci_vendor_devs failed")) @@ -173,6 +177,8 @@ return False def _check_supported_pci_vendor_device(self, context): + if self.pci_vendor_info is None: + return True if self.pci_vendor_info: profile = context.current.get(portbindings.PROFILE, {}) if not profile: diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py 2016-08-03 20:10:34.000000000 +0000 @@ -43,8 +43,12 @@ help=_("Peer patch port in tunnel bridge for integration " "bridge.")), cfg.IPOpt('local_ip', - help=_("Local IP address of tunnel endpoint. Can be either " - "an IPv4 or IPv6 address.")), + help=_("IP address of local overlay (tunnel) network endpoint. " + "Use either an IPv4 or IPv6 address that resides on one " + "of the host network interfaces. The IP version of this " + "value must match the value of the 'overlay_ip_version' " + "option in the ML2 plug-in configuration file on the " + "neutron server node(s).")), cfg.ListOpt('bridge_mappings', default=DEFAULT_BRIDGE_MAPPINGS, help=_("Comma-separated list of <physical_network>:<bridge> " @@ -65,7 +69,7 @@ "integration bridge to physical networks. 
" "Support kernel without Open vSwitch patch port " "support so long as it is set to True.")), - cfg.StrOpt('of_interface', default='ovs-ofctl', + cfg.StrOpt('of_interface', default='native', choices=['ovs-ofctl', 'native'], help=_("OpenFlow interface to use.")), cfg.StrOpt('datapath_type', default=constants.OVS_DATAPATH_SYSTEM, diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py 2016-08-29 20:05:49.000000000 +0000 @@ -142,3 +142,6 @@ # A placeholder for dead vlans. DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1 + +# callback resource for setting 'bridge_name' in the 'binding:vif_details' +OVS_BRIDGE_NAME = 'ovs_bridge_name' diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py 2016-08-03 20:10:34.000000000 +0000 @@ -23,7 +23,7 @@ import ryu.app.ofctl.api as ofctl_api import ryu.exception as ryu_exc -from neutron._i18n import _LE, _LW +from neutron._i18n import _, _LW LOG = logging.getLogger(__name__) @@ -57,7 +57,7 @@ # The switch has not established a connection to us. # Wait for a little. if timeutils.now() > start_time + timeout_sec: - m = _LE("Switch connection timeout") + m = _("Switch connection timeout") LOG.error(m) # NOTE(yamamoto): use RuntimeError for compat with ovs_lib raise RuntimeError(m) @@ -70,7 +70,7 @@ try: result = ofctl_api.send_msg(self._app, msg, reply_cls, reply_multi) except ryu_exc.RyuException as e: - m = _LE("ofctl request %(request)s error %(error)s") % { + m = _("ofctl request %(request)s error %(error)s") % { "request": msg, "error": e, } @@ -81,7 +81,7 @@ with excutils.save_and_reraise_exception() as ctx: if e is timeout: ctx.reraise = False - m = _LE("ofctl request %(request)s timed out") % { + m = _("ofctl request %(request)s timed out") % { "request": msg, } LOG.error(m) diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py 2016-08-03 20:10:34.000000000 +0000 @@ -74,7 +74,7 @@ '''Implements the Agent API for Open vSwitch agent. Extensions can gain access to this API by overriding the consume_api - method which has been added to the AgentCoreResourceExtension class. + method which has been added to the AgentExtension class. 
''' def __init__(self, int_br, tun_br): diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_capabilities.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_capabilities.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_capabilities.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_capabilities.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,23 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lib import constants + +from neutron.plugins.ml2.drivers.agent import capabilities +from neutron.services.trunk.drivers.openvswitch.agent import driver + + +def register(): + """Register OVS capabilities.""" + # Add capabilities to be loaded during agent initialization + capabilities.register(driver.init_handler, constants.AGENT_TYPE_OVS) diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py 2016-05-23 21:19:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -204,8 +204,6 @@ def setup_dvr_flows_on_integ_br(self): '''Setup up initial dvr flows into br-int''' - if not self.in_distributed_mode(): - return LOG.info(_LI("L2 Agent operating in DVR Mode with MAC %s"), self.dvr_mac_address) @@ -234,7 +232,7 @@ def setup_dvr_flows_on_tun_br(self): '''Setup up initial dvr flows into br-tun''' - if not self.enable_tunneling or not self.in_distributed_mode(): + if not self.enable_tunneling: return self.tun_br.install_goto(dest_table_id=constants.DVR_PROCESS, @@ -250,8 +248,6 @@ def setup_dvr_flows_on_phys_br(self): '''Setup up initial dvr flows into br-phys''' - if not self.in_distributed_mode(): - return for physical_network in self.bridge_mappings: self.phys_brs[physical_network].install_goto( @@ -313,10 +309,6 @@ self.registered_dvr_macs.remove(mac) def setup_dvr_mac_flows_on_all_brs(self): - if not self.in_distributed_mode(): - LOG.debug("Not in distributed mode, ignoring invocation " - "of get_dvr_mac_address_list() ") - return dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context) LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs) for mac in dvr_macs: @@ -570,8 +562,8 @@ if local_vlan_map.network_type not in (constants.TUNNEL_NETWORK_TYPES + [p_const.TYPE_VLAN]): LOG.debug("DVR: Port %s is with network_type %s not supported" - " for dvr plumbing" % (port.vif_id, - local_vlan_map.network_type)) + " for dvr plumbing", port.vif_id, + local_vlan_map.network_type) return if (port.vif_id in self.local_ports and diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py 
neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -21,6 +21,7 @@ import sys import time +import debtcollector import netaddr from neutron_lib import constants as n_const from oslo_config import cfg @@ -37,7 +38,7 @@ from neutron.agent.common import ovs_lib from neutron.agent.common import polling from neutron.agent.common import utils -from neutron.agent.l2.extensions import manager as ext_manager +from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.callbacks import resources @@ -51,13 +52,17 @@ from neutron.extensions import portbindings from neutron.plugins.common import constants as p_const from neutron.plugins.common import utils as p_utils +from neutron.plugins.ml2.drivers.agent import capabilities from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc from neutron.plugins.ml2.drivers.openvswitch.agent.common \ import constants from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_agent_extension_api as ovs_ext_api from neutron.plugins.ml2.drivers.openvswitch.agent \ + import ovs_capabilities +from neutron.plugins.ml2.drivers.openvswitch.agent \ import ovs_dvr_neutron_agent +from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager LOG = logging.getLogger(__name__) @@ -66,31 +71,15 @@ cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.' 'common.config') +LocalVLANMapping = debtcollector.moves.moved_class( + vlanmanager.LocalVLANMapping, 'LocalVLANMapping', __name__, + version='Newton', removal_version='Ocata') + class _mac_mydialect(netaddr.mac_unix): word_fmt = '%.2x' -class LocalVLANMapping(object): - - def __init__(self, vlan, network_type, physical_network, segmentation_id, - vif_ports=None): - if vif_ports is None: - vif_ports = {} - self.vlan = vlan - self.network_type = network_type - self.physical_network = physical_network - self.segmentation_id = segmentation_id - self.vif_ports = vif_ports - # set of tunnel ports on which packets should be flooded - self.tun_ofports = set() - - def __str__(self): - return ("lv-id = %s type = %s phys-net = %s phys-id = %s" % - (self.vlan, self.network_type, self.physical_network, - self.segmentation_id)) - - class OVSPluginApi(agent_rpc.PluginApi): pass @@ -192,7 +181,7 @@ self.bridge_mappings = self._parse_bridge_mappings( ovs_conf.bridge_mappings) self.setup_physical_bridges(self.bridge_mappings) - self.local_vlan_map = {} + self.vlan_manager = vlanmanager.LocalVlanManager() self._reset_tunnel_ofports() @@ -234,7 +223,8 @@ if self.enable_tunneling: self.setup_tunnel_br_flows() - self.dvr_agent.setup_dvr_flows() + if self.enable_distributed_routing: + self.dvr_agent.setup_dvr_flows() # Collect additional bridges to monitor self.ancillary_brs = self.setup_ancillary_bridges( @@ -245,9 +235,9 @@ self._restore_local_vlan_map() # Security group agent support - self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context, - self.sg_plugin_rpc, self.local_vlan_map, - defer_refresh_firewall=True, integration_bridge=self.int_br) + self.sg_agent = sg_rpc.SecurityGroupAgentRpc( + self.context, self.sg_plugin_rpc, defer_refresh_firewall=True, + 
integration_bridge=self.int_br) # we default to False to provide backward compat with out of tree # firewall drivers that expect the logic that existed on the Neutron @@ -303,6 +293,12 @@ self.quitting_rpc_timeout = agent_conf.quitting_rpc_timeout + @debtcollector.removals.removed_property( + version='Newton', removal_version='Ocata') + def local_vlan_map(self): + """Provide backward compatibility with local_vlan_map attribute""" + return self.vlan_manager.mapping + def _parse_bridge_mappings(self, bridge_mappings): try: return n_utils.parse_mappings(bridge_mappings) @@ -336,7 +332,9 @@ def _restore_local_vlan_map(self): self._local_vlan_hints = {} - cur_ports = self.int_br.get_vif_ports() + # skip INVALID and UNASSIGNED to match scan_ports behavior + ofport_filter = (ovs_lib.INVALID_OFPORT, ovs_lib.UNASSIGNED_OFPORT) + cur_ports = self.int_br.get_vif_ports(ofport_filter) port_names = [p.port_name for p in cur_ports] port_info = self.int_br.get_ports_attributes( "Port", columns=["name", "other_config", "tag"], ports=port_names) @@ -394,17 +392,22 @@ def init_extension_manager(self, connection): ext_manager.register_opts(self.conf) self.ext_manager = ( - ext_manager.AgentExtensionsManager(self.conf)) + ext_manager.L2AgentExtensionsManager(self.conf)) self.agent_api = ovs_ext_api.OVSAgentExtensionAPI(self.int_br, self.tun_br) self.ext_manager.initialize( connection, constants.EXTENSION_DRIVER_TYPE, self.agent_api) + @debtcollector.moves.moved_method( + 'get_net_uuid', + 'OVSNeutronAgent.get_net_uuid() moved to vlanmanager.LocalVlanManager', + removal_version='Ocata') def get_net_uuid(self, vif_id): - for network_id, vlan_mapping in six.iteritems(self.local_vlan_map): - if vif_id in vlan_mapping.vif_ports: - return network_id + try: + return self.vlan_manager.get_net_uuid(vif_id) + except vlanmanager.VifIdNotFound: + pass def port_update(self, context, **kwargs): port = kwargs.get('port') @@ -509,8 +512,7 @@ def fdb_add(self, context, fdb_entries): LOG.debug("fdb_add received") - for lvm, agent_ports in self.get_agent_ports(fdb_entries, - self.local_vlan_map): + for lvm, agent_ports in self.get_agent_ports(fdb_entries): agent_ports.pop(self.local_ip, None) if len(agent_ports): if not self.enable_distributed_routing: @@ -523,8 +525,7 @@ def fdb_remove(self, context, fdb_entries): LOG.debug("fdb_remove received") - for lvm, agent_ports in self.get_agent_ports(fdb_entries, - self.local_vlan_map): + for lvm, agent_ports in self.get_agent_ports(fdb_entries): agent_ports.pop(self.local_ip, None) if len(agent_ports): if not self.enable_distributed_routing: @@ -572,7 +573,7 @@ LOG.debug("update chg_ip received") with self.tun_br.deferred() as deferred_br: self.fdb_chg_ip_tun(context, deferred_br, fdb_entries, - self.local_ip, self.local_vlan_map) + self.local_ip) def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address, ip_address): @@ -636,10 +637,10 @@ # On a restart or crash of OVS, the network associated with this VLAN # will already be assigned, so check for that here before assigning a # new one. 
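From here on the agent goes through the LocalVlanManager singleton rather than its own local_vlan_map dict; the manager is defined in the new vlanmanager module further down in this patch. A minimal sketch of its API (the network id and segmentation values are illustrative only):

from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager

vlan_manager = vlanmanager.LocalVlanManager()    # singleton instance
vlan_manager.add('net-uuid', 5, 'vxlan', None, 1001)
try:
    lvm = vlan_manager.get('net-uuid')           # a LocalVLANMapping
except vlanmanager.MappingNotFound:
    lvm = None
assert 'net-uuid' in vlan_manager                # __contains__ checks net ids
vlan_manager.pop('net-uuid')                     # raises MappingNotFound if absent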
- lvm = self.local_vlan_map.get(net_uuid) - if lvm: + try: + lvm = self.vlan_manager.get(net_uuid) lvid = lvm.vlan - else: + except vlanmanager.MappingNotFound: lvid = self._local_vlan_hints.pop(net_uuid, None) if lvid is None: if not self.available_local_vlans: @@ -647,10 +648,9 @@ net_uuid) return lvid = self.available_local_vlans.pop() - self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid, - network_type, - physical_network, - segmentation_id) + self.vlan_manager.add( + net_uuid, lvid, network_type, physical_network, + segmentation_id) LOG.info(_LI("Assigning %(vlan_id)s as local vlan for " "net-id=%(net_uuid)s"), @@ -711,8 +711,9 @@ :param net_uuid: the network uuid associated with this vlan. ''' - lvm = self.local_vlan_map.pop(net_uuid, None) - if lvm is None: + try: + lvm = vlanmanager.LocalVlanManager().pop(net_uuid) + except KeyError: LOG.debug("Network %s not used on agent.", net_uuid) return @@ -784,10 +785,10 @@ :param device_owner: the string indicative of owner of this port :param ovs_restarted: indicates if this is called for an OVS restart. ''' - if net_uuid not in self.local_vlan_map or ovs_restarted: + if net_uuid not in self.vlan_manager or ovs_restarted: self.provision_local_vlan(net_uuid, network_type, physical_network, segmentation_id) - lvm = self.local_vlan_map[net_uuid] + lvm = self.vlan_manager.get(net_uuid) lvm.vif_ports[port.vif_id] = port self.dvr_agent.bind_port_to_dvr(port, lvm, @@ -822,8 +823,9 @@ info_by_port = {x['name']: [x['tag'], x['other_config']] for x in port_info} for port_detail in need_binding_ports: - lvm = self.local_vlan_map.get(port_detail['network_id']) - if not lvm: + try: + lvm = self.vlan_manager.get(port_detail['network_id']) + except vlanmanager.MappingNotFound: continue port = port_detail['vif_port'] cur_info = info_by_port.get(port.port_name) @@ -842,8 +844,9 @@ "Port", columns=["name", "tag"], ports=port_names, if_exists=True) tags_by_name = {x['name']: x['tag'] for x in port_info} for port_detail in need_binding_ports: - lvm = self.local_vlan_map.get(port_detail['network_id']) - if not lvm: + try: + lvm = self.vlan_manager.get(port_detail['network_id']) + except vlanmanager.MappingNotFound: # network for port was deleted. skip this port since it # will need to be handled as a DEAD port in the next scan continue @@ -951,15 +954,15 @@ :param vif_id: the id of the vif :param net_uuid: the net_uuid this port is associated with. 
''' - if net_uuid is None: - net_uuid = self.get_net_uuid(vif_id) - - if not self.local_vlan_map.get(net_uuid): - LOG.info(_LI('port_unbound(): net_uuid %s not in local_vlan_map'), - net_uuid) + try: + net_uuid = net_uuid or self.vlan_manager.get_net_uuid(vif_id) + except vlanmanager.VifIdNotFound: + LOG.info( + _LI('port_unbound(): net_uuid %s not managed by VLAN manager'), + net_uuid) return - lvm = self.local_vlan_map[net_uuid] + lvm = self.vlan_manager.get(net_uuid) if vif_id in lvm.vif_ports: vif_port = lvm.vif_ports[vif_id] @@ -1103,6 +1106,7 @@ # The bridge already exists, so create won't recreate it, but will # handle things like changing the datapath_type br.create() + br.set_secure_mode() br.setup_controllers(self.conf) if cfg.CONF.AGENT.drop_flows_on_start: br.delete_flows() @@ -1372,7 +1376,7 @@ """ port_tags = self.int_br.get_port_tag_dict() changed_ports = set() - for lvm in self.local_vlan_map.values(): + for lvm in self.vlan_manager: for port in lvm.vif_ports.values(): if ( port.port_name in port_tags @@ -1448,7 +1452,7 @@ ofports = self.tun_br_ofports[tunnel_type].values() if ofports and not self.l2_pop: # Update flooding flows to include the new tunnel - for vlan_mapping in list(self.local_vlan_map.values()): + for vlan_mapping in self.vlan_manager: if vlan_mapping.network_type == tunnel_type: br.install_flood_to_tun(vlan_mapping.vlan, vlan_mapping.segmentation_id, @@ -1468,7 +1472,7 @@ def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type): # Check if this tunnel port is still used - for lvm in self.local_vlan_map.values(): + for lvm in self.vlan_manager: if tun_ofport in lvm.tun_ofports: break # If not, remove it @@ -2151,10 +2155,12 @@ def main(bridge_classes): prepare_xen_compute() + ovs_capabilities.register() validate_tunnel_config(cfg.CONF.AGENT.tunnel_types, cfg.CONF.OVS.local_ip) try: agent = OVSNeutronAgent(bridge_classes, cfg.CONF) + capabilities.notify_init_event(n_const.AGENT_TYPE_OVS, agent) except (RuntimeError, ValueError) as e: LOG.error(_LE("%s Agent terminated!"), e) sys.exit(1) diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/vlanmanager.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/vlanmanager.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/agent/vlanmanager.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/agent/vlanmanager.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,118 @@ +# Copyright 2016 Red Hat, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import debtcollector +from neutron_lib import exceptions + +from neutron._i18n import _ + + +def deprecate_local_vlan_map_in_object(object_name, stacklevel_extra=0): + debtcollector.deprecate( + "local_vlan_map argument for %s was deprecated." 
% object_name, + version="Newton", removal_version="Ocata", + stacklevel=4 + stacklevel_extra) + + +class VifIdNotFound(exceptions.NeutronException): + message = _('VIF ID %(vif_id)s not found in any network managed by ' + 'VLAN Manager') + + +class MappingAlreadyExists(exceptions.NeutronException): + message = _('VLAN mapping for network with id %(net_id)s already exists') + + +class MappingNotFound(exceptions.NeutronException): + message = _('Mapping for network %(net_id)s not found.') + + +class LocalVLANMapping(object): + def __init__(self, vlan, network_type, physical_network, segmentation_id, + vif_ports=None): + self.vlan = vlan + self.network_type = network_type + self.physical_network = physical_network + self.segmentation_id = segmentation_id + self.vif_ports = vif_ports or {} + # set of tunnel ports on which packets should be flooded + self.tun_ofports = set() + + def __str__(self): + return ("lv-id = %s type = %s phys-net = %s phys-id = %s" % + (self.vlan, self.network_type, self.physical_network, + self.segmentation_id)) + + def __eq__(self, other): + return all(hasattr(other, a) and getattr(self, a) == getattr(other, a) + for a in ['vlan', + 'network_type', + 'physical_network', + 'segmentation_id', + 'vif_ports']) + + def __hash__(self): + return id(self) + + +class LocalVlanManager(object): + """Singleton manager that maps networks to their local VLAN mappings + and external segmentation ids. + """ + + def __new__(cls): + if not hasattr(cls, '_instance'): + cls._instance = super(LocalVlanManager, cls).__new__(cls) + return cls._instance + + def __init__(self): + if not hasattr(self, 'mapping'): + self.mapping = {} + + def __contains__(self, key): + return key in self.mapping + + def __iter__(self): + for value in list(self.mapping.values()): + yield value + + def items(self): + for item in self.mapping.items(): + yield item + + def add(self, net_id, vlan, network_type, physical_network, + segmentation_id, vif_ports=None): + if net_id in self.mapping: + raise MappingAlreadyExists(net_id=net_id) + self.mapping[net_id] = LocalVLANMapping( + vlan, network_type, physical_network, segmentation_id, vif_ports) + + def get_net_uuid(self, vif_id): + for network_id, vlan_mapping in self.mapping.items(): + if vif_id in vlan_mapping.vif_ports: + return network_id + raise VifIdNotFound(vif_id=vif_id) + + def get(self, net_id): + try: + return self.mapping[net_id] + except KeyError: + raise MappingNotFound(net_id=net_id) + + def pop(self, net_id): + try: + return self.mapping.pop(net_id) + except KeyError: + raise MappingNotFound(net_id=net_id) diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py 2016-08-29 20:05:49.000000000 +0000 @@ -19,6 +19,8 @@ from oslo_config import cfg from neutron.agent import securitygroups_rpc +from neutron.callbacks import events +from neutron.callbacks import registry from neutron.extensions import portbindings from neutron.plugins.common import constants as p_constants from neutron.plugins.ml2 import driver_api as api @@ -42,7 +44,7 @@ """ supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT, - qos_consts.RULE_TYPE_DSCP_MARK] + qos_consts.RULE_TYPE_DSCP_MARKING] def 
__init__(self): sg_enabled = securitygroups_rpc.is_firewall_enabled() @@ -86,13 +88,31 @@ return self.vif_type def get_vif_details(self, agent, context): + vif_details = self._pre_get_vif_details(agent, context) + self._set_bridge_name(context.current, vif_details) + return vif_details + + @staticmethod + def _set_bridge_name(port, vif_details): + # REVISIT(rawlin): add BridgeName as a nullable column to the Port + # model and simply check here if it's set and insert it into the + # vif_details. + + def set_bridge_name_inner(bridge_name): + vif_details[portbindings.VIF_DETAILS_BRIDGE_NAME] = bridge_name + + registry.notify( + a_const.OVS_BRIDGE_NAME, events.BEFORE_READ, + set_bridge_name_inner, port=port) + + def _pre_get_vif_details(self, agent, context): a_config = agent['configurations'] if a_config.get('datapath_type') != a_const.OVS_DATAPATH_NETDEV: details = dict(self.vif_details) hybrid = portbindings.OVS_HYBRID_PLUG if hybrid in a_config: - # we only override the vif_details for hybrid pluggin set - # in the constuctor if the agent specifically requests it + # we only override the vif_details for hybrid plugging set + # in the constructor if the agent specifically requests it details[hybrid] = a_config[hybrid] return details caps = a_config.get('ovs_capabilities', {}) diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/type_flat.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/type_flat.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/type_flat.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/type_flat.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,16 +13,18 @@ # License for the specific language governing permissions and limitations # under the License. +import sys + from neutron_lib import exceptions as exc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log import six -import sqlalchemy as sa from neutron._i18n import _, _LI, _LW +from neutron.common import _deprecate from neutron.common import exceptions as n_exc -from neutron.db import model_base +from neutron.db.models.plugins.ml2 import flatallocation as type_flat_model from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import helpers @@ -41,19 +43,6 @@ cfg.CONF.register_opts(flat_opts, "ml2_type_flat") -class FlatAllocation(model_base.BASEV2): - """Represent persistent allocation state of a physical network. - - If a record exists for a physical network, then that physical - network has been allocated as a flat network. - """ - - __tablename__ = 'ml2_flat_allocations' - - physical_network = sa.Column(sa.String(64), nullable=False, - primary_key=True) - - class FlatTypeDriver(helpers.BaseTypeDriver): """Manage state for flat networks with ML2. @@ -114,7 +103,8 @@ try: LOG.debug("Reserving flat network on physical " "network %s", physical_network) - alloc = FlatAllocation(physical_network=physical_network) + alloc = type_flat_model.FlatAllocation( + physical_network=physical_network) alloc.save(session) except db_exc.DBDuplicateEntry: raise n_exc.FlatNetworkInUse( @@ -129,7 +119,7 @@ def release_segment(self, session, segment): physical_network = segment[api.PHYSICAL_NETWORK] with session.begin(subtransactions=True): - count = (session.query(FlatAllocation). + count = (session.query(type_flat_model.FlatAllocation). filter_by(physical_network=physical_network). 
delete()) if count: @@ -147,3 +137,9 @@ if physical_network in self.physnet_mtus: mtu.append(int(self.physnet_mtus[physical_network])) return min(mtu) if mtu else 0 + + +# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE +_OLD_REF = sys.modules[__name__] +sys.modules[__name__] = _deprecate._DeprecateSubset(globals(), type_flat_model) +# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/type_gre.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/type_gre.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/type_gre.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/type_gre.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,14 +13,15 @@ # License for the specific language governing permissions and limitations # under the License. +import sys + from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log -import sqlalchemy as sa -from sqlalchemy import sql from neutron._i18n import _, _LE -from neutron.db import model_base +from neutron.common import _deprecate +from neutron.db.models.plugins.ml2 import gre_allocation_endpoints as gre_model from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers import type_tunnel @@ -37,37 +38,11 @@ cfg.CONF.register_opts(gre_opts, "ml2_type_gre") -class GreAllocation(model_base.BASEV2): - - __tablename__ = 'ml2_gre_allocations' - - gre_id = sa.Column(sa.Integer, nullable=False, primary_key=True, - autoincrement=False) - allocated = sa.Column(sa.Boolean, nullable=False, default=False, - server_default=sql.false(), index=True) - - -class GreEndpoints(model_base.BASEV2): - """Represents tunnel endpoint in RPC mode.""" - - __tablename__ = 'ml2_gre_endpoints' - __table_args__ = ( - sa.UniqueConstraint('host', - name='unique_ml2_gre_endpoints0host'), - model_base.BASEV2.__table_args__ - ) - ip_address = sa.Column(sa.String(64), primary_key=True) - host = sa.Column(sa.String(255), nullable=True) - - def __repr__(self): - return "<GreTunnelEndpoint(%s)>" % self.ip_address - - class GreTypeDriver(type_tunnel.EndpointTunnelTypeDriver): def __init__(self): super(GreTypeDriver, self).__init__( - GreAllocation, GreEndpoints) + gre_model.GreAllocation, gre_model.GreEndpoints) def get_type(self): return p_const.TYPE_GRE @@ -93,3 +68,9 @@ def get_mtu(self, physical_network=None): mtu = super(GreTypeDriver, self).get_mtu(physical_network) return mtu - p_const.GRE_ENCAP_OVERHEAD if mtu else 0 + + +# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE +_OLD_REF = sys.modules[__name__] +sys.modules[__name__] = _deprecate._DeprecateSubset(globals(), gre_model) +# WARNING: THESE MUST BE THE LAST TWO LINES IN THIS MODULE diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/type_tunnel.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/type_tunnel.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/drivers/type_tunnel.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/drivers/type_tunnel.py 2016-08-03 20:10:34.000000000 +0000 @@ -16,6 +16,7 @@ import itertools import operator +import netaddr from neutron_lib import exceptions as exc from oslo_config import cfg from oslo_db import api as oslo_db_api @@ -28,6 +29,7 @@ from neutron._i18n import _, _LI, _LW from neutron.common import topics from neutron.db import api as db_api +from neutron.plugins.common import constants as p_const from neutron.plugins.common import utils as plugin_utils from neutron.plugins.ml2
import driver_api as api from neutron.plugins.ml2.drivers import helpers @@ -251,7 +253,9 @@ mtu.append(seg_mtu) if cfg.CONF.ml2.path_mtu > 0: mtu.append(cfg.CONF.ml2.path_mtu) - return min(mtu) if mtu else 0 + version = cfg.CONF.ml2.overlay_ip_version + ip_header_length = p_const.IP_HEADER_LENGTH[version] + return min(mtu) - ip_header_length if mtu else 0 class EndpointTunnelTypeDriver(TunnelTypeDriver): @@ -324,12 +328,19 @@ msg = _("Tunnel IP value needed by the ML2 plugin") raise exc.InvalidInput(error_message=msg) + host = kwargs.get('host') + version = netaddr.IPAddress(tunnel_ip).version + if version != cfg.CONF.ml2.overlay_ip_version: + msg = (_("Tunnel IP version does not match ML2 " + "overlay_ip_version, host: %(host)s, tunnel_ip: %(ip)s") % + {'host': host, 'ip': tunnel_ip}) + raise exc.InvalidInput(error_message=msg) + tunnel_type = kwargs.get('tunnel_type') if not tunnel_type: msg = _("Network type value needed by the ML2 plugin") raise exc.InvalidInput(error_message=msg) - host = kwargs.get('host') driver = self._type_manager.drivers.get(tunnel_type) if driver: # The given conditional statements will verify the following diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/extensions/dns_integration.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/extensions/dns_integration.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/extensions/dns_integration.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/extensions/dns_integration.py 2016-08-29 20:05:49.000000000 +0000 @@ -31,6 +31,7 @@ from neutron.services.externaldns import driver LOG = logging.getLogger(__name__) +DNS_DOMAIN_DEFAULT = 'openstacklocal.' class DNSExtensionDriver(api.ExtensionDriver): @@ -75,30 +76,31 @@ db_data[dns.DNSDOMAIN] = new_value def process_create_port(self, plugin_context, request_data, db_data): - if not request_data[dns.DNSNAME]: + if not request_data.get(dns.DNSNAME): return - network = self._get_network(plugin_context, db_data['network_id']) - if not network[dns.DNSDOMAIN]: - return - if self.external_dns_not_needed(plugin_context, network): + dns_name, is_dns_domain_default = self._get_request_dns_name( + request_data) + if is_dns_domain_default: return + network = self._get_network(plugin_context, db_data['network_id']) + if self.external_dns_not_needed( + plugin_context, network) or not network[dns.DNSDOMAIN]: + current_dns_name = '' + current_dns_domain = '' + else: + current_dns_name = dns_name + current_dns_domain = network[dns.DNSDOMAIN] + plugin_context.session.add(dns_db.PortDNS( port_id=db_data['id'], - current_dns_name=request_data[dns.DNSNAME], - current_dns_domain=network[dns.DNSDOMAIN], - previous_dns_name='', previous_dns_domain='')) + current_dns_name=current_dns_name, + current_dns_domain=current_dns_domain, + previous_dns_name='', previous_dns_domain='', + dns_name=dns_name)) + + def _update_dns_db(self, dns_name, dns_domain, db_data, + plugin_context, has_fixed_ips): - def process_update_port(self, plugin_context, request_data, db_data): - dns_name = request_data.get(dns.DNSNAME) - has_fixed_ips = 'fixed_ips' in request_data - if dns_name is None and not has_fixed_ips: - return - network = self._get_network(plugin_context, db_data['network_id']) - if not network[dns.DNSDOMAIN]: - return - if self.external_dns_not_needed(plugin_context, network): - return - dns_domain = network[dns.DNSDOMAIN] dns_data_db = plugin_context.session.query(dns_db.PortDNS).filter_by( port_id=db_data['id']).one_or_none() if dns_data_db: @@ -112,18 +114,67 @@
dns_data_db['previous_dns_domain'] = ( dns_data_db['current_dns_domain']) if is_dns_name_changed: + dns_data_db[dns.DNSNAME] = dns_name dns_data_db['current_dns_name'] = dns_name if dns_name: dns_data_db['current_dns_domain'] = dns_domain else: dns_data_db['current_dns_domain'] = '' + + return dns_data_db + if dns_name: + dns_data_db = dns_db.PortDNS(port_id=db_data['id'], + current_dns_name=dns_name, + current_dns_domain=dns_domain, + previous_dns_name='', + previous_dns_domain='', + dns_name=dns_name) + plugin_context.session.add(dns_data_db) + return dns_data_db + + def process_update_port(self, plugin_context, request_data, db_data): + dns_name = request_data.get(dns.DNSNAME) + has_fixed_ips = 'fixed_ips' in request_data + if dns_name is None and not has_fixed_ips: return + if dns_name is not None: + dns_name, is_dns_domain_default = self._get_request_dns_name( + request_data) + if is_dns_domain_default: + self._extend_port_dict(plugin_context.session, db_data, + db_data, None) + return + network = self._get_network(plugin_context, db_data['network_id']) + dns_domain = network[dns.DNSDOMAIN] + dns_data_db = None + if not dns_domain or self.external_dns_not_needed(plugin_context, + network): + # No need to update external DNS service. Only process the port's + # dns_name attribute if necessary + if dns_name is not None: + dns_data_db = self._process_only_dns_name_update( + plugin_context, db_data, dns_name) + else: + dns_data_db = self._update_dns_db(dns_name, dns_domain, db_data, + plugin_context, has_fixed_ips) + self._extend_port_dict(plugin_context.session, db_data, db_data, + dns_data_db) + + def _process_only_dns_name_update(self, plugin_context, db_data, dns_name): + dns_data_db = plugin_context.session.query(dns_db.PortDNS).filter_by( + port_id=db_data['id']).one_or_none() + if dns_data_db: + dns_data_db['dns_name'] = dns_name + return dns_data_db if dns_name: - plugin_context.session.add(dns_db.PortDNS( - port_id=db_data['id'], - current_dns_name=dns_name, - current_dns_domain=dns_domain, - previous_dns_name='', previous_dns_domain='')) + dns_data_db = dns_db.PortDNS(port_id=db_data['id'], + current_dns_name='', + current_dns_domain='', + previous_dns_name='', + previous_dns_domain='', + dns_name=dns_name) + plugin_context.session.add(dns_data_db) + return dns_data_db def external_dns_not_needed(self, context, network): """Decide if ports in network need to be sent to the DNS service. @@ -140,10 +191,67 @@ response_data[dns.DNSDOMAIN] = db_data.dns_domain[dns.DNSDOMAIN] return response_data - def extend_port_dict(self, session, db_data, response_data): - response_data[dns.DNSNAME] = db_data[dns.DNSNAME] + def _get_dns_domain(self): + if not cfg.CONF.dns_domain: + return '' + if cfg.CONF.dns_domain.endswith('.'): + return cfg.CONF.dns_domain + return '%s.' 
% cfg.CONF.dns_domain + + def _get_request_dns_name(self, port): + dns_domain = self._get_dns_domain() + if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)): + return (port.get(dns.DNSNAME, ''), False) + return ('', True) + + def _get_request_dns_name_and_domain_name(self, dns_data_db): + dns_domain = self._get_dns_domain() + dns_name = '' + if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)): + if dns_data_db: + dns_name = dns_data_db.dns_name + return dns_name, dns_domain + + def _get_dns_names_for_port(self, ips, dns_data_db): + dns_assignment = [] + dns_name, dns_domain = self._get_request_dns_name_and_domain_name( + dns_data_db) + for ip in ips: + if dns_name: + hostname = dns_name + fqdn = dns_name + if not dns_name.endswith('.'): + fqdn = '%s.%s' % (dns_name, dns_domain) + else: + hostname = 'host-%s' % ip['ip_address'].replace( + '.', '-').replace(':', '-') + fqdn = hostname + if dns_domain: + fqdn = '%s.%s' % (hostname, dns_domain) + dns_assignment.append({'ip_address': ip['ip_address'], + 'hostname': hostname, + 'fqdn': fqdn}) + return dns_assignment + + def _get_dns_name_for_port_get(self, port, dns_data_db): + if port['fixed_ips']: + return self._get_dns_names_for_port(port['fixed_ips'], dns_data_db) + return [] + + def _extend_port_dict(self, session, db_data, response_data, dns_data_db): + if not dns_data_db: + response_data[dns.DNSNAME] = '' + else: + response_data[dns.DNSNAME] = dns_data_db[dns.DNSNAME] + response_data['dns_assignment'] = self._get_dns_name_for_port_get( + db_data, dns_data_db) return response_data + def extend_port_dict(self, session, db_data, response_data): + dns_data_db = db_data.dns + return self._extend_port_dict(session, db_data, response_data, + dns_data_db) + def _get_network(self, context, network_id): plugin = manager.NeutronManager.get_plugin() return plugin.get_network(context, network_id) @@ -232,7 +340,7 @@ port = kwargs['port'] dns_data_db = context.session.query(dns_db.PortDNS).filter_by( port_id=port['id']).one_or_none() - if not dns_data_db: + if not (dns_data_db and dns_data_db['current_dns_name']): return records = [ip['ip_address'] for ip in port['fixed_ips']] _send_data_to_external_dns_service(context, dns_driver, @@ -248,10 +356,10 @@ except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e: LOG.exception(_LE("Error publishing port data in external DNS " "service. Name: '%(name)s'. Domain: '%(domain)s'. " - "DNS service driver message '%(message)s'") - % {"name": dns_name, - "domain": dns_domain, - "message": e.msg}) + "DNS service driver message '%(message)s'"), + {"name": dns_name, + "domain": dns_domain, + "message": e.msg}) def _remove_data_from_external_dns_service(context, dns_driver, dns_domain, @@ -262,11 +370,11 @@ LOG.exception(_LE("Error deleting port data from external DNS " "service. Name: '%(name)s'. Domain: '%(domain)s'. " "IP addresses '%(ips)s'. 
DNS service driver message " - "'%(message)s'") - % {"name": dns_name, - "domain": dns_domain, - "message": e.msg, - "ips": ', '.join(records)}) + "'%(message)s'"), + {"name": dns_name, + "domain": dns_domain, + "message": e.msg, + "ips": ', '.join(records)}) def _update_port_in_external_dns_service(resource, event, trigger, **kwargs): @@ -288,7 +396,8 @@ return dns_data_db = context.session.query(dns_db.PortDNS).filter_by( port_id=updated_port['id']).one_or_none() - if not dns_data_db: + if not (dns_data_db and (dns_data_db['previous_dns_name'] or dns_data_db[ + 'current_dns_name'])): return if dns_data_db['previous_dns_name']: _remove_data_from_external_dns_service( diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/managers.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/managers.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/managers.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/managers.py 2016-08-29 20:05:49.000000000 +0000 @@ -23,6 +23,7 @@ import stevedore from neutron._i18n import _, _LE, _LI, _LW +from neutron.db import api as db_api from neutron.db import segments_db from neutron.extensions import external_net from neutron.extensions import multiprovidernet as mpnet @@ -162,7 +163,7 @@ def _extend_network_dict_provider(self, network, segments): if not segments: - LOG.error(_LE("Network %s has no segments"), network['id']) + LOG.debug("Network %s has no segments", network['id']) for attr in provider.ATTRIBUTES: network[attr] = None elif len(segments) > 1: @@ -182,34 +183,50 @@ LOG.info(_LI("Initializing driver for type '%s'"), network_type) driver.obj.initialize() - def _add_network_segment(self, session, network_id, segment, mtu, + def _add_network_segment(self, context, network_id, segment, segment_index=0): segments_db.add_network_segment( - session, network_id, segment, segment_index) - if segment.get(api.MTU, 0) > 0: - mtu.append(segment[api.MTU]) + context, network_id, segment, segment_index) def create_network_segments(self, context, network, tenant_id): """Call type drivers to create network segments.""" segments = self._process_provider_create(network) session = context.session - mtu = [] with session.begin(subtransactions=True): network_id = network['id'] if segments: for segment_index, segment in enumerate(segments): segment = self.reserve_provider_segment( session, segment) - self._add_network_segment(session, network_id, segment, - mtu, segment_index) + self._add_network_segment(context, network_id, segment, + segment_index) elif (cfg.CONF.ml2.external_network_type and self._get_attribute(network, external_net.EXTERNAL)): segment = self._allocate_ext_net_segment(session) - self._add_network_segment(session, network_id, segment, mtu) + self._add_network_segment(context, network_id, segment) else: segment = self._allocate_tenant_net_segment(session) - self._add_network_segment(session, network_id, segment, mtu) - network[api.MTU] = min(mtu) if mtu else 0 + self._add_network_segment(context, network_id, segment) + + def reserve_network_segment(self, session, segment_data): + """Call type drivers to reserve a network segment.""" + # Validate the data of segment + if not validators.is_attr_set(segment_data[api.NETWORK_TYPE]): + msg = _("network_type required") + raise exc.InvalidInput(error_message=msg) + + net_type = self._get_attribute(segment_data, api.NETWORK_TYPE) + phys_net = self._get_attribute(segment_data, api.PHYSICAL_NETWORK) + seg_id = self._get_attribute(segment_data, api.SEGMENTATION_ID) + segment = 
{api.NETWORK_TYPE: net_type, + api.PHYSICAL_NETWORK: phys_net, + api.SEGMENTATION_ID: seg_id} + + self.validate_provider_segment(segment) + + # Reserve segment in type driver + with session.begin(subtransactions=True): + return self.reserve_provider_segment(session, segment) def is_partial_segment(self, segment): network_type = segment[api.NETWORK_TYPE] @@ -257,26 +274,30 @@ filter_dynamic=None) for segment in segments: - network_type = segment.get(api.NETWORK_TYPE) - driver = self.drivers.get(network_type) - if driver: - driver.obj.release_segment(session, segment) - else: - LOG.error(_LE("Failed to release segment '%s' because " - "network type is not supported."), segment) + self.release_network_segment(session, segment) + + def release_network_segment(self, session, segment): + network_type = segment.get(api.NETWORK_TYPE) + driver = self.drivers.get(network_type) + if driver: + driver.obj.release_segment(session, segment) + else: + LOG.error(_LE("Failed to release segment '%s' because " + "network type is not supported."), segment) - def allocate_dynamic_segment(self, session, network_id, segment): + def allocate_dynamic_segment(self, context, network_id, segment): """Allocate a dynamic segment using a partial or full segment dict.""" dynamic_segment = segments_db.get_dynamic_segment( - session, network_id, segment.get(api.PHYSICAL_NETWORK), + context.session, network_id, segment.get(api.PHYSICAL_NETWORK), segment.get(api.SEGMENTATION_ID)) if dynamic_segment: return dynamic_segment driver = self.drivers.get(segment.get(api.NETWORK_TYPE)) - dynamic_segment = driver.obj.reserve_provider_segment(session, segment) - segments_db.add_network_segment(session, network_id, dynamic_segment, + dynamic_segment = driver.obj.reserve_provider_segment(context.session, + segment) + segments_db.add_network_segment(context, network_id, dynamic_segment, is_dynamic=True) return dynamic_segment @@ -391,37 +412,51 @@ raise vlantransparent.VlanTransparencyDriverError() def _call_on_drivers(self, method_name, context, - continue_on_failure=False): + continue_on_failure=False, raise_db_retriable=False): """Helper method for calling a method across all mechanism drivers. :param method_name: name of the method to call :param context: context parameter to pass to each method call :param continue_on_failure: whether or not to continue to call all mechanism drivers once one has raised an exception - :raises: neutron.plugins.ml2.common.MechanismDriverError - if any mechanism driver call fails. + :param raise_db_retriable: whether or not to treat retriable db + exception by mechanism drivers to propagate up to upper layer so + that upper layer can handle it or error in the ML2 layer + :raises: neutron.plugins.ml2.common.MechanismDriverError + if any mechanism driver call fails, or DB retriable error when + raise_db_retriable=True.
See neutron.db.api.is_retriable for + what db exception is retriable """ - error = False + errors = [] for driver in self.ordered_mech_drivers: try: getattr(driver.obj, method_name)(context) - except Exception: + except Exception as e: + if raise_db_retriable and db_api.is_retriable(e): + with excutils.save_and_reraise_exception(): + LOG.debug("DB exception raised by Mechanism driver " + "'%(name)s' in %(method)s", + {'name': driver.name, 'method': method_name}, + exc_info=e) LOG.exception( _LE("Mechanism driver '%(name)s' failed in %(method)s"), {'name': driver.name, 'method': method_name} ) - error = True + errors.append(e) if not continue_on_failure: break - if error: + if errors: raise ml2_exc.MechanismDriverError( - method=method_name + method=method_name, + errors=errors ) def create_network_precommit(self, context): """Notify all mechanism drivers during network creation. - :raises: neutron.plugins.ml2.common.MechanismDriverError + :raises: DB retriable error if create_network_precommit raises them + See neutron.db.api.is_retriable for what db exception is retriable + or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_network_precommit call fails. Called within the database transaction. If a mechanism driver @@ -430,7 +465,8 @@ that all mechanism drivers are called in this case. """ self._check_vlan_transparency(context) - self._call_on_drivers("create_network_precommit", context) + self._call_on_drivers("create_network_precommit", context, + raise_db_retriable=True) def create_network_postcommit(self, context): """Notify all mechanism drivers after network creation. @@ -449,7 +485,9 @@ def update_network_precommit(self, context): """Notify all mechanism drivers during network update. - :raises: neutron.plugins.ml2.common.MechanismDriverError + :raises: DB retriable error if create_network_precommit raises them + See neutron.db.api.is_retriable for what db exception is retriable + or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_network_precommit call fails. Called within the database transaction. If a mechanism driver @@ -457,7 +495,8 @@ to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ - self._call_on_drivers("update_network_precommit", context) + self._call_on_drivers("update_network_precommit", context, + raise_db_retriable=True) def update_network_postcommit(self, context): """Notify all mechanism drivers after network update. @@ -476,7 +515,9 @@ def delete_network_precommit(self, context): """Notify all mechanism drivers during network deletion. - :raises: neutron.plugins.ml2.common.MechanismDriverError + :raises: DB retriable error if create_network_precommit raises them + See neutron.db.api.is_retriable for what db exception is retriable + or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_network_precommit call fails. Called within the database transaction. If a mechanism driver @@ -484,7 +525,8 @@ to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ - self._call_on_drivers("delete_network_precommit", context) + self._call_on_drivers("delete_network_precommit", context, + raise_db_retriable=True) def delete_network_postcommit(self, context): """Notify all mechanism drivers after network deletion. @@ -507,7 +549,9 @@ def create_subnet_precommit(self, context): """Notify all mechanism drivers during subnet creation. 
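
The reworked _call_on_drivers above separates two failure modes: retriable DB errors are re-raised immediately so the upper layer can retry the whole operation, while other driver failures are collected and wrapped. A minimal standalone sketch of that dispatch pattern, with is_retriable standing in for neutron.db.api.is_retriable and a toy exception replacing the real oslo.db types:

class DeadlockError(Exception):
    """Stand-in for a retriable DB error such as oslo.db's DBDeadlock."""

def is_retriable(exc):
    # Stand-in for neutron.db.api.is_retriable.
    return isinstance(exc, DeadlockError)

class MechanismDriverError(Exception):
    def __init__(self, method, errors):
        super(MechanismDriverError, self).__init__(
            "%s failed: %r" % (method, errors))
        self.errors = errors

def call_on_drivers(drivers, method_name, context,
                    continue_on_failure=False, raise_db_retriable=False):
    errors = []
    for driver in drivers:
        try:
            getattr(driver, method_name)(context)
        except Exception as e:
            if raise_db_retriable and is_retriable(e):
                raise  # propagate so the upper layer can retry
            errors.append(e)
            if not continue_on_failure:
                break
    if errors:
        raise MechanismDriverError(method=method_name, errors=errors)
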
- :raises: neutron.plugins.ml2.common.MechanismDriverError + :raises: DB retriable error if create_network_precommit raises them + See neutron.db.api.is_retriable for what db exception is retriable + or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_subnet_precommit call fails. Called within the database transaction. If a mechanism driver @@ -515,7 +559,8 @@ to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ - self._call_on_drivers("create_subnet_precommit", context) + self._call_on_drivers("create_subnet_precommit", context, + raise_db_retriable=True) def create_subnet_postcommit(self, context): """Notify all mechanism drivers after subnet creation. @@ -534,7 +579,9 @@ def update_subnet_precommit(self, context): """Notify all mechanism drivers during subnet update. - :raises: neutron.plugins.ml2.common.MechanismDriverError + :raises: DB retriable error if create_network_precommit raises them + See neutron.db.api.is_retriable for what db exception is retriable + or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_subnet_precommit call fails. Called within the database transaction. If a mechanism driver @@ -542,7 +589,8 @@ to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ - self._call_on_drivers("update_subnet_precommit", context) + self._call_on_drivers("update_subnet_precommit", context, + raise_db_retriable=True) def update_subnet_postcommit(self, context): """Notify all mechanism drivers after subnet update. @@ -561,7 +609,9 @@ def delete_subnet_precommit(self, context): """Notify all mechanism drivers during subnet deletion. - :raises: neutron.plugins.ml2.common.MechanismDriverError + :raises: DB retriable error if create_network_precommit raises them + See neutron.db.api.is_retriable for what db exception is retriable + or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_subnet_precommit call fails. Called within the database transaction. If a mechanism driver @@ -569,7 +619,8 @@ to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ - self._call_on_drivers("delete_subnet_precommit", context) + self._call_on_drivers("delete_subnet_precommit", context, + raise_db_retriable=True) def delete_subnet_postcommit(self, context): """Notify all mechanism drivers after subnet deletion. @@ -592,7 +643,9 @@ def create_port_precommit(self, context): """Notify all mechanism drivers during port creation. - :raises: neutron.plugins.ml2.common.MechanismDriverError + :raises: DB retriable error if create_network_precommit raises them + See neutron.db.api.is_retriable for what db exception is retriable + or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver create_port_precommit call fails. Called within the database transaction. If a mechanism driver @@ -600,7 +653,8 @@ to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ - self._call_on_drivers("create_port_precommit", context) + self._call_on_drivers("create_port_precommit", context, + raise_db_retriable=True) def create_port_postcommit(self, context): """Notify all mechanism drivers of port creation. @@ -619,7 +673,9 @@ def update_port_precommit(self, context): """Notify all mechanism drivers during port update. 
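
These docstring updates restate the driver contract: every *_precommit hook runs inside the open DB transaction (raising rolls the operation back, and retriable DB errors now bubble up for a retry), while *_postcommit runs after commit and may safely touch external systems. A toy mechanism driver illustrating that split; the class is hypothetical and only the ML2 driver_api base class is assumed:

from neutron.plugins.ml2 import driver_api as api

class AuditMechanismDriver(api.MechanismDriver):
    """Hypothetical driver used only to illustrate the hook contract."""

    def initialize(self):
        self.committed_ports = []

    def update_port_precommit(self, context):
        # Runs inside the DB transaction: validate and raise to roll back.
        if not context.current.get('id'):
            raise ValueError("port without an id")

    def update_port_postcommit(self, context):
        # Runs after commit: safe to notify external systems here.
        self.committed_ports.append(context.current['id'])
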
- :raises: neutron.plugins.ml2.common.MechanismDriverError + :raises: DB retriable error if create_network_precommit raises them + See neutron.db.api.is_retriable for what db exception is retriable + or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver update_port_precommit call fails. Called within the database transaction. If a mechanism driver @@ -627,7 +683,8 @@ to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ - self._call_on_drivers("update_port_precommit", context) + self._call_on_drivers("update_port_precommit", context, + raise_db_retriable=True) def update_port_postcommit(self, context): """Notify all mechanism drivers after port update. @@ -646,7 +703,9 @@ def delete_port_precommit(self, context): """Notify all mechanism drivers during port deletion. - :raises: neutron.plugins.ml2.common.MechanismDriverError + :raises:DB retriable error if create_network_precommit raises them + See neutron.db.api.is_retriable for what db exception is retriable + or neutron.plugins.ml2.common.MechanismDriverError if any mechanism driver delete_port_precommit call fails. Called within the database transaction. If a mechanism driver @@ -654,7 +713,8 @@ to the caller, triggering a rollback. There is no guarantee that all mechanism drivers are called in this case. """ - self._call_on_drivers("delete_port_precommit", context) + self._call_on_drivers("delete_port_precommit", context, + raise_db_retriable=True) def delete_port_postcommit(self, context): """Notify all mechanism drivers after port deletion. diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/plugin.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/plugin.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/plugin.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/plugin.py 2016-08-29 20:05:49.000000000 +0000 @@ -19,7 +19,6 @@ from neutron_lib import exceptions as exc from oslo_concurrency import lockutils from oslo_config import cfg -from oslo_db import api as oslo_db_api from oslo_db import exception as os_db_exception from oslo_log import helpers as log_helpers from oslo_log import log @@ -27,7 +26,6 @@ from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import uuidutils -from sqlalchemy import exc as sql_exc from sqlalchemy.orm import exc as sa_exc from neutron._i18n import _, _LE, _LI, _LW @@ -57,17 +55,18 @@ from neutron.db import dvr_mac_db from neutron.db import external_net_db from neutron.db import extradhcpopt_db +from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 -from neutron.db import netmtu_db from neutron.db import provisioning_blocks from neutron.db.quota import driver # noqa -from neutron.db import securitygroups_db from neutron.db import securitygroups_rpc_base as sg_db_rpc from neutron.db import segments_db +from neutron.db import subnet_service_type_db_models as service_type_db from neutron.db import vlantransparent_db from neutron.extensions import allowedaddresspairs as addr_pair from neutron.extensions import availability_zone as az_ext from neutron.extensions import extra_dhcp_opt as edo_ext +from neutron.extensions import multiprovidernet as mpnet from neutron.extensions import portbindings from neutron.extensions import portsecurity as psec from neutron.extensions import providernet as provider @@ -85,6 +84,7 @@ from neutron.plugins.ml2 import rpc from neutron.quota import resource_registry from neutron.services.qos import 
qos_consts +from neutron.services.segments import plugin as segments_plugin LOG = log.getLogger(__name__) @@ -104,8 +104,8 @@ addr_pair_db.AllowedAddressPairsMixin, vlantransparent_db.Vlantransparent_db_mixin, extradhcpopt_db.ExtraDhcpOptMixin, - netmtu_db.Netmtu_db_mixin, - address_scope_db.AddressScopeDbMixin): + address_scope_db.AddressScopeDbMixin, + service_type_db.SubnetServiceTypeMixin): """Implement the Neutron L2 abstractions using modules. @@ -133,7 +133,8 @@ "address-scope", "availability_zone", "network_availability_zone", - "default-subnetpools"] + "default-subnetpools", + "subnet-service-types"] @property def supported_extension_aliases(self): @@ -150,8 +151,8 @@ port=models_v2.Port, subnet=models_v2.Subnet, subnetpool=models_v2.SubnetPool, - security_group=securitygroups_db.SecurityGroup, - security_group_rule=securitygroups_db.SecurityGroupRule) + security_group=sg_models.SecurityGroup, + security_group_rule=sg_models.SecurityGroupRule) def __init__(self): # First load drivers, then initialize DB, then initialize drivers self.type_manager = managers.TypeManager() @@ -163,6 +164,14 @@ self.mechanism_manager.initialize() registry.subscribe(self._port_provisioned, resources.PORT, provisioning_blocks.PROVISIONING_COMPLETE) + registry.subscribe(self._handle_segment_change, resources.SEGMENT, + events.PRECOMMIT_CREATE) + registry.subscribe(self._handle_segment_change, resources.SEGMENT, + events.PRECOMMIT_DELETE) + registry.subscribe(self._handle_segment_change, resources.SEGMENT, + events.AFTER_CREATE) + registry.subscribe(self._handle_segment_change, resources.SEGMENT, + events.AFTER_DELETE) self._setup_dhcp() self._start_rpc_notifiers() self.add_agent_status_check_worker(self.agent_health_check) @@ -204,7 +213,7 @@ **kwargs): port_id = object_id port = db.get_port(context.session, port_id) - if not port: + if not port or not port.port_binding: LOG.debug("Port %s was deleted so its status cannot be updated.", port_id) return @@ -215,6 +224,16 @@ LOG.debug("Port %s cannot update to ACTIVE because it " "is not bound.", port_id) return + else: + # port is bound, but we have to check for new provisioning blocks + # one last time to detect the case where we were triggered by an + # unbound port and the port became bound with new provisioning + # blocks before 'get_port' was called above + if provisioning_blocks.is_object_blocked(context, port_id, + resources.PORT): + LOG.debug("Port %s had new provisioning blocks added so it " + "will not transition to active.", port_id) + return self.update_port_status(context, port_id, const.PORT_STATUS_ACTIVE) @property @@ -466,7 +485,7 @@ # in the distributed case. Since the PortBinding # instance will then be needed, it does not make sense # to optimize this code to avoid fetching it. 
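
The _port_provisioned change above closes a race: a port that was unbound when the provisioning event fired may have become bound, and picked up fresh provisioning blocks, before the handler ran. A behavioral sketch of the guard using plain data structures; none of these names are neutron's:

def maybe_activate(port, blocked_ids, set_active):
    """Only flip a port ACTIVE once it is bound and nothing new blocks it."""
    if port is None or port.get('binding') is None:
        return False      # deleted or never bound: nothing to update
    if not port['binding'].get('host'):
        return False      # not bound to a host yet
    if port['id'] in blocked_ids:
        return False      # new provisioning blocks appeared after binding
    set_active(port['id'])
    return True

activated = []
port = {'id': 'p1', 'binding': {'host': 'compute-1'}}
assert maybe_activate(port, blocked_ids=set(), set_active=activated.append)
assert not maybe_activate(port, blocked_ids={'p1'},
                          set_active=activated.append)
assert activated == ['p1']
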
- cur_binding = db.get_dvr_port_binding_by_host( + cur_binding = db.get_distributed_port_binding_by_host( session, port_id, orig_binding.host) cur_context = driver_context.PortContext( self, plugin_context, port, network, cur_binding, None, @@ -651,11 +670,12 @@ 'result': result, 'attributes': attrs}) - except Exception: + except Exception as e: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("An exception occurred while creating " - "the %(resource)s:%(item)s"), - {'resource': resource, 'item': item}) + utils.attach_exc_details( + e, _LE("An exception occurred while creating " + "the %(resource)s:%(item)s"), + {'resource': resource, 'item': item}) try: postcommit_op = getattr(self.mechanism_manager, @@ -675,6 +695,41 @@ 'resource_ids': ', '.join(resource_ids)}) self._delete_objects(context, resource, objects) + def _get_network_mtu(self, network): + mtus = [] + try: + segments = network[mpnet.SEGMENTS] + except KeyError: + segments = [network] + for s in segments: + segment_type = s[provider.NETWORK_TYPE] + try: + type_driver = self.type_manager.drivers[segment_type].obj + except KeyError: + # NOTE(ihrachys) This can happen when type driver is not loaded + # for an existing segment. While it's probably an indication of + # a bad setup, it's better to be safe than sorry here. Also, + # several unit tests use non-existent driver types that may + # trigger the exception here. + LOG.warning( + _LW("Failed to determine MTU for segment " + "%(segment_type)s:%(segment_id)s; network " + "%(network_id)s MTU calculation may be not accurate"), + { + 'segment_type': segment_type, + 'segment_id': s[provider.SEGMENTATION_ID], + 'network_id': network['id'], + } + ) + else: + mtu = type_driver.get_mtu(s[provider.PHYSICAL_NETWORK]) + # Some drivers, like 'local', may return None; the assumption + # then is that for the segment type, MTU has no meaning or + # unlimited, and so we should then ignore those values. 
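
Two MTU rules meet in these hunks: type_tunnel.py now subtracts the overlay IP header selected by overlay_ip_version (20 bytes for IPv4, 40 for IPv6) from a tunnel segment's MTU, and _get_network_mtu takes the minimum over all segments while skipping drivers that report 0/None. A self-contained recomputation of both rules; the constants mirror neutron's IP_HEADER_LENGTH, and note the GRE driver additionally subtracts its own encapsulation overhead:

IP_HEADER_LENGTH = {4: 20, 6: 40}  # mirrors neutron.plugins.common.constants

def tunnel_segment_mtu(path_mtu, overlay_ip_version=4):
    """MTU left for payload after the outer IP header of the tunnel."""
    return path_mtu - IP_HEADER_LENGTH[overlay_ip_version] if path_mtu else 0

def network_mtu(segment_mtus):
    """A network's MTU is the smallest of its segments' usable MTUs."""
    mtus = [m for m in segment_mtus if m]  # ignore 0/None ("no meaning")
    return min(mtus) if mtus else 0

assert tunnel_segment_mtu(1500, 4) == 1480
assert network_mtu([1500, tunnel_segment_mtu(1500, 6), None]) == 1460
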
+ if mtu: + mtus.append(mtu) + return min(mtus) if mtus else 0 + def _create_network_db(self, context, network): net_data = network[attributes.NETWORK] tenant_id = net_data['tenant_id'] @@ -691,13 +746,16 @@ self.type_manager.create_network_segments(context, net_data, tenant_id) self.type_manager.extend_network_dict_provider(context, result) + # Update the transparent vlan if configured + if utils.is_extension_supported(self, 'vlan-transparent'): + vlt = vlantransparent.get_vlan_transparent(net_data) + net_db['vlan_transparent'] = vlt + result['vlan_transparent'] = vlt mech_context = driver_context.NetworkContext(self, context, result) self.mechanism_manager.create_network_precommit(mech_context) - if net_data.get(api.MTU, 0) > 0: - net_db[api.MTU] = net_data[api.MTU] - result[api.MTU] = net_data[api.MTU] + result[api.MTU] = self._get_network_mtu(result) if az_ext.AZ_HINTS in net_data: self.validate_availability_zones(context, 'network', @@ -707,17 +765,14 @@ net_db[az_ext.AZ_HINTS] = az_hints result[az_ext.AZ_HINTS] = az_hints - # Update the transparent vlan if configured - if utils.is_extension_supported(self, 'vlan-transparent'): - vlt = vlantransparent.get_vlan_transparent(net_data) - net_db['vlan_transparent'] = vlt - result['vlan_transparent'] = vlt - self._apply_dict_extend_functions('networks', result, net_db) return result, mech_context + @utils.transaction_guard def create_network(self, context, network): result, mech_context = self._create_network_db(context, network) + kwargs = {'context': context, 'network': result} + registry.notify(resources.NETWORK, events.AFTER_CREATE, self, **kwargs) try: self.mechanism_manager.create_network_postcommit(mech_context) except ml2_exc.MechanismDriverError: @@ -728,10 +783,12 @@ return result + @utils.transaction_guard def create_network_bulk(self, context, networks): objects = self._create_bulk_ml2(attributes.NETWORK, context, networks) return [obj['result'] for obj in objects] + @utils.transaction_guard def update_network(self, context, id, network): net_data = network[attributes.NETWORK] provider._raise_if_updates_provider_attributes(net_data) @@ -748,6 +805,8 @@ self.type_manager.extend_network_dict_provider(context, updated_network) + updated_network[api.MTU] = self._get_network_mtu(updated_network) + # TODO(QoS): Move out to the extension framework somehow. need_network_update_notify = ( qos_consts.QOS_POLICY_ID in net_data and @@ -763,6 +822,9 @@ # by re-calling update_network with the previous attributes. For # now the error is propagated to the caller, which is expected to # either undo/retry the operation or delete the resource. 
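
create_network and update_network above now emit AFTER_CREATE/AFTER_UPDATE events through the callbacks registry the plugin already imports. A minimal subscriber, assuming a running neutron environment; the handler signature and the context/network kwargs follow the registry.notify calls in the hunk:

from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources

def on_network_change(resource, event, trigger, **kwargs):
    # kwargs carries 'context' and 'network' (plus 'original_network'
    # for AFTER_UPDATE), as passed to registry.notify above.
    network = kwargs['network']
    print('%s %s: %s' % (resource, event, network['id']))

registry.subscribe(on_network_change, resources.NETWORK, events.AFTER_CREATE)
registry.subscribe(on_network_change, resources.NETWORK, events.AFTER_UPDATE)
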
+ kwargs = {'context': context, 'network': updated_network, + 'original_network': original_network} + registry.notify(resources.NETWORK, events.AFTER_UPDATE, self, **kwargs) self.mechanism_manager.update_network_postcommit(mech_context) if need_network_update_notify: self.notifier.network_update(context, updated_network) @@ -773,6 +835,7 @@ with session.begin(subtransactions=True): result = super(Ml2Plugin, self).get_network(context, id, None) self.type_manager.extend_network_dict_provider(context, result) + result[api.MTU] = self._get_network_mtu(result) return self._fields(result, fields) @@ -787,6 +850,9 @@ nets = self._filter_nets_provider(context, nets, filters) + for net in nets: + net[api.MTU] = self._get_network_mtu(net) + return [self._fields(net, fields) for net in nets] def _delete_ports(self, context, port_ids): @@ -797,10 +863,11 @@ # concurrent port deletion can be performed by # release_dhcp_port caused by concurrent subnet_delete LOG.info(_LI("Port %s was deleted concurrently"), port_id) - except Exception: + except Exception as e: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Exception auto-deleting port %s"), - port_id) + utils.attach_exc_details( + e, + _LE("Exception auto-deleting port %s"), port_id) def _delete_subnets(self, context, subnet_ids): for subnet_id in subnet_ids: @@ -809,10 +876,11 @@ except (exc.SubnetNotFound, sa_exc.ObjectDeletedError): LOG.info(_LI("Subnet %s was deleted concurrently"), subnet_id) - except Exception: + except Exception as e: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Exception auto-deleting subnet %s"), - subnet_id) + utils.attach_exc_details( + e, + _LE("Exception auto-deleting subnet %s"), subnet_id) @utils.transaction_guard def delete_network(self, context, id): @@ -868,7 +936,11 @@ self.mechanism_manager.delete_network_precommit( mech_context) - self.type_manager.release_network_segments(session, id) + registry.notify(resources.NETWORK, + events.PRECOMMIT_DELETE, + self, + context=context, + network_id=id) record = self._get_network(context, id) LOG.debug("Deleting network record %s", record) session.delete(record) @@ -880,16 +952,15 @@ port_ids = [port.id for port in ports] subnet_ids = [subnet.id for subnet in subnets] - except os_db_exception.DBError as e: - with excutils.save_and_reraise_exception() as ctxt: - if isinstance(e.inner_exception, sql_exc.IntegrityError): - ctxt.reraise = False - LOG.warning(_LW("A concurrent port creation has " - "occurred")) - continue + except os_db_exception.DBDuplicateEntry: + LOG.warning(_LW("A concurrent port creation has " + "occurred")) + continue self._delete_ports(context, port_ids) self._delete_subnets(context, subnet_ids) + kwargs = {'context': context, 'network': network} + registry.notify(resources.NETWORK, events.AFTER_DELETE, self, **kwargs) try: self.mechanism_manager.delete_network_postcommit(mech_context) except ml2_exc.MechanismDriverError: @@ -902,6 +973,11 @@ def _create_subnet_db(self, context, subnet): session = context.session + # FIXME(kevinbenton): this is a mess because create_subnet ends up + # calling _update_router_gw_ports which ends up calling update_port + # on a router port inside this transaction. Need to find a way to + # separate router updates from the subnet update operation. 
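
Several API methods above gain @utils.transaction_guard, and _create_subnet_db sets a GUARD_TRANSACTION escape hatch on the context. A hedged sketch of what such a guard is understood to do here, not neutron's exact implementation: refuse to run an API method while the caller already holds an open DB session transaction, unless the context opts out:

import functools

def transaction_guard(f):
    @functools.wraps(f)
    def inner(self, context, *args, **kwargs):
        # GUARD_TRANSACTION=False is the opt-out used by _create_subnet_db
        # above until router and subnet updates can be untangled.
        if (getattr(context, 'GUARD_TRANSACTION', True) and
                context.session.is_active):
            raise RuntimeError("%s cannot be called within a transaction"
                               % f.__name__)
        return f(self, context, *args, **kwargs)
    return inner
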
+ setattr(context, 'GUARD_TRANSACTION', False) with session.begin(subtransactions=True): result = super(Ml2Plugin, self).create_subnet(context, subnet) self.extension_manager.process_create_subnet( @@ -913,8 +989,11 @@ return result, mech_context + @utils.transaction_guard def create_subnet(self, context, subnet): result, mech_context = self._create_subnet_db(context, subnet) + kwargs = {'context': context, 'subnet': result} + registry.notify(resources.SUBNET, events.AFTER_CREATE, self, **kwargs) try: self.mechanism_manager.create_subnet_postcommit(mech_context) except ml2_exc.MechanismDriverError: @@ -924,10 +1003,12 @@ self.delete_subnet(context, result['id']) return result + @utils.transaction_guard def create_subnet_bulk(self, context, subnets): objects = self._create_bulk_ml2(attributes.SUBNET, context, subnets) return [obj['result'] for obj in objects] + @utils.transaction_guard def update_subnet(self, context, id, subnet): session = context.session with session.begin(subtransactions=True): @@ -946,6 +1027,9 @@ # by re-calling update_subnet with the previous attributes. For # now the error is propagated to the caller, which is expected to # either undo/retry the operation or delete the resource. + kwargs = {'context': context, 'subnet': updated_subnet, + 'original_subnet': original_subnet} + registry.notify(resources.SUBNET, events.AFTER_UPDATE, self, **kwargs) self.mechanism_manager.update_subnet_postcommit(mech_context) return updated_subnet @@ -1047,20 +1131,32 @@ if a.port: # calling update_port() for each allocation to remove the # IP from the port and call the MechanismDrivers - data = {attributes.PORT: - {'fixed_ips': [{'subnet_id': ip.subnet_id, - 'ip_address': ip.ip_address} - for ip in a.port.fixed_ips - if ip.subnet_id != id]}} + fixed_ips = [{'subnet_id': ip.subnet_id, + 'ip_address': ip.ip_address} + for ip in a.port.fixed_ips + if ip.subnet_id != id] + # By default auto-addressed ips are not removed from port + # on port update, so mark subnet with 'delete_subnet' flag + # to force ip deallocation on port update. + if is_auto_addr_subnet: + fixed_ips.append({'subnet_id': id, + 'delete_subnet': True}) + data = {attributes.PORT: {'fixed_ips': fixed_ips}} try: - self.update_port(context, a.port_id, data) + # NOTE Don't inline port_id; needed for PortNotFound. + port_id = a.port_id + self.update_port(context, port_id, data) except exc.PortNotFound: - LOG.debug("Port %s deleted concurrently", a.port_id) - except Exception: + # NOTE Attempting to access a.port_id here is an error. 
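
The delete_subnet hunk above builds a port update that keeps fixed IPs on other subnets but, for auto-addressed (e.g. SLAAC) subnets, appends an entry flagged 'delete_subnet': True, because auto-addressed IPs are otherwise preserved across port updates. The payload shape, with purely illustrative IDs and address:

# Illustrative payload for self.update_port(context, port_id, data).
data = {
    'port': {
        'fixed_ips': [
            # IPs on unrelated subnets survive the update unchanged.
            {'subnet_id': 'other-subnet-id', 'ip_address': '10.0.0.5'},
            # Force deallocation from the subnet being deleted; plain
            # omission would leave an auto-addressed IP in place.
            {'subnet_id': 'deleted-subnet-id', 'delete_subnet': True},
        ],
    },
}
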
+ LOG.debug("Port %s deleted concurrently", port_id) + except Exception as e: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("Exception deleting fixed_ip " - "from port %s"), a.port_id) + utils.attach_exc_details( + e, _LE("Exception deleting fixed_ip from " + "port %s"), port_id) + kwargs = {'context': context, 'subnet': subnet} + registry.notify(resources.SUBNET, events.AFTER_DELETE, self, **kwargs) try: self.mechanism_manager.delete_subnet_postcommit(mech_context) except ml2_exc.MechanismDriverError: @@ -1090,6 +1186,11 @@ raise psec.PortSecurityAndIPRequiredForSecurityGroups() def _setup_dhcp_agent_provisioning_component(self, context, port): + # NOTE(kevinbenton): skipping network ports is a workaround for + # the fact that we don't issue dhcp notifications from internal + # port creation like router ports and dhcp ports via RPC + if utils.is_port_trusted(port): + return subnet_ids = [f['subnet_id'] for f in port['fixed_ips']] if (db.is_dhcp_active_on_any_subnet(context, subnet_ids) and any(self.get_configuration_dict(a).get('notifies_port_ready') @@ -1113,8 +1214,7 @@ attrs['status'] = const.PORT_STATUS_DOWN session = context.session - with db_api.exc_to_retry(os_db_exception.DBDuplicateEntry),\ - session.begin(subtransactions=True): + with session.begin(subtransactions=True): dhcp_opts = attrs.get(edo_ext.EXTRADHCPOPTS, []) port_db = self.create_port_db(context, port) result = self._make_port_dict(port_db, process_extensions=False) @@ -1142,6 +1242,7 @@ self._apply_dict_extend_functions('ports', result, port_db) return result, mech_context + @utils.transaction_guard def create_port(self, context, port): # TODO(kevinbenton): remove when bug/1543094 is fixed. with lockutils.lock(port['port']['network_id'], @@ -1181,6 +1282,7 @@ return bound_context.current + @utils.transaction_guard def create_port_bulk(self, context, ports): objects = self._create_bulk_ml2(attributes.PORT, context, ports) @@ -1250,14 +1352,14 @@ if security_groups: raise psec.PortSecurityPortHasSecurityGroup() + @utils.transaction_guard def update_port(self, context, id, port): attrs = port[attributes.PORT] need_port_update_notify = False session = context.session bound_mech_contexts = [] - with db_api.exc_to_retry(os_db_exception.DBDuplicateEntry),\ - session.begin(subtransactions=True): + with session.begin(subtransactions=True): port_db, binding = db.get_locked_port_and_binding(session, id) if not port_db: raise exc.PortNotFound(port_id=id) @@ -1311,16 +1413,17 @@ # DVR and non-DVR cases here. # TODO(Swami): This code need to be revisited. 
if port_db['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - dvr_binding_list = db.get_dvr_port_bindings(session, id) - for dvr_binding in dvr_binding_list: + dist_binding_list = db.get_distributed_port_bindings(session, + id) + for dist_binding in dist_binding_list: levels = db.get_binding_levels(session, id, - dvr_binding.host) - dvr_mech_context = driver_context.PortContext( + dist_binding.host) + dist_mech_context = driver_context.PortContext( self, context, updated_port, network, - dvr_binding, levels, original_port=original_port) + dist_binding, levels, original_port=original_port) self.mechanism_manager.update_port_precommit( - dvr_mech_context) - bound_mech_contexts.append(dvr_mech_context) + dist_mech_context) + bound_mech_contexts.append(dist_mech_context) else: self.mechanism_manager.update_port_precommit(mech_context) self._setup_dhcp_agent_provisioning_component( @@ -1369,7 +1472,7 @@ need_notify=need_port_update_notify) return bound_context.current - def _process_dvr_port_binding(self, mech_context, context, attrs): + def _process_distributed_port_binding(self, mech_context, context, attrs): session = mech_context._plugin_context.session binding = mech_context._binding port = mech_context.current @@ -1386,7 +1489,8 @@ binding.host = attrs and attrs.get(portbindings.HOST_ID) binding.router_id = attrs and attrs.get('device_id') - def update_dvr_port_binding(self, context, id, port): + @utils.transaction_guard + def update_distributed_port_binding(self, context, id, port): attrs = port[attributes.PORT] host = attrs and attrs.get(portbindings.HOST_ID) @@ -1397,7 +1501,7 @@ return session = context.session - binding = db.get_dvr_port_binding_by_host(session, id, host) + binding = db.get_distributed_port_binding_by_host(session, id, host) device_id = attrs and attrs.get('device_id') router_id = binding and binding.get('router_id') update_required = (not binding or @@ -1408,7 +1512,7 @@ with session.begin(subtransactions=True): orig_port = self.get_port(context, id) if not binding: - binding = db.ensure_dvr_port_binding( + binding = db.ensure_distributed_port_binding( session, id, host, router_id=device_id) network = self.get_network(context, orig_port['network_id']) @@ -1416,8 +1520,8 @@ mech_context = driver_context.PortContext(self, context, orig_port, network, binding, levels, original_port=orig_port) - self._process_dvr_port_binding(mech_context, context, - attrs) + self._process_distributed_port_binding( + mech_context, context, attrs) except (os_db_exception.DBReferenceError, exc.PortNotFound): LOG.debug("DVR Port %s has been deleted concurrently", id) return @@ -1442,6 +1546,7 @@ raise e.errors[0].error raise exc.ServicePortInUse(port_id=port_id, reason=e) + @utils.transaction_guard def delete_port(self, context, id, l3_port_check=True): self._pre_delete_port(context, id, l3_port_check) # TODO(armax): get rid of the l3 dependency in the with block @@ -1461,7 +1566,8 @@ bound_mech_contexts = [] device_owner = port['device_owner'] if device_owner == const.DEVICE_OWNER_DVR_INTERFACE: - bindings = db.get_dvr_port_bindings(context.session, id) + bindings = db.get_distributed_port_bindings(context.session, + id) for bind in bindings: levels = db.get_binding_levels(context.session, id, bind.host) @@ -1510,6 +1616,7 @@ self.notifier.port_delete(context, port['id']) self.notify_security_groups_member_updated(context, port) + @utils.transaction_guard def get_bound_port_context(self, plugin_context, port_id, host=None, cached_networks=None): session = plugin_context.session @@ 
-1534,7 +1641,7 @@ network = self.get_network(plugin_context, port['network_id']) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - binding = db.get_dvr_port_binding_by_host( + binding = db.get_distributed_port_binding_by_host( session, port['id'], host) if not binding: LOG.error(_LE("Binding info for DVR port %s not found"), @@ -1561,11 +1668,8 @@ return self._bind_port_if_needed(port_context) - @oslo_db_api.wrap_db_retry( - max_retries=db_api.MAX_RETRIES, retry_on_request=True, - exception_checker=lambda e: isinstance(e, (sa_exc.StaleDataError, - os_db_exception.DBDeadlock)) - ) + @utils.transaction_guard + @db_api.retry_db_errors def update_port_status(self, context, port_id, status, host=None, network=None): """ @@ -1597,7 +1701,7 @@ self.mechanism_manager.update_port_precommit(mech_context) updated = True elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - binding = db.get_dvr_port_binding_by_host( + binding = db.get_distributed_port_binding_by_host( session, port['id'], host) if not binding: return @@ -1616,7 +1720,8 @@ original_port = self._make_port_dict(port) network = network or self.get_network( context, original_port['network_id']) - port.status = db.generate_dvr_port_status(session, port['id']) + port.status = db.generate_distributed_port_status(session, + port['id']) updated_port = self._make_port_dict(port) levels = db.get_binding_levels(session, port_id, host) mech_context = (driver_context.PortContext( @@ -1638,7 +1743,7 @@ **kwargs) if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - db.delete_dvr_port_binding_if_stale(session, binding) + db.delete_distributed_port_binding_if_stale(session, binding) return port['id'] @@ -1650,7 +1755,8 @@ LOG.debug("No Port match for: %s", port_id) return if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE: - bindings = db.get_dvr_port_bindings(context.session, port_id) + bindings = db.get_distributed_port_bindings(context.session, + port_id) for b in bindings: if b.host == host: return port @@ -1704,3 +1810,47 @@ if mech_driver.obj.check_segment_for_agent(segment, agent): return True return False + + def _handle_segment_change(self, rtype, event, trigger, context, segment): + if (event == events.PRECOMMIT_CREATE and + not isinstance(trigger, segments_plugin.Plugin)): + # TODO(xiaohhui): Now, when create network, ml2 will reserve + # segment and trigger this event handler. This event handler + # will reserve segment again, which will lead to error as the + # segment has already been reserved. This check could be removed + # by unifying segment creation procedure. + return + + session = context.session + network_id = segment.get('network_id') + + if event == events.PRECOMMIT_CREATE: + updated_segment = self.type_manager.reserve_network_segment( + session, segment) + # The segmentation id might be from ML2 type driver, update it + # in the original segment. 
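
update_port_status above swaps an open-coded oslo.db wrap_db_retry for the shared @db_api.retry_db_errors decorator, stacked under @utils.transaction_guard. A sketch of how such a decorator can be assembled from oslo.db; the exception set and tuning values approximate what neutron's db.api is understood to check, not its exact definition:

from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc

def is_retriable(exc):
    # neutron.db.api.is_retriable also unwraps nested exceptions;
    # this sketch covers only the common direct cases.
    return isinstance(exc, (db_exc.DBDeadlock, db_exc.DBDuplicateEntry,
                            db_exc.DBConnectionError, db_exc.RetryRequest))

retry_db_errors = oslo_db_api.wrap_db_retry(
    max_retries=10, retry_interval=0.1, inc_retry_interval=True,
    exception_checker=is_retriable)

@retry_db_errors
def update_port_status(context, port_id, status):
    ...  # the decorated body re-runs whenever is_retriable() matches
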
+ segment[api.SEGMENTATION_ID] = updated_segment[api.SEGMENTATION_ID] + elif event == events.PRECOMMIT_DELETE: + self.type_manager.release_network_segment(session, segment) + + try: + self._notify_mechanism_driver_for_segment_change( + event, context, network_id) + except ml2_exc.MechanismDriverError: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("mechanism_manager error occurred when " + "handle event %(event)s for segment " + "'%(segment)s'"), + {'event': event, 'segment': segment['id']}) + + def _notify_mechanism_driver_for_segment_change(self, event, + context, network_id): + network_with_segments = self.get_network(context, network_id) + mech_context = driver_context.NetworkContext( + self, context, network_with_segments, + original_network=network_with_segments) + if (event == events.PRECOMMIT_CREATE or + event == events.PRECOMMIT_DELETE): + self.mechanism_manager.update_network_precommit(mech_context) + elif event == events.AFTER_CREATE or event == events.AFTER_DELETE: + self.mechanism_manager.update_network_postcommit(mech_context) diff -Nru neutron-9.0.0~b2~dev280/neutron/plugins/ml2/rpc.py neutron-9.0.0~b3~dev557/neutron/plugins/ml2/rpc.py --- neutron-9.0.0~b2~dev280/neutron/plugins/ml2/rpc.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/plugins/ml2/rpc.py 2016-08-03 20:10:34.000000000 +0000 @@ -14,6 +14,7 @@ # under the License. from neutron_lib import constants as n_const +from neutron_lib import exceptions from oslo_log import log import oslo_messaging from sqlalchemy.orm import exc @@ -209,6 +210,17 @@ LOG.debug("Device %(device)s not bound to the" " agent host %(host)s", {'device': device, 'host': host}) + # this might mean that a VM is in the process of live migration + # and vif was plugged on the destination compute node; + # need to notify nova explicitly + try: + port = plugin._get_port(rpc_context, port_id) + except exceptions.PortNotFound: + LOG.debug("Port %s not found, will not notify nova.", port_id) + else: + if port.device_owner.startswith( + n_const.DEVICE_OWNER_COMPUTE_PREFIX): + plugin.nova_notifier.notify_port_active_direct(port) return if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE: # NOTE(kevinbenton): we have to special case DVR ports because of diff -Nru neutron-9.0.0~b2~dev280/neutron/policy.py neutron-9.0.0~b3~dev557/neutron/policy.py --- neutron-9.0.0~b2~dev280/neutron/policy.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/policy.py 2016-08-29 20:05:49.000000000 +0000 @@ -17,7 +17,7 @@ import re from neutron_lib import constants -from neutron_lib import exceptions as lib_exc +from neutron_lib import exceptions from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging @@ -29,7 +29,6 @@ from neutron._i18n import _, _LE, _LW from neutron.api.v2 import attributes from neutron.common import constants as const -from neutron.common import exceptions LOG = logging.getLogger(__name__) @@ -92,10 +91,11 @@ # marked as being updated instead. 
return (attribute_name in target[const.ATTRIBUTES_TO_UPDATE] and target[attribute_name] is not constants.ATTR_NOT_SPECIFIED) - return ('default' in resource[attribute_name] and - attribute_name in target and - target[attribute_name] is not constants.ATTR_NOT_SPECIFIED and - target[attribute_name] != resource[attribute_name]['default']) + result = (attribute_name in target and + target[attribute_name] is not constants.ATTR_NOT_SPECIFIED) + if result and 'default' in resource[attribute_name]: + return target[attribute_name] != resource[attribute_name]['default'] + return result def _should_validate_sub_attributes(attribute, sub_attr): @@ -264,7 +264,7 @@ target[parent_foreign_key], fields=[parent_field]) target[self.target_field] = data[parent_field] - except lib_exc.NotFound as e: + except exceptions.NotFound as e: # NOTE(kevinbenton): a NotFound exception can occur if a # list operation is happening at the same time as one of # the parents and its children being deleted. So we issue diff -Nru neutron-9.0.0~b2~dev280/neutron/quota/__init__.py neutron-9.0.0~b3~dev557/neutron/quota/__init__.py --- neutron-9.0.0~b2~dev280/neutron/quota/__init__.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/quota/__init__.py 2016-08-03 20:10:34.000000000 +0000 @@ -34,7 +34,6 @@ QUOTA_DB_MODULE = quota.QUOTA_DB_MODULE QUOTA_DB_DRIVER = quota.QUOTA_DB_DRIVER QUOTA_CONF_DRIVER = quota.QUOTA_CONF_DRIVER -default_quota_items = quota.default_quota_items # Register the configuration options @@ -280,15 +279,3 @@ QUOTAS = QuotaEngine.get_instance() - - -def register_resources_from_config(): - # This operation is now deprecated. All the neutron core and extended - # resource for which quota limits are enforced explicitly register - # themselves with the quota engine. - for resource_item in (set(cfg.CONF.QUOTAS.quota_items) - - set(default_quota_items)): - resource_registry.register_resource_by_name(resource_item) - - -register_resources_from_config() diff -Nru neutron-9.0.0~b2~dev280/neutron/quota/resource.py neutron-9.0.0~b3~dev557/neutron/quota/resource.py --- neutron-9.0.0~b2~dev280/neutron/quota/resource.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/quota/resource.py 2016-08-03 20:10:34.000000000 +0000 @@ -13,8 +13,6 @@ # under the License. from oslo_config import cfg -from oslo_db import api as oslo_db_api -from oslo_db import exception as oslo_db_exception from oslo_log import log from oslo_utils import excutils from sqlalchemy import event @@ -209,11 +207,7 @@ # can happen is two or more workers are trying to create a resource of a # give kind for the same tenant concurrently. 
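
The policy.py change above alters when an attribute counts as "explicitly set": presence in the request with a real value now suffices, and the comparison against a declared default only applies when the resource actually declares one. A pure-Python restatement with a quick check of the behavior change; the resource map is illustrative:

ATTR_NOT_SPECIFIED = object()

def is_attribute_explicitly_set(name, resource, target):
    result = (name in target and
              target[name] is not ATTR_NOT_SPECIFIED)
    if result and 'default' in resource[name]:
        return target[name] != resource[name]['default']
    return result

resource = {'shared': {'default': False}, 'description': {}}
assert is_attribute_explicitly_set('shared', resource, {'shared': True})
assert not is_attribute_explicitly_set('shared', resource, {'shared': False})
# Previously an attribute with no declared default was never "explicit":
assert is_attribute_explicitly_set('description', resource,
                                   {'description': 'x'})
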
Retrying the operation will # ensure that an UPDATE statement is emitted rather than an INSERT one - @oslo_db_api.wrap_db_retry( - max_retries=db_api.MAX_RETRIES, - exception_checker=lambda exc: - isinstance(exc, (oslo_db_exception.DBDuplicateEntry, - oslo_db_exception.DBDeadlock))) + @db_api.retry_db_errors def _set_quota_usage(self, context, tenant_id, in_use): return quota_api.set_quota_usage( context, self.name, tenant_id, in_use=in_use) diff -Nru neutron-9.0.0~b2~dev280/neutron/scheduler/dhcp_agent_scheduler.py neutron-9.0.0~b3~dev557/neutron/scheduler/dhcp_agent_scheduler.py --- neutron-9.0.0~b2~dev280/neutron/scheduler/dhcp_agent_scheduler.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/scheduler/dhcp_agent_scheduler.py 2016-08-29 20:05:49.000000000 +0000 @@ -30,6 +30,7 @@ from neutron.extensions import availability_zone as az_ext from neutron.scheduler import base_resource_filter from neutron.scheduler import base_scheduler +from neutron.services.segments import db as segments_db LOG = logging.getLogger(__name__) @@ -44,10 +45,16 @@ # a list of (agent, net_ids) tuples bindings_to_add = [] with context.session.begin(subtransactions=True): - fields = ['network_id', 'enable_dhcp'] + fields = ['network_id', 'enable_dhcp', 'segment_id'] subnets = plugin.get_subnets(context, fields=fields) - net_ids = set(s['network_id'] for s in subnets - if s['enable_dhcp']) + net_ids = {} + net_segment_ids = collections.defaultdict(set) + for s in subnets: + if s['enable_dhcp']: + net_segment_ids[s['network_id']].add(s.get('segment_id')) + for network_id, segment_ids in net_segment_ids.items(): + is_routed_network = any(segment_ids) + net_ids[network_id] = is_routed_network if not net_ids: LOG.debug('No non-hosted networks') return False @@ -57,17 +64,28 @@ agents_db.Agent.host == host, agents_db.Agent.admin_state_up == sql.true()) dhcp_agents = query.all() + + query = context.session.query( + segments_db.SegmentHostMapping.segment_id) + query = query.filter(segments_db.SegmentHostMapping.host == host) + segments_on_host = {s.segment_id for s in query} + for dhcp_agent in dhcp_agents: if agents_db.AgentDbMixin.is_agent_down( dhcp_agent.heartbeat_timestamp): LOG.warning(_LW('DHCP agent %s is not active'), dhcp_agent.id) continue - for net_id in net_ids: + for net_id, is_routed_network in net_ids.items(): agents = plugin.get_dhcp_agents_hosting_networks( context, [net_id]) - if len(agents) >= agents_per_network: - continue + segments_on_network = net_segment_ids[net_id] + if is_routed_network: + if len(segments_on_network & segments_on_host) == 0: + continue + else: + if len(agents) >= agents_per_network: + continue if any(dhcp_agent.id == agent.id for agent in agents): continue net = plugin.get_network(context, net_id) @@ -199,6 +217,18 @@ 'hosted_agents': agents_dict['hosted_agents']} return agents_dict + def _filter_agents_with_network_access(self, hostable_agents, plugin, + context, network): + if 'candidate_hosts' in network: + hostable_dhcp_hosts = network['candidate_hosts'] + else: + hostable_dhcp_hosts = plugin.filter_hosts_with_network_access( + context, network['id'], + [agent['host'] for agent in hostable_agents]) + reachable_agents = [agent for agent in hostable_agents + if agent['host'] in hostable_dhcp_hosts] + return reachable_agents + def _get_dhcp_agents_hosting_network(self, plugin, context, network): """Return dhcp agents hosting the given network or None if a given network is already hosted by enough number of agents. 
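# The dhcp_agent_scheduler hunks above teach auto-scheduling about routed
# networks: when any subnet carries a segment_id, a host qualifies only if it
# is mapped to one of the network's segments, and the usual agents-per-network
# cap is skipped. The eligibility test boils down to this (hypothetical
# helper, condensed from the hunk):

def host_eligible(network_segment_ids, segments_on_host,
                  hosting_agents, agents_per_network):
    if any(network_segment_ids):  # routed network: any non-None segment_id
        return bool(set(network_segment_ids) & segments_on_host)
    return hosting_agents < agents_per_network

assert host_eligible({'seg-1'}, {'seg-1', 'seg-2'}, 5, 1)  # routed: cap not applied
assert not host_eligible({'seg-1'}, {'seg-9'}, 0, 1)       # host not on any segment
assert not host_eligible({None}, {'seg-1'}, 1, 1)          # flat network: cap enforced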
@@ -208,7 +238,7 @@ # subnets whose enable_dhcp is false with context.session.begin(subtransactions=True): network_hosted_agents = plugin.get_dhcp_agents_hosting_networks( - context, [network['id']]) + context, [network['id']], hosts=network.get('candidate_hosts')) if len(network_hosted_agents) >= agents_per_network: LOG.debug('Network %s is already hosted by enough agents.', network['id']) @@ -253,11 +283,8 @@ context, True, agent) ] - hostable_dhcp_hosts = plugin.filter_hosts_with_network_access( - context, network['id'], - [agent['host'] for agent in hostable_dhcp_agents]) - hostable_dhcp_agents = [agent for agent in hostable_dhcp_agents - if agent['host'] in hostable_dhcp_hosts] + hostable_dhcp_agents = self._filter_agents_with_network_access( + hostable_dhcp_agents, plugin, context, network) if not hostable_dhcp_agents: return {'n_agents': 0, 'hostable_agents': [], diff -Nru neutron-9.0.0~b2~dev280/neutron/scheduler/l3_agent_scheduler.py neutron-9.0.0~b3~dev557/neutron/scheduler/l3_agent_scheduler.py --- neutron-9.0.0~b2~dev280/neutron/scheduler/l3_agent_scheduler.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/scheduler/l3_agent_scheduler.py 2016-08-03 20:10:34.000000000 +0000 @@ -275,10 +275,11 @@ dep_getter = functools.partial(plugin.get_ha_network, ctxt, tenant_id) dep_creator = functools.partial(plugin._create_ha_network, ctxt, tenant_id) + dep_deleter = functools.partial(plugin._delete_ha_network, ctxt) dep_id_attr = 'network_id' try: port_binding = utils.create_object_with_dependency( - creator, dep_getter, dep_creator, dep_id_attr)[0] + creator, dep_getter, dep_creator, dep_id_attr, dep_deleter)[0] with db_api.autonested_transaction(context.session): port_binding.l3_agent_id = agent['id'] except db_exc.DBDuplicateEntry: @@ -307,17 +308,19 @@ agent) scheduled = False admin_ctx = context.elevated() - for router, agents in routers_agents: - max_agents_not_reached = ( - not self.max_ha_agents or agents < self.max_ha_agents) - if max_agents_not_reached: - if not self._router_has_binding(admin_ctx, router['id'], - agent.id): - self.create_ha_port_and_bind(plugin, admin_ctx, - router['id'], - router['tenant_id'], - agent) - scheduled = True + underscheduled_routers = [router for router, agents in routers_agents + if (not self.max_ha_agents or + agents < self.max_ha_agents)] + schedulable_routers = self._get_routers_can_schedule( + admin_ctx, plugin, underscheduled_routers, agent) + for router in schedulable_routers: + if not self._router_has_binding(admin_ctx, router['id'], + agent.id): + self.create_ha_port_and_bind(plugin, admin_ctx, + router['id'], + router['tenant_id'], + agent) + scheduled = True return scheduled diff -Nru neutron-9.0.0~b2~dev280/neutron/service.py neutron-9.0.0~b3~dev557/neutron/service.py --- neutron-9.0.0~b2~dev280/neutron/service.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/service.py 2016-08-29 20:05:49.000000000 +0000 @@ -27,6 +27,9 @@ from oslo_utils import importutils from neutron._i18n import _LE, _LI +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.callbacks import resources from neutron.common import config from neutron.common import profiler from neutron.common import rpc as n_rpc @@ -87,6 +90,7 @@ LOG.exception(_LE('Unrecoverable error: please check log ' 'for details.')) + registry.notify(resources.PROCESS, events.BEFORE_SPAWN, service) return service @@ -241,7 +245,7 @@ # dispose the whole pool before os.fork, otherwise there will # be shared DB 
connections in child processes which may cause # DB errors. - session.dispose() + session.context_manager.dispose_pool() for worker in process_workers: worker_launcher.launch_service(worker, @@ -276,7 +280,7 @@ def _get_api_workers(): workers = cfg.CONF.api_workers - if not workers: + if workers is None: workers = processutils.get_worker_count() return workers diff -Nru neutron-9.0.0~b2~dev280/neutron/services/auto_allocate/db.py neutron-9.0.0~b3~dev557/neutron/services/auto_allocate/db.py --- neutron-9.0.0~b2~dev280/neutron/services/auto_allocate/db.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/auto_allocate/db.py 2016-08-29 20:05:49.000000000 +0000 @@ -17,6 +17,7 @@ from neutron_lib import exceptions as n_exc from oslo_db import exception as db_exc from oslo_log import log as logging +from oslo_utils import excutils from sqlalchemy import sql from neutron._i18n import _, _LE @@ -24,11 +25,13 @@ from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources +from neutron.common import exceptions as c_exc +from neutron.db import api as db_api from neutron.db import common_db_mixin from neutron.db import db_base_plugin_v2 from neutron.db import external_net_db -from neutron.db import model_base from neutron.db import models_v2 +from neutron.db import standard_attr from neutron.extensions import l3 from neutron import manager from neutron.plugins.common import constants @@ -94,15 +97,29 @@ # - update router gateway -> prevent operation # - ... + @property + def core_plugin(self): + if not getattr(self, '_core_plugin', None): + self._core_plugin = manager.NeutronManager.get_plugin() + return self._core_plugin + + @property + def l3_plugin(self): + if not getattr(self, '_l3_plugin', None): + self._l3_plugin = manager.NeutronManager.get_service_plugins().get( + constants.L3_ROUTER_NAT) + return self._l3_plugin + def get_auto_allocated_topology(self, context, tenant_id, fields=None): """Return tenant's network associated to auto-allocated topology. The topology will be provisioned upon return, if network is missing. """ + fields = fields or [] tenant_id = self._validate(context, tenant_id) if CHECK_REQUIREMENTS in fields: # for dry-run requests, simply validates that subsequent - # requests can be fullfilled based on a set of requirements + # requests can be fulfilled based on a set of requirements # such as existence of default networks, pools, etc. return self._check_requirements(context, tenant_id) elif fields: @@ -119,26 +136,46 @@ context) # If we reach this point, then we got some work to do! 
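# The _get_api_workers() change above swaps `if not workers` for
# `if workers is None`, so an operator who explicitly sets api_workers = 0 is
# honored instead of silently getting one worker per CPU. An illustration of
# the difference (cpu_count stands in for processutils.get_worker_count()):

def get_api_workers(configured, cpu_count):
    return cpu_count if configured is None else configured

assert get_api_workers(None, 8) == 8  # option unset: derive from CPU count
assert get_api_workers(0, 8) == 0     # explicit zero is now respected
assert get_api_workers(4, 8) == 4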
- subnets = self._provision_tenant_private_network(context, tenant_id) - network_id = subnets[0]['network_id'] - router = self._provision_external_connectivity( - context, default_external_network, subnets, tenant_id) - network_id = self._save( - context, tenant_id, network_id, router['id'], subnets) + network_id = self._build_topology( + context, tenant_id, default_external_network) return self._response(network_id, tenant_id, fields=fields) - @property - def core_plugin(self): - if not getattr(self, '_core_plugin', None): - self._core_plugin = manager.NeutronManager.get_plugin() - return self._core_plugin + def delete_auto_allocated_topology(self, context, tenant_id): + tenant_id = self._validate(context, tenant_id) + topology = self._get_auto_allocated_topology(context, tenant_id) + if topology: + subnets = self.core_plugin.get_subnets( + context, + filters={'network_id': [topology['network_id']]}) + self._cleanup( + context, network_id=topology['network_id'], + router_id=topology['router_id'], subnets=subnets) - @property - def l3_plugin(self): - if not getattr(self, '_l3_plugin', None): - self._l3_plugin = manager.NeutronManager.get_service_plugins().get( - constants.L3_ROUTER_NAT) - return self._l3_plugin + def _build_topology(self, context, tenant_id, default_external_network): + """Build the network topology and return its network UUID.""" + network_id = None + router_id = None + subnets = None + try: + subnets = self._provision_tenant_private_network( + context, tenant_id) + network_id = subnets[0]['network_id'] + router = self._provision_external_connectivity( + context, default_external_network, subnets, tenant_id) + network_id = self._save( + context, tenant_id, network_id, router['id'], subnets) + return network_id + except Exception as e: + with excutils.save_and_reraise_exception(): + # FIXME(armax): defensively catch all errors and let + # the caller retry the operation, if it can be retried. + # This catch-all should no longer be necessary once + # bug #1612798 is solved; any other error should just + # surface up to the user and be dealt with as a bug. + if db_api.is_retriable(e): + self._cleanup( + context, network_id=network_id, + router_id=router_id, subnets=subnets) def _check_requirements(self, context, tenant_id): """Raise if requirements are not met.""" @@ -163,12 +200,15 @@ return tenant_id - def _get_auto_allocated_network(self, context, tenant_id): - """Get the auto allocated network for the tenant.""" + def _get_auto_allocated_topology(self, context, tenant_id): + """Return the auto allocated topology record if present or None.""" with context.session.begin(subtransactions=True): - network = (context.session.query(models.AutoAllocatedTopology). + return (context.session.query(models.AutoAllocatedTopology). filter_by(tenant_id=tenant_id).first()) + def _get_auto_allocated_network(self, context, tenant_id): + """Get the auto allocated network for the tenant.""" + network = self._get_auto_allocated_topology(context, tenant_id) if network: return network['network_id'] @@ -187,8 +227,8 @@ external_net_db.ExternalNetwork). filter_by(is_default=sql.true()). join(models_v2.Network). - join(model_base.StandardAttribute). - order_by(model_base.StandardAttribute.id).all()) + join(standard_attr.StandardAttribute).
+ order_by(standard_attr.StandardAttribute.id).all()) if not default_external_networks: LOG.error(_LE("Unable to find default external network " @@ -241,10 +281,12 @@ subnets.append(p_utils.create_subnet( self.core_plugin, context, {'subnet': subnet_args})) return subnets - except (ValueError, n_exc.BadRequest, n_exc.NotFound): + except (c_exc.SubnetAllocationError, ValueError, + n_exc.BadRequest, n_exc.NotFound) as e: LOG.error(_LE("Unable to auto allocate topology for tenant " - "%s due to missing requirements, e.g. default " - "or shared subnetpools"), tenant_id) + "%(tenant_id)s due to missing or unmet " + "requirements. Reason: %(reason)s"), + {'tenant_id': tenant_id, 'reason': e}) if network: self._cleanup(context, network['id']) raise exceptions.AutoAllocationFailure( @@ -309,13 +351,29 @@ network_id = self._get_auto_allocated_network(context, tenant_id) return network_id + # FIXME(kevinbenton): get rid of the retry once bug/1612798 is resolved + @db_api.retry_db_errors def _cleanup(self, context, network_id=None, router_id=None, subnets=None): """Clean up auto allocated resources.""" + # Concurrent attempts to delete the topology may interleave and + # cause some operations to fail with NotFound exceptions. Rather + # than fail partially, the exceptions should be ignored and the + # cleanup should proceed uninterrupted. if router_id: for subnet in subnets or []: - self.l3_plugin.remove_router_interface( + ignore_notfound( + self.l3_plugin.remove_router_interface, context, router_id, {'subnet_id': subnet['id']}) - self.l3_plugin.delete_router(context, router_id) + ignore_notfound(self.l3_plugin.delete_router, context, router_id) if network_id: - self.core_plugin.delete_network(context, network_id) + ignore_notfound( + self.core_plugin.delete_network, context, network_id) + + +def ignore_notfound(func, *args, **kwargs): + """Call the given function and pass if a `NotFound` exception is raised.""" + try: + return func(*args, **kwargs) + except n_exc.NotFound: + pass diff -Nru neutron-9.0.0~b2~dev280/neutron/services/auto_allocate/models.py neutron-9.0.0~b3~dev557/neutron/services/auto_allocate/models.py --- neutron-9.0.0~b2~dev280/neutron/services/auto_allocate/models.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/auto_allocate/models.py 2016-08-29 20:05:49.000000000 +0000 @@ -18,12 +18,11 @@ from neutron.db import model_base -class AutoAllocatedTopology(model_base.BASEV2): +class AutoAllocatedTopology(model_base.BASEV2, + model_base.HasProjectPrimaryKey): __tablename__ = 'auto_allocated_topologies' - tenant_id = sa.Column(sa.String(255), primary_key=True) - network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE'), diff -Nru neutron-9.0.0~b2~dev280/neutron/services/externaldns/drivers/designate/driver.py neutron-9.0.0~b3~dev557/neutron/services/externaldns/drivers/designate/driver.py --- neutron-9.0.0~b2~dev280/neutron/services/externaldns/drivers/designate/driver.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/externaldns/drivers/designate/driver.py 2016-08-29 20:05:49.000000000 +0000 @@ -22,9 +22,8 @@ from keystoneauth1 import token_endpoint from neutron_lib import constants from oslo_config import cfg -from oslo_log import log -from neutron._i18n import _ +from neutron.conf.services import extdns_designate_driver from neutron.extensions import dns from neutron.services.externaldns import driver @@ -33,55 +32,10 @@ IPV6_PTR_ZONE_PREFIX_MIN_SIZE = 4 IPV6_PTR_ZONE_PREFIX_MAX_SIZE = 
124 -LOG = log.getLogger(__name__) _SESSION = None -designate_opts = [ - cfg.StrOpt('url', - help=_('URL for connecting to designate')), - cfg.StrOpt('admin_username', - help=_('Username for connecting to designate in admin ' - 'context')), - cfg.StrOpt('admin_password', - help=_('Password for connecting to designate in admin ' - 'context'), - secret=True), - cfg.StrOpt('admin_tenant_id', - help=_('Tenant id for connecting to designate in admin ' - 'context')), - cfg.StrOpt('admin_tenant_name', - help=_('Tenant name for connecting to designate in admin ' - 'context')), - cfg.StrOpt('admin_auth_url', - help=_('Authorization URL for connecting to designate in admin ' - 'context')), - cfg.BoolOpt('insecure', default=False, - help=_('Skip cert validation for SSL based admin_auth_url')), - cfg.StrOpt('ca_cert', - help=_('CA certificate file to use to verify ' - 'connecting clients')), - cfg.BoolOpt('allow_reverse_dns_lookup', default=True, - help=_('Allow the creation of PTR records')), - cfg.IntOpt('ipv4_ptr_zone_prefix_size', default=24, - help=_('Number of bits in an ipv4 PTR zone that will be considered ' - 'network prefix. It has to align to byte boundary. Minimum ' - 'value is 8. Maximum value is 24. As a consequence, range ' - 'of values is 8, 16 and 24')), - cfg.IntOpt('ipv6_ptr_zone_prefix_size', default=120, - help=_('Number of bits in an ipv6 PTR zone that will be considered ' - 'network prefix. It has to align to nyble boundary. Minimum ' - 'value is 4. Maximum value is 124. As a consequence, range ' - 'of values is 4, 8, 12, 16,..., 124')), - cfg.StrOpt('ptr_zone_email', default='', - help=_('The email address to be used when creating PTR zones. ' - 'If not specified, the email address will be ' - 'admin@')), -] - -DESIGNATE_GROUP = 'designate' - CONF = cfg.CONF -CONF.register_opts(designate_opts, DESIGNATE_GROUP) +extdns_designate_driver.register_designate_opts() def get_clients(context): diff -Nru neutron-9.0.0~b2~dev280/neutron/services/l3_router/l3_router_plugin.py neutron-9.0.0~b3~dev557/neutron/services/l3_router/l3_router_plugin.py --- neutron-9.0.0~b2~dev280/neutron/services/l3_router/l3_router_plugin.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/l3_router/l3_router_plugin.py 2016-08-29 20:05:49.000000000 +0000 @@ -30,9 +30,11 @@ from neutron.db import l3_dvrscheduler_db from neutron.db import l3_gwmode_db from neutron.db import l3_hamode_db +from neutron.extensions import l3 from neutron.plugins.common import constants from neutron.quota import resource_registry from neutron import service +from neutron.services.l3_router.service_providers import driver_controller from neutron.services import service_base @@ -55,7 +57,11 @@ """ supported_extension_aliases = ["dvr", "router", "ext-gw-mode", "extraroute", "l3_agent_scheduler", - "l3-ha", "router_availability_zone"] + "l3-ha", "router_availability_zone", + "l3-flavors"] + + __native_pagination_support = True + __native_sorting_support = True @resource_registry.tracked_resources(router=l3_db.Router, floatingip=l3_db.FloatingIP) @@ -66,13 +72,13 @@ super(L3RouterPlugin, self).__init__() if 'dvr' in self.supported_extension_aliases: l3_dvrscheduler_db.subscribe() - l3_db.subscribe() self.agent_notifiers.update( {n_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()}) rpc_worker = service.RpcWorker([self], worker_process_count=0) self.add_worker(rpc_worker) + self.l3_driver_controller = driver_controller.DriverController(self) @log_helpers.log_method_call def start_rpc_listeners(self): 
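# The l3-flavors work below introduces per-driver capability flags
# (service_providers/base.py). The compatibility rule is compact enough to
# restate standalone: UNSUPPORTED only accepts False, OPTIONAL accepts either
# value, MANDATORY only accepts True (sketch mirroring _FeatureFlag):

class FeatureFlag(object):
    def __init__(self, supports, requires):
        if requires and not supports:
            raise RuntimeError("a driver can't require an unsupported feature")
        self.supports = supports
        self.requires = requires

    def is_compatible(self, value):
        return value == self.requires or bool(value and self.supports)

UNSUPPORTED = FeatureFlag(supports=False, requires=False)
OPTIONAL = FeatureFlag(supports=True, requires=False)
MANDATORY = FeatureFlag(supports=True, requires=True)

assert UNSUPPORTED.is_compatible(False) and not UNSUPPORTED.is_compatible(True)
assert OPTIONAL.is_compatible(False) and OPTIONAL.is_compatible(True)
assert MANDATORY.is_compatible(True) and not MANDATORY.is_compatible(False)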
@@ -108,3 +114,11 @@ return super(L3RouterPlugin, self).create_floatingip( context, floatingip, initial_status=n_const.FLOATINGIP_STATUS_DOWN) + + +def add_flavor_id(plugin, router_res, router_db): + router_res['flavor_id'] = router_db['flavor_id'] + + +common_db_mixin.CommonDbMixin.register_dict_extend_funcs( + l3.ROUTERS, [add_flavor_id]) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/l3_router/service_providers/base.py neutron-9.0.0~b3~dev557/neutron/services/l3_router/service_providers/base.py --- neutron-9.0.0~b2~dev280/neutron/services/l3_router/service_providers/base.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/l3_router/service_providers/base.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,55 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron._i18n import _ + + +class _FeatureFlag(object): + + def is_compatible(self, value): + if value == self.requires: + return True + if value and self.supports: + return True + return False + + def __init__(self, supports, requires): + self.supports = supports + self.requires = requires + if requires and not supports: + raise RuntimeError(_("A driver can't require a feature and not " + "support it.")) + +UNSUPPORTED = _FeatureFlag(supports=False, requires=False) +OPTIONAL = _FeatureFlag(supports=True, requires=False) +MANDATORY = _FeatureFlag(supports=True, requires=True) + + +class L3ServiceProvider(object): + """Base class for L3 service provider drivers. + + On __init__ this will be given a handle to the l3 plugin. It is then the + responsibility of the driver to subscribe to the events it is interested + in (e.g. router_create, router_update, router_delete, etc). + + The 'ha' and 'distributed' attributes below are used to determine if a + router request with the 'ha' or 'distributed' attribute can be supported + by this particular driver. These attributes must be present. + """ + + ha_support = UNSUPPORTED + distributed_support = UNSUPPORTED + + def __init__(self, l3plugin): + self.l3plugin = l3plugin diff -Nru neutron-9.0.0~b2~dev280/neutron/services/l3_router/service_providers/driver_controller.py neutron-9.0.0~b3~dev557/neutron/services/l3_router/service_providers/driver_controller.py --- neutron-9.0.0~b2~dev280/neutron/services/l3_router/service_providers/driver_controller.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/l3_router/service_providers/driver_controller.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,249 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lib import constants as lib_const +from neutron_lib import exceptions as lib_exc +from oslo_config import cfg +from oslo_log import log as logging + +from neutron._i18n import _ +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.callbacks import resources +from neutron.db import servicetype_db as st_db +from neutron import manager +from neutron.plugins.common import constants +from neutron.services import provider_configuration +from neutron.services import service_base + +LOG = logging.getLogger(__name__) + + +class DriverController(object): + """Driver controller for the L3 service plugin. + + This component is responsible for dispatching router requests to L3 + service providers and for performing the bookkeeping about which + driver is associated with a given router. + + This is not intended to be accessed by the drivers or the l3 plugin. + All of the methods are marked as private to reflect this. + """ + + def __init__(self, l3_plugin): + self.l3_plugin = l3_plugin + self._stm = st_db.ServiceTypeManager.get_instance() + self._stm.add_provider_configuration( + constants.L3_ROUTER_NAT, _LegacyPlusProviderConfiguration()) + self._load_drivers() + registry.subscribe(self._set_router_provider, + resources.ROUTER, events.PRECOMMIT_CREATE) + registry.subscribe(self._update_router_provider, + resources.ROUTER, events.PRECOMMIT_UPDATE) + registry.subscribe(self._clear_router_provider, + resources.ROUTER, events.PRECOMMIT_DELETE) + + def _load_drivers(self): + self.drivers, self.default_provider = ( + service_base.load_drivers(constants.L3_ROUTER_NAT, self.l3_plugin)) + # store the provider name on each driver to make finding inverse easy + for provider_name, driver in self.drivers.items(): + setattr(driver, 'name', provider_name) + + @property + def _flavor_plugin(self): + if not hasattr(self, '_flavor_plugin_ref'): + _service_plugins = manager.NeutronManager.get_service_plugins() + self._flavor_plugin_ref = _service_plugins[constants.FLAVORS] + return self._flavor_plugin_ref + + def _set_router_provider(self, resource, event, trigger, context, router, + router_db, **kwargs): + """Associates a router with a service provider. + + Association is done by flavor_id if it's specified, otherwise it will + fall back to determining which loaded driver supports the ha/distributed + attributes associated with the router. + """ + if _flavor_specified(router): + router_db.flavor_id = router['flavor_id'] + drv = self._get_provider_for_create(context, router) + _ensure_driver_supports_request(drv, router) + self._stm.add_resource_association(context, 'L3_ROUTER_NAT', + drv.name, router['id']) + + def _clear_router_provider(self, resource, event, trigger, context, + router_id, **kwargs): + """Remove the association between a router and a service provider.""" + self._stm.del_resource_associations(context, [router_id]) + + def _update_router_provider(self, resource, event, trigger, context, + router_id, router, old_router, router_db, + **kwargs): + """Handle transition between providers. + + The provider can currently be changed only by the caller updating + 'ha' and/or 'distributed' attributes. If we allow updates of flavor_id + directly in the future, those requests will also land here.
+ """ + drv = self._get_provider_for_router(context, router_id) + new_drv = None + if _flavor_specified(router): + if router['flavor_id'] != old_router['flavor_id']: + # TODO(kevinbenton): this is currently disallowed by the API + # so we shouldn't hit it but this is a placeholder to add + # support later. + raise NotImplementedError() + + # the following is to support updating the 'ha' and 'distributed' + # attributes via the API. + try: + _ensure_driver_supports_request(drv, router) + except lib_exc.Invalid: + # the current driver does not support this request, we need to + # migrate to a new provider. populate the distributed and ha + # flags from the previous state if not in the update so we can + # determine the target provider appropriately. + # NOTE(kevinbenton): if the router is associated with a flavor + # we bail because changing the provider without changing + # the flavor will make things inconsistent. We can probably + # update the flavor automatically in the future. + if old_router['flavor_id']: + raise lib_exc.Invalid(_( + "Changing the 'ha' and 'distributed' attributes on a " + "router associated with a flavor is not supported.")) + if 'distributed' not in router: + router['distributed'] = old_router['distributed'] + if 'ha' not in router: + router['ha'] = old_router['distributed'] + new_drv = self._attrs_to_driver(router) + if new_drv: + LOG.debug("Router %(id)s migrating from %(old)s provider to " + "%(new)s provider.", {'id': router_id, 'old': drv, + 'new': new_drv}) + _ensure_driver_supports_request(new_drv, router) + # TODO(kevinbenton): notify old driver explicity of driver change + with context.session.begin(subtransactions=True): + self._stm.del_resource_associations(context, [router_id]) + self._stm.add_resource_association( + context, 'L3_ROUTER_NAT', new_drv.name, router_id) + + def _get_provider_for_router(self, context, router_id): + """Return the provider driver handle for a router id.""" + driver_name = self._stm.get_provider_names_by_resource_ids( + context, [router_id]).get(router_id) + if not driver_name: + # this is an old router that hasn't been mapped to a provider + # yet so we do this now + router = self.l3_plugin.get_router(context, router_id) + driver = self._attrs_to_driver(router) + driver_name = driver.name + self._stm.add_resource_association(context, 'L3_ROUTER_NAT', + driver_name, router_id) + return self.drivers[driver_name] + + def _get_provider_for_create(self, context, router): + """Get provider based on flavor or ha/distributed flags.""" + if not _flavor_specified(router): + return self._attrs_to_driver(router) + return self._get_l3_driver_by_flavor(context, router['flavor_id']) + + def _get_l3_driver_by_flavor(self, context, flavor_id): + """Get a provider driver handle for a given flavor_id.""" + flavor = self._flavor_plugin.get_flavor(context, flavor_id) + provider = self._flavor_plugin.get_flavor_next_provider( + context, flavor['id'])[0] + # TODO(kevinbenton): the callback framework suppresses the nice errors + # these generate when they fail to lookup. 
carry them through + driver = self.drivers[provider['provider']] + return driver + + def _attrs_to_driver(self, router): + """Get a provider driver handle based on the ha/distributed flags.""" + distributed = _is_distributed(router['distributed']) + ha = _is_ha(router['ha']) + drivers = list(self.drivers.values()) + # make sure default is tried before the rest if defined + if self.default_provider: + drivers.insert(0, self.drivers[self.default_provider]) + for driver in drivers: + if _is_driver_compatible(distributed, ha, driver): + return driver + raise NotImplementedError( + _("Could not find a service provider that supports " + "distributed=%(d)s and ha=%(h)s") % {'d': distributed, 'h': ha} + ) + + +class _LegacyPlusProviderConfiguration( + provider_configuration.ProviderConfiguration): + + def __init__(self): + # loads up ha, dvr, and single_node service providers automatically. + # If an operator has set up explicit values that conflict with these, + # the operator defined values will take priority. + super(_LegacyPlusProviderConfiguration, self).__init__() + for name, driver in (('dvrha', 'dvrha.DvrHaDriver'), + ('dvr', 'dvr.DvrDriver'), ('ha', 'ha.HaDriver'), + ('single_node', 'single_node.SingleNodeDriver')): + path = 'neutron.services.l3_router.service_providers.%s' % driver + try: + self.add_provider({'service_type': constants.L3_ROUTER_NAT, + 'name': name, 'driver': path, + 'default': False}) + except lib_exc.Invalid: + LOG.debug("Could not add L3 provider '%s', it may have " + "already been explicitly defined.", name) + + +def _is_driver_compatible(distributed, ha, driver): + if not driver.distributed_support.is_compatible(distributed): + return False + if not driver.ha_support.is_compatible(ha): + return False + return True + + +def _is_distributed(distributed_attr): + if distributed_attr is False: + return False + if distributed_attr == lib_const.ATTR_NOT_SPECIFIED: + return cfg.CONF.router_distributed + return True + + +def _is_ha(ha_attr): + if ha_attr is False: + return False + if ha_attr == lib_const.ATTR_NOT_SPECIFIED: + return cfg.CONF.l3_ha + return True + + +def _flavor_specified(router): + return ('flavor_id' in router and + router['flavor_id'] != lib_const.ATTR_NOT_SPECIFIED) + + +def _ensure_driver_supports_request(drv, router_body): + r = router_body + for key, attr in (('distributed', 'distributed_support'), + ('ha', 'ha_support')): + flag = r.get(key) + if flag not in [True, False]: + continue # not specified in body + if not getattr(drv, attr).is_compatible(flag): + raise lib_exc.Invalid( + _("Provider %(name)s does not support %(key)s=%(flag)s") + % dict(name=drv.name, key=key, flag=flag)) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/l3_router/service_providers/dvrha.py neutron-9.0.0~b3~dev557/neutron/services/l3_router/service_providers/dvrha.py --- neutron-9.0.0~b2~dev280/neutron/services/l3_router/service_providers/dvrha.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/l3_router/service_providers/dvrha.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,22 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.services.l3_router.service_providers import base +from neutron.services.l3_router.service_providers import dvr +from neutron.services.l3_router.service_providers import ha + + +class DvrHaDriver(dvr.DvrDriver, ha.HaDriver): + ha_support = base.MANDATORY + distributed_support = base.MANDATORY diff -Nru neutron-9.0.0~b2~dev280/neutron/services/l3_router/service_providers/dvr.py neutron-9.0.0~b3~dev557/neutron/services/l3_router/service_providers/dvr.py --- neutron-9.0.0~b2~dev280/neutron/services/l3_router/service_providers/dvr.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/l3_router/service_providers/dvr.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,19 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.services.l3_router.service_providers import base + + +class DvrDriver(base.L3ServiceProvider): + distributed_support = base.MANDATORY diff -Nru neutron-9.0.0~b2~dev280/neutron/services/l3_router/service_providers/ha.py neutron-9.0.0~b3~dev557/neutron/services/l3_router/service_providers/ha.py --- neutron-9.0.0~b2~dev280/neutron/services/l3_router/service_providers/ha.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/l3_router/service_providers/ha.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,19 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.services.l3_router.service_providers import base + + +class HaDriver(base.L3ServiceProvider): + ha_support = base.MANDATORY diff -Nru neutron-9.0.0~b2~dev280/neutron/services/l3_router/service_providers/single_node.py neutron-9.0.0~b3~dev557/neutron/services/l3_router/service_providers/single_node.py --- neutron-9.0.0~b2~dev280/neutron/services/l3_router/service_providers/single_node.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/l3_router/service_providers/single_node.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,19 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.services.l3_router.service_providers import base + + +class SingleNodeDriver(base.L3ServiceProvider): + """Provider for single L3 agent routers.""" diff -Nru neutron-9.0.0~b2~dev280/neutron/services/metering/agents/metering_agent.py neutron-9.0.0~b3~dev557/neutron/services/metering/agents/metering_agent.py --- neutron-9.0.0~b2~dev280/neutron/services/metering/agents/metering_agent.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/metering/agents/metering_agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -32,6 +32,7 @@ from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils +from neutron.conf.services import metering_agent from neutron import context from neutron import manager from neutron import service as neutron_service @@ -63,17 +64,6 @@ class MeteringAgent(MeteringPluginRpc, manager.Manager): - Opts = [ - cfg.StrOpt('driver', - default='neutron.services.metering.drivers.noop.' - 'noop_driver.NoopMeteringDriver', - help=_("Metering driver")), - cfg.IntOpt('measure_interval', default=30, - help=_("Interval between two metering measures")), - cfg.IntOpt('report_interval', default=300, - help=_("Interval between two metering reports")), - ] - def __init__(self, host, conf=None): self.conf = conf or cfg.CONF self._load_drivers() @@ -151,7 +141,6 @@ label_id = label['id'] self.label_tenant_id[label_id] = tenant_id - tenant_id = self.label_tenant_id.get accs = self._get_traffic_counters(self.context, self.routers.values()) if not accs: return @@ -290,7 +279,7 @@ def main(): conf = cfg.CONF - conf.register_opts(MeteringAgent.Opts) + metering_agent.register_metering_agent_opts() config.register_agent_state_opts_helper(conf) common_config.init(sys.argv[1:]) config.setup_logging() diff -Nru neutron-9.0.0~b2~dev280/neutron/services/provider_configuration.py neutron-9.0.0~b3~dev557/neutron/services/provider_configuration.py --- neutron-9.0.0~b2~dev280/neutron/services/provider_configuration.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/provider_configuration.py 2016-08-29 20:05:49.000000000 +0000 @@ -17,9 +17,11 @@ import itertools import os +from neutron.conf.services import provider_configuration as prov_config from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log as logging +from oslo_log import versionutils import stevedore from neutron._i18n import _, _LW @@ -29,14 +31,10 @@ SERVICE_PROVIDERS = 'neutron.service_providers' -serviceprovider_opts = [ - cfg.MultiStrOpt('service_provider', default=[], - help=_('Defines providers for advanced services ' - 'using the format: ' - '<service_type>:<name>:<driver>[:default]')) -] +# TODO(HenryG): use MovedGlobals to deprecate this.
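# The provider_configuration hunks above only relocate the option definitions
# to the neutron.conf.services package; the service_provider entry format is
# unchanged. An entry still looks like
#   service_provider = <service_type>:<name>:<driver>[:default]
# and a minimal parser for that format (hypothetical helper, not neutron
# code) is:

def parse_service_provider(entry):
    parts = entry.split(':')
    is_default = parts[-1] == 'default'
    if is_default:
        parts = parts[:-1]
    # re-join the remainder in case the driver spec itself contains colons
    return {'service_type': parts[0], 'name': parts[1],
            'driver': ':'.join(parts[2:]), 'default': is_default}

p = parse_service_provider('L3_ROUTER_NAT:ha:some.module.HaDriver:default')
assert p['name'] == 'ha' and p['default']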
+serviceprovider_opts = prov_config.serviceprovider_opts -cfg.CONF.register_opts(serviceprovider_opts, 'service_providers') +prov_config.register_service_provider_opts() class NeutronModule(object): @@ -66,7 +64,7 @@ def ini(self, neutron_dir=None): if self.repo['ini'] is None: ini_file = cfg.ConfigOpts() - ini_file.register_opts(serviceprovider_opts, 'service_providers') + prov_config.register_service_provider_opts(ini_file) if neutron_dir is not None: neutron_dirs = [neutron_dir] @@ -116,6 +114,11 @@ # necessary, if modules are loaded on the fly (DevStack may # be an example) if not providers: + versionutils.report_deprecated_feature( + LOG, + _LW('Implicit loading of service providers from ' + 'neutron_*.conf files is deprecated and will be removed ' + 'in Ocata release.')) providers = self.ini().service_providers.service_provider return providers diff -Nru neutron-9.0.0~b2~dev280/neutron/services/qos/notification_drivers/manager.py neutron-9.0.0~b3~dev557/neutron/services/qos/notification_drivers/manager.py --- neutron-9.0.0~b2~dev280/neutron/services/qos/notification_drivers/manager.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/qos/notification_drivers/manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -12,17 +12,12 @@ from oslo_config import cfg from oslo_log import log as logging +from neutron.conf.services import qos_driver_manager as qos_mgr from neutron._i18n import _, _LI from neutron import manager QOS_DRIVER_NAMESPACE = 'neutron.qos.notification_drivers' -QOS_PLUGIN_OPTS = [ - cfg.ListOpt('notification_drivers', - default=['message_queue'], - help=_('Drivers list to use to send the update notification')), -] - -cfg.CONF.register_opts(QOS_PLUGIN_OPTS, "qos") +qos_mgr.register_qos_plugin_opts() LOG = logging.getLogger(__name__) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/qos/notification_drivers/message_queue.py neutron-9.0.0~b3~dev557/neutron/services/qos/notification_drivers/message_queue.py --- neutron-9.0.0~b2~dev280/neutron/services/qos/notification_drivers/message_queue.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/qos/notification_drivers/message_queue.py 2016-08-03 20:10:34.000000000 +0000 @@ -53,7 +53,7 @@ pass def update_policy(self, context, policy): - self.notification_api.push(context, policy, events.UPDATED) + self.notification_api.push(context, [policy], events.UPDATED) def delete_policy(self, context, policy): - self.notification_api.push(context, policy, events.DELETED) + self.notification_api.push(context, [policy], events.DELETED) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/qos/qos_consts.py neutron-9.0.0~b3~dev557/neutron/services/qos/qos_consts.py --- neutron-9.0.0~b2~dev280/neutron/services/qos/qos_consts.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/qos/qos_consts.py 2016-08-03 20:10:34.000000000 +0000 @@ -14,8 +14,8 @@ # under the License. 
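# The message_queue hunk above wraps the policy in a list because the
# object-notification push API now accepts a batch of resources rather than a
# single object. A sketch of the new calling convention (names illustrative,
# not the driver's real API):

def push(context, resources, event_type):
    assert isinstance(resources, list), 'push() now takes a list of objects'
    return [(resource, event_type) for resource in resources]

notified = push(object(), [{'id': 'policy-1'}], 'updated')
assert len(notified) == 1  # one notification per resource in the batch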
RULE_TYPE_BANDWIDTH_LIMIT = 'bandwidth_limit' -RULE_TYPE_DSCP_MARK = 'dscp_marking' -VALID_RULE_TYPES = [RULE_TYPE_BANDWIDTH_LIMIT, RULE_TYPE_DSCP_MARK] +RULE_TYPE_DSCP_MARKING = 'dscp_marking' +VALID_RULE_TYPES = [RULE_TYPE_BANDWIDTH_LIMIT, RULE_TYPE_DSCP_MARKING] QOS_POLICY_ID = 'qos_policy_id' diff -Nru neutron-9.0.0~b2~dev280/neutron/services/qos/qos_plugin.py neutron-9.0.0~b3~dev557/neutron/services/qos/qos_plugin.py --- neutron-9.0.0~b2~dev280/neutron/services/qos/qos_plugin.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/qos/qos_plugin.py 2016-08-29 20:05:49.000000000 +0000 @@ -72,8 +72,7 @@ """ policy_data = policy['policy'] policy_obj = policy_object.QosPolicy(context, id=policy_id) - policy_obj.update_nonidentifying_fields(policy_data, - reset_changes=True) + policy_obj.update_fields(policy_data, reset_changes=True) policy_obj.update() self.notification_driver_manager.update_policy(context, policy_obj) return policy_obj @@ -150,13 +149,13 @@ return rule_type_object.QosRuleType.get_objects(**filters) @db_base_plugin_common.convert_result_to_dict - def create_policy_rule(self, context, rule_obj, policy_id, rule_data): + def create_policy_rule(self, context, rule_cls, policy_id, rule_data): """Create a QoS policy rule. :param context: neutron api request context :type context: neutron.context.Context - :param rule_obj: the rule object - :type rule_obj: a class from the rule_object (qos.objects.rule) module + :param rule_cls: the rule object class + :type rule_cls: a class from the rule_object (qos.objects.rule) module :param policy_id: the id of the QosPolicy for which to create the rule :type policy_id: str uuid :param rule_data: the rule data to be applied @@ -164,27 +163,27 @@ :returns: a QoS policy rule object """ - rule_type = rule_obj.rule_type + rule_type = rule_cls.rule_type rule_data = rule_data[rule_type + '_rule'] with db_api.autonested_transaction(context.session): # Ensure that we have access to the policy. policy = self._get_policy_obj(context, policy_id) - rule = rule_obj(context, qos_policy_id=policy_id, **rule_data) + rule = rule_cls(context, qos_policy_id=policy_id, **rule_data) rule.create() policy.reload_rules() self.notification_driver_manager.update_policy(context, policy) return rule @db_base_plugin_common.convert_result_to_dict - def update_policy_rule(self, context, rule_obj, rule_id, policy_id, + def update_policy_rule(self, context, rule_cls, rule_id, policy_id, rule_data): """Update a QoS policy rule. :param context: neutron api request context :type context: neutron.context.Context - :param rule_obj: the rule object - :type rule_obj: a class from the rule_object (qos.objects.rule) module + :param rule_cls: the rule object class + :type rule_cls: a class from the rule_object (qos.objects.rule) module :param rule_id: the id of the QoS policy rule to update :type rule_id: str uuid :param policy_id: the id of the rule's policy @@ -194,7 +193,7 @@ :returns: a QoS policy rule object """ - rule_type = rule_obj.rule_type + rule_type = rule_cls.rule_type rule_data = rule_data[rule_type + '_rule'] with db_api.autonested_transaction(context.session): @@ -202,20 +201,20 @@ policy = self._get_policy_obj(context, policy_id) # Ensure the rule belongs to the policy. 
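# The qos_plugin hunks around this point rename rule_obj to rule_cls because
# the parameter is a rule *class* (a type from the qos.objects.rule module),
# not an instance; the plugin instantiates it itself. A toy restatement of
# the create path (stand-in class, not neutron's versioned objects):

class BandwidthLimitRule(object):
    rule_type = 'bandwidth_limit'

    def __init__(self, policy_id, **fields):
        self.qos_policy_id = policy_id
        self.fields = fields

def create_policy_rule(rule_cls, policy_id, rule_data):
    # the API payload is keyed by '<rule_type>_rule', as in the real code
    fields = rule_data[rule_cls.rule_type + '_rule']
    return rule_cls(policy_id, **fields)  # called as a class, hence rule_cls

rule = create_policy_rule(BandwidthLimitRule, 'pol-1',
                          {'bandwidth_limit_rule': {'max_kbps': 1000}})
assert rule.fields['max_kbps'] == 1000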
policy.get_rule_by_id(rule_id) - rule = rule_obj(context, id=rule_id) - rule.update_nonidentifying_fields(rule_data, reset_changes=True) + rule = rule_cls(context, id=rule_id) + rule.update_fields(rule_data, reset_changes=True) rule.update() policy.reload_rules() self.notification_driver_manager.update_policy(context, policy) return rule - def delete_policy_rule(self, context, rule_obj, rule_id, policy_id): + def delete_policy_rule(self, context, rule_cls, rule_id, policy_id): """Delete a QoS policy rule. :param context: neutron api request context :type context: neutron.context.Context - :param rule_obj: the rule object - :type rule_obj: a class from the rule_object (qos.objects.rule) module + :param rule_cls: the rule object class + :type rule_cls: a class from the rule_object (qos.objects.rule) module :param rule_id: the id of the QosPolicy Rule to delete :type rule_id: str uuid :param policy_id: the id of the rule's policy @@ -233,14 +232,14 @@ @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict - def get_policy_rule(self, context, rule_obj, rule_id, policy_id, + def get_policy_rule(self, context, rule_cls, rule_id, policy_id, fields=None): """Get a QoS policy rule. :param context: neutron api request context :type context: neutron.context.Context - :param rule_obj: the rule object - :type rule_obj: a class from the rule_object (qos.objects.rule) module + :param rule_cls: the rule object class + :type rule_cls: a class from the rule_object (qos.objects.rule) module :param rule_id: the id of the QoS policy rule to get :type rule_id: str uuid :param policy_id: the id of the rule's policy @@ -252,7 +251,7 @@ with db_api.autonested_transaction(context.session): # Ensure we have access to the policy. self._get_policy_obj(context, policy_id) - rule = rule_obj.get_object(context, id=rule_id) + rule = rule_cls.get_object(context, id=rule_id) if not rule: raise n_exc.QosRuleNotFound(policy_id=policy_id, rule_id=rule_id) return rule @@ -260,15 +259,15 @@ # TODO(QoS): enforce rule types when accessing rule objects @db_base_plugin_common.filter_fields @db_base_plugin_common.convert_result_to_dict - def get_policy_rules(self, context, rule_obj, policy_id, filters=None, + def get_policy_rules(self, context, rule_cls, policy_id, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): """Get QoS policy rules. 
:param context: neutron api request context :type context: neutron.context.Context - :param rule_obj: the rule object - :type rule_obj: a class from the rule_object (qos.objects.rule) module + :param rule_cls: the rule object class + :type rule_cls: a class from the rule_object (qos.objects.rule) module :param policy_id: the id of the QosPolicy for which to get rules :type policy_id: str uuid @@ -280,4 +279,4 @@ filters = filters or dict() filters[qos_consts.QOS_POLICY_ID] = policy_id pager = base_obj.Pager(sorts, limit, page_reverse, marker) - return rule_obj.get_objects(context, _pager=pager, **filters) + return rule_cls.get_objects(context, _pager=pager, **filters) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/revisions/revision_plugin.py neutron-9.0.0~b3~dev557/neutron/services/revisions/revision_plugin.py --- neutron-9.0.0~b2~dev280/neutron/services/revisions/revision_plugin.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/revisions/revision_plugin.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,112 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging +from sqlalchemy import event +from sqlalchemy.orm import exc +from sqlalchemy.orm import session as se + +from neutron._i18n import _, _LW +from neutron.db import db_base_plugin_v2 +from neutron.db import standard_attr +from neutron.extensions import revisions +from neutron.services import service_base + +LOG = logging.getLogger(__name__) + + +class RevisionPlugin(service_base.ServicePluginBase): + """Plugin to populate revision numbers into standard attr resources.""" + + supported_extension_aliases = ['revisions'] + + def __init__(self): + super(RevisionPlugin, self).__init__() + for resource in revisions.RESOURCES: + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + resource, [self.extend_resource_dict_revision]) + event.listen(se.Session, 'before_flush', self.bump_revisions) + + def bump_revisions(self, session, context, instances): + # bump revision number for any updated objects in the session + for obj in session.dirty: + if isinstance(obj, standard_attr.HasStandardAttributes): + obj.bump_revision() + + # see if any created/updated/deleted objects bump the revision + # of another object + objects_with_related_revisions = [ + o for o in session.deleted | session.dirty | session.new + if getattr(o, 'revises_on_change', ()) + ] + for obj in objects_with_related_revisions: + self._bump_related_revisions(session, obj) + + def _bump_related_revisions(self, session, obj): + for revises_col in getattr(obj, 'revises_on_change', ()): + try: + related_obj = self._find_related_obj(session, obj, revises_col) + if not related_obj: + LOG.warning(_LW("Could not find related %(col)s for " + "resource %(obj)s to bump revision."), + {'obj': obj, 'col': revises_col}) + continue + # if related object revises others, bump those as well + self._bump_related_revisions(session, related_obj) + # no need to bump revisions on related objects being deleted + if 
related_obj not in session.deleted: + related_obj.bump_revision() + except exc.ObjectDeletedError: + # object was in session but another writer deleted it + pass + + def get_plugin_type(self): + return "revision_plugin" + + def get_plugin_description(self): + return "Adds revision numbers to resources." + + def extend_resource_dict_revision(self, plugin, resource_res, resource_db): + resource_res['revision'] = resource_db.revision_number + + def _find_related_obj(self, session, obj, relationship_col): + """Find a related object for an object based on relationship column. + + Given a relationship column, find the object that corresponds to it + either in the current session or by looking it up if it's not present. + """ + # first check to see if it's directly attached to the object already + related_obj = getattr(obj, relationship_col) + if related_obj: + return related_obj + rel = getattr(obj.__class__, relationship_col) # get relationship + local_rel_col = list(rel.property.local_columns)[0] + if len(rel.property.local_columns) > 1: + raise RuntimeError(_("Bumping revisions with composite foreign " + "keys not supported")) + related_model = rel.property.mapper.class_ + pk = rel.property.mapper.primary_key[0] + rel_id = getattr(obj, local_rel_col.name) + if not rel_id: + return None + for session_obj in session: + if not isinstance(session_obj, related_model): + continue + if getattr(session_obj, pk.name) == rel_id: + return session_obj + # object isn't in session so we have to query for it + related_obj = ( + session.query(related_model).filter(pk == rel_id). + first() + ) + return related_obj diff -Nru neutron-9.0.0~b2~dev280/neutron/services/segments/db.py neutron-9.0.0~b3~dev557/neutron/services/segments/db.py --- neutron-9.0.0~b2~dev280/neutron/services/segments/db.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/segments/db.py 2016-08-29 20:05:49.000000000 +0000 @@ -18,27 +18,26 @@ import functools from neutron_lib import constants +from neutron_lib import exceptions as n_exc +from oslo_db import exception as db_exc from oslo_log import helpers as log_helpers -from oslo_log import log as logging from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc -from neutron.api.v2 import attributes from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources +from neutron.db import api as db_api from neutron.db import common_db_mixin from neutron.db import model_base from neutron.db import segments_db as db from neutron.extensions import segment as extension +from neutron import manager from neutron.services.segments import exceptions -LOG = logging.getLogger(__name__) - - class SegmentHostMapping(model_base.BASEV2): segment_id = sa.Column(sa.String(36), @@ -60,15 +59,6 @@ cascade='delete')) -def _extend_subnet_dict_binding(plugin, subnet_res, subnet_db): - subnet_res['segment_id'] = subnet_db.get('segment_id') - - -# Register dict extend functions for subnets -common_db_mixin.CommonDbMixin.register_dict_extend_funcs( - attributes.SUBNETS, [_extend_subnet_dict_binding]) - - class SegmentDbMixin(common_db_mixin.CommonDbMixin): """Mixin class to add segment.""" @@ -77,7 +67,10 @@ 'network_id': segment_db['network_id'], db.PHYSICAL_NETWORK: segment_db[db.PHYSICAL_NETWORK], db.NETWORK_TYPE: segment_db[db.NETWORK_TYPE], - db.SEGMENTATION_ID: segment_db[db.SEGMENTATION_ID]} + db.SEGMENTATION_ID: segment_db[db.SEGMENTATION_ID], + 'hosts': [mapping.host 
for mapping in + segment_db.segment_host_mapping], + 'segment_index': segment_db['segment_index']} return self._fields(res, fields) def _get_segment(self, context, segment_id): @@ -106,8 +99,32 @@ db.PHYSICAL_NETWORK: physical_network, db.NETWORK_TYPE: network_type, db.SEGMENTATION_ID: segmentation_id} + # Calculate the index of segment + segment_index = 0 + segments = self.get_segments( + context, + filters={'network_id': [network_id]}, + fields=['segment_index'], + sorts=[('segment_index', True)]) + if segments: + # NOTE(xiaohhui): The new index is the last index + 1, this + # may cause discontinuous segment_index. But segment_index + # can functionally work as the order index for segments. + segment_index = (segments[-1].get('segment_index') + 1) + args['segment_index'] = segment_index + new_segment = db.NetworkSegment(**args) - context.session.add(new_segment) + try: + context.session.add(new_segment) + context.session.flush([new_segment]) + except db_exc.DBReferenceError: + raise n_exc.NetworkNotFound(net_id=network_id) + # Do some preliminary operations before committing the segment to db + registry.notify(resources.SEGMENT, events.PRECOMMIT_CREATE, self, + context=context, segment=new_segment) + + registry.notify(resources.SEGMENT, events.AFTER_CREATE, self, + context=context, segment=new_segment) return self._make_segment_dict(new_segment) @@ -148,13 +165,36 @@ filters=filters) @log_helpers.log_method_call + def get_segments_by_hosts(self, context, hosts): + if not hosts: + return [] + query = context.session.query(SegmentHostMapping).filter( + SegmentHostMapping.host.in_(hosts)) + return list({mapping.segment_id for mapping in query}) + + @log_helpers.log_method_call def delete_segment(self, context, uuid): """Delete an existing segment.""" + segment = self.get_segment(context, uuid) + # Do some preliminary operations before deleting the segment + registry.notify(resources.SEGMENT, events.BEFORE_DELETE, + self.delete_segment, context=context, + segment=segment) + + # Delete segment in DB with context.session.begin(subtransactions=True): query = self._model_query(context, db.NetworkSegment) query = query.filter(db.NetworkSegment.id == uuid) if 0 == query.delete(): raise exceptions.SegmentNotFound(segment_id=uuid) + # Do some preliminary operations before deleting segment in db + registry.notify(resources.SEGMENT, events.PRECOMMIT_DELETE, + self.delete_segment, context=context, + segment=segment) + + registry.notify(resources.SEGMENT, events.AFTER_DELETE, + self.delete_segment, context=context, + segment=segment) def update_segment_host_mapping(context, host, current_segment_ids): @@ -168,11 +208,21 @@ host=host)) stale_segment_ids = previous_segment_ids - current_segment_ids if stale_segment_ids: - context.session.query(SegmentHostMapping).filter( + segments_host_query.filter( SegmentHostMapping.segment_id.in_( stale_segment_ids)).delete(synchronize_session=False) +def get_hosts_mapped_with_segments(context): + """Get hosts that are mapped with segments. + + L2 providers can use this method to get an overview of SegmentHostMapping, + and then delete the stale SegmentHostMapping. + """ + query = context.session.query(SegmentHostMapping.host) + return {row.host for row in query} + + def _get_phys_nets(agent): configurations_dict = agent.get('configurations', {}) mappings = configurations_dict.get('bridge_mappings', {}) @@ -183,6 +233,10 @@ reported_hosts = set() +# NOTE: Module level variable of segments plugin. It should be removed once +# segments becomes a default plugin.
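# The create-segment hunk above derives segment_index as "last existing
# index + 1" from a query sorted ascending on segment_index; deletes may
# leave holes, but ordering stays monotonic, which is all the index is used
# for. Standalone restatement (input assumed sorted ascending, as in the
# query):

def next_segment_index(existing_indexes):
    return existing_indexes[-1] + 1 if existing_indexes else 0

assert next_segment_index([]) == 0
assert next_segment_index([0, 1, 2]) == 3
assert next_segment_index([0, 4]) == 5  # a hole is tolerated; order still works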
+segments_plugin = None + + def get_segments_with_phys_nets(context, phys_nets): """Get segments from physical networks. @@ -200,6 +254,14 @@ return segments +def map_segment_to_hosts(context, segment_id, hosts): + """Map segment to a collection of hosts.""" + with db_api.autonested_transaction(context.session): + for host in hosts: + context.session.add(SegmentHostMapping(segment_id=segment_id, + host=host)) + + def _update_segment_host_mapping_for_agent(resource, event, trigger, context, host, plugin, agent): check_segment_for_agent = getattr(plugin, 'check_segment_for_agent', None) @@ -219,6 +281,41 @@ update_segment_host_mapping(context, host, current_segment_ids) +def _add_segment_host_mapping_for_segment(resource, event, trigger, + context, segment): + if not context.session.is_active: + # The session might be in a partial rollback state due to errors + # in a peer callback. In that case, there is no need to add the + # mapping. Just return here. + return + + if not segment.physical_network: + return + cp = manager.NeutronManager.get_plugin() + check_segment_for_agent = getattr(cp, 'check_segment_for_agent', None) + if not hasattr(cp, 'get_agents') or not check_segment_for_agent: + # not an agent-supporting plugin + registry.unsubscribe(_add_segment_host_mapping_for_segment, + resources.SEGMENT, events.PRECOMMIT_CREATE) + return + hosts = {agent['host'] for agent in cp.get_agents(context) + if check_segment_for_agent(segment, agent)} + map_segment_to_hosts(context, segment.id, hosts) + + +def _delete_segments_for_network(resource, event, trigger, + context, network_id): + admin_ctx = context.elevated() + global segments_plugin + if not segments_plugin: + segments_plugin = manager.NeutronManager.load_class_for_provider( + 'neutron.service_plugins', 'segments')() + segments = segments_plugin.get_segments( + admin_ctx, filters={'network_id': [network_id]}) + for segment in segments: + segments_plugin.delete_segment(admin_ctx, segment['id']) + + def subscribe(): registry.subscribe(_update_segment_host_mapping_for_agent, resources.AGENT, @@ -226,5 +323,10 @@ registry.subscribe(_update_segment_host_mapping_for_agent, resources.AGENT, events.AFTER_UPDATE) + registry.subscribe(_add_segment_host_mapping_for_segment, + resources.SEGMENT, events.PRECOMMIT_CREATE) + registry.subscribe(_delete_segments_for_network, + resources.NETWORK, + events.PRECOMMIT_DELETE) subscribe() diff -Nru neutron-9.0.0~b2~dev280/neutron/services/segments/exceptions.py neutron-9.0.0~b3~dev557/neutron/services/segments/exceptions.py --- neutron-9.0.0~b2~dev280/neutron/services/segments/exceptions.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/segments/exceptions.py 2016-08-29 20:05:49.000000000 +0000 @@ -48,3 +48,13 @@ message = _("Host %(host)s is not connected to any segments on routed " "provider network '%(network_id)s'.
It should be connected " "to one.") + + +class HostNotCompatibleWithFixedIps(exceptions.Conflict): + message = _("Host %(host)s is not connected to a segment where the " + "existing fixed_ips on port %(port_id)s will function given " + "the routed network topology.") + + +class SegmentInUse(exceptions.InUse): + message = _("Segment '%(segment_id)s' cannot be deleted: %(reason)s.") diff -Nru neutron-9.0.0~b2~dev280/neutron/services/segments/plugin.py neutron-9.0.0~b3~dev557/neutron/services/segments/plugin.py --- neutron-9.0.0~b2~dev280/neutron/services/segments/plugin.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/segments/plugin.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,19 +14,87 @@ # License for the specific language governing permissions and limitations # under the License. +from sqlalchemy.orm import session +from neutron._i18n import _ +from neutron.api.v2 import attributes +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.callbacks import resources +from neutron.db import common_db_mixin +from neutron.db import models_v2 +from neutron.extensions import ip_allocation +from neutron.extensions import l2_adjacency from neutron.extensions import segment +from neutron import manager from neutron.services.segments import db +from neutron.services.segments import exceptions + + +def _extend_network_dict_binding(plugin, network_res, network_db): + if not manager.NeutronManager.get_service_plugins().get('segments'): + return + + # TODO(carl_baldwin) Make this work with service subnets when it's a thing. + is_adjacent = (not network_db.subnets + or not network_db.subnets[0].segment_id) + network_res[l2_adjacency.L2_ADJACENCY] = is_adjacent + + +def _extend_subnet_dict_binding(plugin, subnet_res, subnet_db): + subnet_res['segment_id'] = subnet_db.get('segment_id') + + +def _extend_port_dict_binding(plugin, port_res, port_db): + if not manager.NeutronManager.get_service_plugins().get('segments'): + return + + value = ip_allocation.IP_ALLOCATION_IMMEDIATE + if not port_res.get('fixed_ips'): + # NOTE Only routed network ports have deferred allocation. Check if it + # is routed by looking for subnets associated with segments. 
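# (Put differently: on a routed network, the subnet a port may draw an
# address from depends on which host the port lands on, so a port created
# with no fixed_ips cannot be given an address yet and its allocation is
# reported as deferred.)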
+ object_session = session.Session.object_session(port_db) + query = object_session.query(models_v2.Subnet) + query = query.filter_by(network_id=port_db.network_id) + query = query.filter(models_v2.Subnet.segment_id.isnot(None)) + if query.count(): + value = ip_allocation.IP_ALLOCATION_DEFERRED + port_res[ip_allocation.IP_ALLOCATION] = value class Plugin(db.SegmentDbMixin, segment.SegmentPluginBase): _instance = None - supported_extension_aliases = ["segment"] + supported_extension_aliases = ["segment", "ip_allocation", "l2_adjacency"] + + def __init__(self): + common_db_mixin.CommonDbMixin.register_dict_extend_funcs( + attributes.NETWORKS, [_extend_network_dict_binding]) + common_db_mixin.CommonDbMixin.register_dict_extend_funcs( + attributes.SUBNETS, [_extend_subnet_dict_binding]) + common_db_mixin.CommonDbMixin.register_dict_extend_funcs( + attributes.PORTS, [_extend_port_dict_binding]) + + registry.subscribe( + self._prevent_segment_delete_with_subnet_associated, + resources.SEGMENT, + events.BEFORE_DELETE) @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance + + def _prevent_segment_delete_with_subnet_associated( + self, resource, event, trigger, context, segment): + """Raise exception if there are any subnets associated with segment.""" + segment_id = segment['id'] + query = context.session.query(models_v2.Subnet.id) + query = query.filter(models_v2.Subnet.segment_id == segment_id) + subnet_ids = [s[0] for s in query] + if subnet_ids: + reason = _("The segment is still associated with subnet(s) " + "%s") % ", ".join(subnet_ids) + raise exceptions.SegmentInUse(segment_id=segment_id, reason=reason) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/service_base.py neutron-9.0.0~b3~dev557/neutron/services/service_base.py --- neutron-9.0.0~b2~dev280/neutron/services/service_base.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/service_base.py 2016-08-03 20:10:34.000000000 +0000 @@ -20,7 +20,7 @@ from oslo_utils import importutils import six -from neutron._i18n import _, _LE, _LI +from neutron._i18n import _LE, _LI from neutron.api import extensions from neutron.db import servicetype_db as sdb from neutron.services import provider_configuration as pconf @@ -61,7 +61,7 @@ filters={'service_type': [service_type]}) ) if not providers: - msg = (_("No providers specified for '%s' service, exiting") % + msg = (_LE("No providers specified for '%s' service, exiting") % service_type) LOG.error(msg) raise SystemExit(1) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/tag/tag_plugin.py neutron-9.0.0~b3~dev557/neutron/services/tag/tag_plugin.py --- neutron-9.0.0~b2~dev280/neutron/services/tag/tag_plugin.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/tag/tag_plugin.py 2016-08-03 20:10:34.000000000 +0000 @@ -17,7 +17,6 @@ from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_log import helpers as log_helpers -from oslo_log import log as logging from sqlalchemy.orm import exc from neutron.api.v2 import attributes @@ -28,9 +27,6 @@ from neutron.extensions import tag as tag_ext -LOG = logging.getLogger(__name__) - - resource_model_map = { attributes.NETWORKS: models_v2.Network, # other resources can be added diff -Nru neutron-9.0.0~b2~dev280/neutron/services/timestamp/timestamp_db.py neutron-9.0.0~b3~dev557/neutron/services/timestamp/timestamp_db.py --- neutron-9.0.0~b2~dev280/neutron/services/timestamp/timestamp_db.py 2016-06-17 
15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/timestamp/timestamp_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -23,7 +23,7 @@ from sqlalchemy.orm import session as se from neutron._i18n import _LW -from neutron.db import model_base +from neutron.db import standard_attr LOG = log.getLogger(__name__) @@ -58,10 +58,10 @@ changed_since = (timeutils. normalize_time(changed_since_string)) target_model_class = list(query._mapper_adapter_map.keys())[0] - query = query.join(model_base.StandardAttribute, + query = query.join(standard_attr.StandardAttribute, target_model_class.standard_attr_id == - model_base.StandardAttribute.id).filter( - model_base.StandardAttribute.updated_at + standard_attr.StandardAttribute.id).filter( + standard_attr.StandardAttribute.updated_at >= changed_since) return query @@ -70,17 +70,17 @@ while objs_list: obj = objs_list.pop() - if (isinstance(obj, model_base.HasStandardAttributes) + if (isinstance(obj, standard_attr.HasStandardAttributes) and obj.standard_attr_id): obj.updated_at = timeutils.utcnow() def register_db_events(self): - event.listen(model_base.StandardAttribute, 'before_insert', + event.listen(standard_attr.StandardAttribute, 'before_insert', self._add_timestamp) event.listen(se.Session, 'before_flush', self.update_timestamp) def unregister_db_events(self): - self._unregister_db_event(model_base.StandardAttribute, + self._unregister_db_event(standard_attr.StandardAttribute, 'before_insert', self._add_timestamp) self._unregister_db_event(se.Session, 'before_flush', self.update_timestamp) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/callbacks.py neutron-9.0.0~b3~dev557/neutron/services/trunk/callbacks.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/callbacks.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/callbacks.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,33 @@ +# (c) Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class TrunkPayload(object): + """Payload for trunk-related callback registry notifications.""" + + def __init__(self, context, trunk_id, current_trunk=None, + original_trunk=None, subports=None): + self.context = context + self.trunk_id = trunk_id + self.current_trunk = current_trunk + self.original_trunk = original_trunk + self.subports = subports if subports else [] + + def __eq__(self, other): + return (isinstance(other, self.__class__) and + self.__dict__ == other.__dict__) + + def __ne__(self, other): + return not self.__eq__(other) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/constants.py neutron-9.0.0~b3~dev557/neutron/services/trunk/constants.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/constants.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/constants.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,68 @@ +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# Valid trunk statuses + +# The trunk is happy, yay! +# A trunk remains in ACTIVE state when updates like name or admin_state_up +# occur. It goes back to ACTIVE state from other states (e.g. BUILD) when +# logical and physical resource provisioning has completed successfully. The +# attribute ADMIN_STATE_UP is not to be confused with STATUS: the former +# indicates whether a trunk can be managed. If a trunk has admin_state_up +# equal to false, the trunk plugin will reject any user request to manage +# the trunk resources (i.e. adding/removing sub-ports). +ACTIVE_STATUS = 'ACTIVE' + +# The server has acknowledged the user request: a user has asked to either +# create a trunk or add/remove resources to a trunk, and the plugin has +# created/updated the logical resource. The request has been passed along +# to a backend, and the physical resources associated with the trunk are +# in the process of being provisioned. +PENDING_STATUS = 'PENDING' + +# A driver/backend has acknowledged the server request: once the server +# notifies the driver/backend, a trunk is in BUILD state while the +# backend provisions the trunk resources. +BUILD_STATUS = 'BUILD' + +# Should any temporary system failure occur during the provisioning process, +# a trunk is in DEGRADED state. This means that the trunk was only +# partially provisioned, and only a subset of the subports were added +# successfully to the trunk. The operation of removing/adding the faulty +# subports may be attempted as a recovery measure. +DEGRADED_STATUS = 'DEGRADED' + +# Due to unforeseen circumstances, the user request has led to a conflict, and +# the trunk cannot be provisioned correctly for a subset of subports. For +# instance, a subport belonging to a network might not be compatible with +# the current trunk configuration, or the binding process leads to a persistent +# failure. Removing the 'offending' resource may be attempted as a recovery +# measure, but re-adding it to the trunk should lead to the same error +# condition. A trunk in ERROR status should be brought back to a sane status +# (i.e. any state except ERROR state) before attempting to add more subports, +# therefore requests to add more subports must be rejected to avoid +# cascading errors. +ERROR_STATUS = 'ERROR' + + +# String literals for identifying trunk resources +PARENT_PORT = 'parent_port' +SUBPORTS = 'subports' +TRUNK = 'trunk' +TRUNK_PLUGIN = 'trunk_plugin' + + +# String literals for segmentation types +VLAN = 'vlan' diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/db.py neutron-9.0.0~b3~dev557/neutron/services/trunk/db.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/db.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/db.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -# Copyright 2016 Hewlett Packard Enterprise Development Company, LP -# -# All Rights Reserved.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_db import exception as db_exc -from oslo_utils import uuidutils - -from neutron.services.trunk import exceptions -from neutron.services.trunk import models - - -def create_trunk(context, port_id, description=None): - """Create a trunk (with description) given the parent port uuid.""" - try: - with context.session.begin(subtransactions=True): - context.session.add( - models.Trunk( - id=uuidutils.generate_uuid(), - tenant_id=context.tenant_id, - port_id=port_id, - description=description)) - except db_exc.DBDuplicateEntry: - raise exceptions.TrunkPortInUse(port_id=port_id) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/base.py neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/base.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/base.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/base.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,87 @@ +# Copyright 2016 Hewlett Packard Enterprise Development Company, LP +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.services.trunk import constants as trunk_consts +from neutron.services.trunk.rpc import backend + + +class DriverBase(object): + + def __init__(self, name, interfaces, segmentation_types, + agent_type=None, can_trunk_bound_port=False): + """Instantiate a trunk driver. + + :param name: driver name. + :param interfaces: list of interfaces supported. + :param segmentation_types: list of segmentation types supported. + :param agent_type: agent type for the driver, None if agentless. + :param can_trunk_bound_port: True if trunk creation is allowed + for a bound parent port (i.e. trunk creation after VM boot). + """ + + self.name = name + self.interfaces = interfaces + self.segmentation_types = segmentation_types + self.agent_type = agent_type + self.can_trunk_bound_port = can_trunk_bound_port + registry.subscribe(self.register, + trunk_consts.TRUNK_PLUGIN, + events.AFTER_INIT) + + @abc.abstractproperty + def is_loaded(self): + """True if the driver is active for the Neutron Server. + + Implement this property to determine if your driver is actively + configured for this Neutron Server deployment, e.g. check if + core_plugin or mech_drivers config options (for ML2) are set as + required.
+ """ + + def is_interface_compatible(self, interface): + """True if the driver is compatible with the interface.""" + return interface in self.interfaces + + def is_agent_compatible(self, agent_type): + """True if the driver is compatible with the agent type.""" + return agent_type == self.agent_type + + def register(self, resource, event, trigger, **kwargs): + """Register the trunk driver. + + This method should be overriden so that the driver can subscribe + to the required trunk events. The driver should also advertise + itself as supported driver by calling register_driver() on the + TrunkPlugin otherwise the trunk plugin may fail to start if no + compatible configuration is found. + + External drivers must subscribe to the AFTER_INIT event for the + trunk plugin so that they can integrate without an explicit + register() method invocation. + + :param resource: neutron.services.trunk.constants.TRUNK_PLUGIN + :param event: neutron.callbacks.events.AFTER_INIT + :param trigger: neutron.service.trunks.plugin.TrunkPlugin + """ + + trigger.register_driver(self) + # Set up the server-side RPC backend if the driver is loaded, + # it is agent based, and the RPC backend is not already initialized. + if self.is_loaded and self.agent_type and not trigger.is_rpc_enabled(): + trigger.set_rpc_backend(backend.ServerSideRpcBackend()) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/__init__.py neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/__init__.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/__init__.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,29 @@ +# Copyright 2016 Hewlett Packard Enterprise Development Company LP +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.services.trunk.drivers.linuxbridge import driver as lxb_driver +from neutron.services.trunk.drivers.openvswitch import driver as ovs_driver + + +def register(): + """Load in-tree drivers for the service plugin.""" + # Enable the trunk plugin to work with ML2/OVS. Support for other + # drivers can be added similarly by executing the registration + # code at the time of plugin/mech driver initialization. There should + # be at least one compatible driver enabled in the deployment for trunk + # setup to be successful. The plugin fails to initialize if no compatible + # driver is found in the deployment. 
+ lxb_driver.register() + ovs_driver.register() diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/linuxbridge/driver.py neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/linuxbridge/driver.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/linuxbridge/driver.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/linuxbridge/driver.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,53 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log as logging + +from neutron_lib import constants + +from neutron.extensions import portbindings +from neutron.services.trunk import constants as trunk_consts +from neutron.services.trunk.drivers import base + +LOG = logging.getLogger(__name__) + +NAME = 'linuxbridge' +SUPPORTED_INTERFACES = ( + portbindings.VIF_TYPE_BRIDGE, +) +SUPPORTED_SEGMENTATION_TYPES = ( + trunk_consts.VLAN, +) + + +class LinuxBridgeDriver(base.DriverBase): + """Server-side Trunk driver for the ML2 Linux Bridge driver.""" + + @property + def is_loaded(self): + try: + return NAME in cfg.CONF.ml2.mechanism_drivers + except cfg.NoSuchOptError: + return False + + @classmethod + def create(cls): + return cls(NAME, SUPPORTED_INTERFACES, SUPPORTED_SEGMENTATION_TYPES, + constants.AGENT_TYPE_LINUXBRIDGE, can_trunk_bound_port=True) + + +def register(): + # NOTE(kevinbenton): the thing that is keeping this from being + # immediately garbage collected is that it registers callbacks + LinuxBridgeDriver.create() + LOG.debug("Linux bridge trunk driver initialized.") diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/openvswitch/agent/driver.py neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/openvswitch/agent/driver.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/openvswitch/agent/driver.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/openvswitch/agent/driver.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,46 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_log import log as logging + +from neutron.api.rpc.callbacks.consumer import registry +from neutron.api.rpc.callbacks import resources +from neutron.services.trunk.rpc import agent + +LOG = logging.getLogger(__name__) + +TRUNK_SKELETON = None + + +class OVSTrunkSkeleton(agent.TrunkSkeleton): + + def __init__(self): + super(OVSTrunkSkeleton, self).__init__() + registry.unsubscribe(self.handle_trunks, resources.TRUNK) + + def handle_trunks(self, trunk, event_type): + """This method is not required by the OVS Agent driver. + + Trunk notifications are handled via local OVSDB events. + """ + raise NotImplementedError() + + def handle_subports(self, subports, event_type): + # TODO(armax): call into TrunkManager to wire the subports + LOG.debug("Event %s for subports: %s", event_type, subports) + + +def init_handler(resource, event, trigger, agent=None): + """Handler for agent init event.""" + # Set up agent-side RPC for receiving trunk events; we may want to + # make this setup conditional based on server-side capabilities. + global TRUNK_SKELETON + TRUNK_SKELETON = OVSTrunkSkeleton() diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/openvswitch/agent/exceptions.py neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/openvswitch/agent/exceptions.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/openvswitch/agent/exceptions.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/openvswitch/agent/exceptions.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,19 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lib import exceptions as n_exc + +from neutron._i18n import _ + + +class TrunkBridgeNotFound(n_exc.NotFound): + message = _("Trunk bridge %(bridge)s could not be found.") diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,254 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
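# The module below wires trunk bridges to the integration bridge with
# patch ports. A quick sketch of the names its helpers produce (the UUID
# is made up, and a 14-character device name limit is assumed):
#
#     get_br_int_port_name('tp', '4db03277-ab2c-...')
#     # -> 'tpi-4db03277-a', plumbs into the integration bridge
#     get_br_trunk_port_name('tp', '4db03277-ab2c-...')
#     # -> 'tpt-4db03277-a', plumbs into the trunk bridge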
+ +import contextlib + +from neutron_lib import constants +from oslo_log import log as logging + +from neutron.agent.common import ovs_lib +from neutron.services.trunk.drivers.openvswitch.agent import exceptions as exc +from neutron.services.trunk import utils + +LOG = logging.getLogger(__name__) + + +def get_br_int_port_name(prefix, port_id): + """Return the OVS port name for the given port ID. + + The port name is the one that plumbs into the integration bridge. + """ + return ("%si-%s" % (prefix, port_id))[:constants.DEVICE_NAME_MAX_LEN] + + +def get_br_trunk_port_name(prefix, port_id): + """Return the OVS port name for the given port ID. + + The port name is the one that plumbs into the trunk bridge. + """ + return ("%st-%s" % (prefix, port_id))[:constants.DEVICE_NAME_MAX_LEN] + + +def get_patch_peer_attrs(peer_name, port_mac=None, port_id=None): + external_ids = {} + if port_mac: + external_ids['attached-mac'] = port_mac + if port_id: + external_ids['iface-id'] = port_id + attrs = [('type', 'patch'), + ('options', {'peer': peer_name})] + if external_ids: + attrs.append( + ('external_ids', external_ids)) + return attrs + + +class TrunkBridge(ovs_lib.OVSBridge): + + def __init__(self, trunk_id): + name = utils.gen_trunk_br_name(trunk_id) + super(TrunkBridge, self).__init__(name) + + def exists(self): + return self.bridge_exists(self.br_name) + + +class TrunkParentPort(object): + DEV_PREFIX = 'tp' + + def __init__(self, trunk_id, port_id, port_mac=None): + self.trunk_id = trunk_id + self.port_id = port_id + self.port_mac = port_mac + self.bridge = TrunkBridge(self.trunk_id) + # The name has the form tpi-<port_id> + self.patch_port_int_name = get_br_int_port_name( + self.DEV_PREFIX, port_id) + # The name has the form tpt-<port_id> + self.patch_port_trunk_name = get_br_trunk_port_name( + self.DEV_PREFIX, port_id) + self._transaction = None + + # TODO(jlibosva): Move nested transaction to ovs_lib + @contextlib.contextmanager + def ovsdb_transaction(self): + """Context manager for an ovsdb transaction. + + The object caches whether it is already in a transaction and, if it + is, the original transaction is returned. This behavior enables + calling the manager several times while always getting the same + transaction. + """ + if self._transaction: + yield self._transaction + else: + with self.bridge.ovsdb.transaction() as txn: + self._transaction = txn + try: + yield txn + finally: + self._transaction = None + + def plug(self, br_int): + """Create patch ports between the trunk bridge and the given bridge. + + The method creates one patch port on the given bridge side, using the + port MAC and id as external ids. The other endpoint of the patch port + is attached to the trunk bridge. Everything is done in a single ovsdb + transaction, so the operations either all succeed or all fail. + + :param br_int: An integration bridge where the peer endpoint of the + patch port will be created.
+ + """ + # NOTE(jlibosva): osvdb is an api so it doesn't matter whether we + # use self.bridge or br_int + ovsdb = self.bridge.ovsdb + patch_int_attrs = get_patch_peer_attrs( + self.patch_port_trunk_name, self.port_mac, self.port_id) + patch_trunk_attrs = get_patch_peer_attrs(self.patch_port_int_name) + + with self.ovsdb_transaction() as txn: + txn.add(ovsdb.add_port(br_int.br_name, + self.patch_port_int_name)) + txn.add(ovsdb.db_set('Interface', self.patch_port_int_name, + *patch_int_attrs)) + txn.add(ovsdb.add_port(self.bridge.br_name, + self.patch_port_trunk_name)) + txn.add(ovsdb.db_set('Interface', self.patch_port_trunk_name, + *patch_trunk_attrs)) + + def unplug(self, bridge): + """Unplug the trunk from bridge. + + Method deletes in single ovsdb transaction the trunk bridge and patch + port on provided bridge. + + :param bridge: Bridge that has peer side of patch port for this + subport. + """ + ovsdb = self.bridge.ovsdb + with self.ovsdb_transaction() as txn: + txn.add(ovsdb.del_br(self.bridge.br_name)) + txn.add(ovsdb.del_port(self.patch_port_int_name, + bridge.br_name)) + + +class SubPort(TrunkParentPort): + # Patch port names have form of spi- or spt- respectively. + DEV_PREFIX = 'sp' + + def __init__(self, trunk_id, port_id, port_mac=None, segmentation_id=None): + super(SubPort, self).__init__(trunk_id, port_id, port_mac) + self.segmentation_id = segmentation_id + + def plug(self, br_int): + """Create patch ports between trunk bridge and given bridge. + + The method creates one patch port on the given bridge side using + port mac and id as external ids. The other endpoint of patch port is + attached to the trunk bridge. Then it sets vlan tag represented by + segmentation_id. Everything is done in a single ovsdb transaction so + either all operations succeed or fail. + + :param br_int: An integration bridge where peer endpoint of patch port + will be created. + + """ + ovsdb = self.bridge.ovsdb + with self.ovsdb_transaction() as txn: + super(SubPort, self).plug(br_int) + txn.add(ovsdb.db_set( + "Port", self.patch_port_trunk_name, + ("tag", self.segmentation_id))) + + def unplug(self, bridge): + """Unplug the sub port from the bridge. + + Method deletes in single ovsdb transaction both endpoints of patch + ports that represents the subport. + + :param bridge: Bridge that has peer side of patch port for this + subport. + """ + ovsdb = self.bridge.ovsdb + with self.ovsdb_transaction() as txn: + txn.add(ovsdb.del_port(self.patch_port_trunk_name, + self.bridge.br_name)) + txn.add(ovsdb.del_port(self.patch_port_int_name, + bridge.br_name)) + + +class TrunkManager(object): + + def __init__(self, br_int): + self.br_int = br_int + + def create_trunk(self, trunk_id, port_id, port_mac): + """Create the trunk. + + This patches the bridge for trunk_id with the integration bridge + by means of parent port identified by port_id. + + :param trunk_id: ID of the trunk. + :param port_id: ID of the parent port. + :param port_mac: the MAC address of the parent port. + :raises: TrunkBridgeNotFound -- In case trunk bridge doesn't exist. + + """ + trunk = TrunkParentPort(trunk_id, port_id, port_mac) + if not trunk.bridge.exists(): + raise exc.TrunkBridgeNotFound(bridge=trunk.bridge.br_name) + # Once the bridges are connected with the following patch ports, + # the ovs agent will recognize the ports for processing and it will + # take over the wiring process and everything that entails. 
+ # REVISIT(rossella_s): revisit this integration part, should tighter + # control over the wiring logic for trunk ports be required. + trunk.plug(self.br_int) + + def remove_trunk(self, trunk_id, port_id): + """Remove the trunk bridge.""" + trunk = TrunkParentPort(trunk_id, port_id) + if trunk.bridge.exists(): + trunk.unplug(self.br_int) + else: + LOG.debug("Trunk bridge with ID %s doesn't exist.", trunk_id) + + def add_sub_port(self, trunk_id, port_id, port_mac, segmentation_id): + """Create a sub_port. + + :param trunk_id: ID of the trunk + :param port_id: ID of the child port + :param segmentation_id: segmentation ID associated with this sub-port + :param port_mac: MAC address of the child port + + """ + sub_port = SubPort(trunk_id, port_id, port_mac, segmentation_id) + # If creating the parent trunk bridge takes longer than the API call + # that creates the subport, the bridge doesn't exist yet. + if not sub_port.bridge.exists(): + raise exc.TrunkBridgeNotFound(bridge=sub_port.bridge.br_name) + sub_port.plug(self.br_int) + + def remove_sub_port(self, trunk_id, port_id): + """Remove a sub_port. + + :param trunk_id: ID of the trunk + :param port_id: ID of the child port + """ + sub_port = SubPort(trunk_id, port_id) + + # The trunk bridge might have been deleted by calling delete_trunk() + # before remove_sub_port(). + if sub_port.bridge.exists(): + sub_port.unplug(self.br_int) + else: + LOG.debug("Trunk bridge with ID %s doesn't exist.", trunk_id) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/openvswitch/constants.py neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/openvswitch/constants.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/openvswitch/constants.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/openvswitch/constants.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,15 @@ +# (c) Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +TRUNK_BR_PREFIX = 'tbr-' diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/openvswitch/driver.py neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/openvswitch/driver.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/drivers/openvswitch/driver.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/drivers/openvswitch/driver.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,75 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lib import constants +from oslo_config import cfg +from oslo_log import log as logging + +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.extensions import portbindings +from neutron.plugins.ml2.drivers.openvswitch.agent.common import ( + constants as agent_consts) +from neutron.services.trunk import constants as trunk_consts +from neutron.services.trunk.drivers import base +from neutron.services.trunk import utils + +LOG = logging.getLogger(__name__) + +NAME = 'openvswitch' + +SUPPORTED_INTERFACES = ( + portbindings.VIF_TYPE_OVS, + portbindings.VIF_TYPE_VHOST_USER, +) + +SUPPORTED_SEGMENTATION_TYPES = ( + trunk_consts.VLAN, +) + +DRIVER = None + + +class OVSDriver(base.DriverBase): + + @property + def is_loaded(self): + try: + return NAME in cfg.CONF.ml2.mechanism_drivers + except cfg.NoSuchOptError: + return False + + @classmethod + def create(cls): + return OVSDriver(NAME, + SUPPORTED_INTERFACES, + SUPPORTED_SEGMENTATION_TYPES, + constants.AGENT_TYPE_OVS) + + +def register(): + """Register the driver.""" + global DRIVER + DRIVER = OVSDriver.create() + # To set the bridge_name in a parent port's vif_details. + registry.subscribe(vif_details_bridge_name_handler, + agent_consts.OVS_BRIDGE_NAME, + events.BEFORE_READ) + LOG.debug('Open vSwitch trunk driver registered') + + +def vif_details_bridge_name_handler(resource, event, set_br_name, **kwargs): + """If port is a trunk port, generate a bridge_name for its vif_details.""" + port = kwargs['port'] + if 'trunk_details' in port: + set_br_name(utils.gen_trunk_br_name(port['trunk_details']['trunk_id'])) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/exceptions.py neutron-9.0.0~b3~dev557/neutron/services/trunk/exceptions.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/exceptions.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/exceptions.py 2016-08-29 20:05:49.000000000 +0000 @@ -24,3 +24,51 @@ class TrunkNotFound(n_exc.NotFound): message = _("Trunk %(trunk_id)s could not be found.") + + +class SubPortNotFound(n_exc.NotFound): + message = _("SubPort on trunk %(trunk_id)s with parent port %(port_id)s " + "could not be found.") + + +class DuplicateSubPort(n_exc.InUse): + message = _("segmentation_type %(segmentation_type)s and segmentation_id " + "%(segmentation_id)s already in use on trunk %(trunk_id)s.") + + +class ParentPortInUse(n_exc.InUse): + message = _("Port %(port_id)s is currently in use and is not " + "eligible for use as a parent port.") + + +class PortInUseAsTrunkParent(n_exc.InUse): + message = _("Port %(port_id)s is currently a parent port " + "for trunk %(trunk_id)s.") + + +class PortInUseAsSubPort(n_exc.InUse): + message = _("Port %(port_id)s is currently a subport for " + "trunk %(trunk_id)s.") + + +class TrunkInUse(n_exc.InUse): + message = _("Trunk %(trunk_id)s is currently in use.") + + +class TrunkDisabled(n_exc.Conflict): + message = _("Trunk %(trunk_id)s is currently disabled.") + + +class TrunkInErrorState(n_exc.Conflict): + message = _("Trunk %(trunk_id)s is in error state. 
Attempt " "to resolve the error condition before proceeding.") + + +class IncompatibleTrunkPluginConfiguration(n_exc.NeutronException): + message = _("Cannot load trunk plugin: no compatible core plugin " "configuration is found.") + + +class TrunkPluginDriverConflict(n_exc.Conflict): + message = _("A misconfiguration in the environment prevents the " "operation from completing; please contact the admin.") diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/__init__.py neutron-9.0.0~b3~dev557/neutron/services/trunk/__init__.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/__init__.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/__init__.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,15 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import neutron.services.trunk.validators # noqa diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/models.py neutron-9.0.0~b3~dev557/neutron/services/trunk/models.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/models.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/models.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,24 +14,37 @@ # under the License. import sqlalchemy as sa +from sqlalchemy import sql +from neutron.api.v2 import attributes from neutron.db import model_base from neutron.db import models_v2 +from neutron.db import standard_attr +from neutron.services.trunk import constants -class Trunk(model_base.HasStandardAttributes, model_base.BASEV2, - model_base.HasId, model_base.HasTenant): +class Trunk(standard_attr.HasStandardAttributes, model_base.BASEV2, + model_base.HasId, model_base.HasProject): + admin_state_up = sa.Column( + sa.Boolean(), nullable=False, server_default=sql.true()) + name = sa.Column(sa.String(attributes.NAME_MAX_LEN)) port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete='CASCADE'), nullable=False, unique=True) + status = sa.Column( + sa.String(16), nullable=False, server_default=constants.ACTIVE_STATUS) + port = sa.orm.relationship( models_v2.Port, backref=sa.orm.backref('trunk_port', lazy='joined', uselist=False, cascade='delete')) + sub_ports = sa.orm.relationship( + 'SubPort', lazy='joined', uselist=True, cascade="all, delete-orphan") + class SubPort(model_base.BASEV2): diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/plugin.py neutron-9.0.0~b3~dev557/neutron/services/trunk/plugin.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/plugin.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/plugin.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,348 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from oslo_log import log as logging +from oslo_utils import uuidutils + +from neutron.api.v2 import attributes +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.callbacks import resources +from neutron.db import api as db_api +from neutron.db import common_db_mixin +from neutron.db import db_base_plugin_common +from neutron.db import db_base_plugin_v2 +from neutron.objects import base as objects_base +from neutron.objects import trunk as trunk_objects +from neutron.services import service_base +from neutron.services.trunk import callbacks +from neutron.services.trunk import constants +from neutron.services.trunk import drivers +from neutron.services.trunk import exceptions as trunk_exc +from neutron.services.trunk import rules + +LOG = logging.getLogger(__name__) + + +def _extend_port_trunk_details(core_plugin, port_res, port_db): + """Add trunk details to a port.""" + if port_db.trunk_port: + subports = [{'segmentation_id': x.segmentation_id, + 'segmentation_type': x.segmentation_type, + 'port_id': x.port_id} + for x in port_db.trunk_port.sub_ports] + trunk_details = {'trunk_id': port_db.trunk_port.id, + 'sub_ports': subports} + port_res['trunk_details'] = trunk_details + + return port_res + + +class TrunkPlugin(service_base.ServicePluginBase, + common_db_mixin.CommonDbMixin): + + supported_extension_aliases = ["trunk", "trunk-details"] + + __native_pagination_support = True + __native_sorting_support = True + + def __init__(self): + db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( + attributes.PORTS, [_extend_port_trunk_details]) + self._rpc_backend = None + self._drivers = [] + self._segmentation_types = {} + self._interfaces = set() + self._agent_types = set() + drivers.register() + registry.subscribe(rules.enforce_port_deletion_rules, + resources.PORT, events.BEFORE_DELETE) + registry.notify(constants.TRUNK_PLUGIN, events.AFTER_INIT, self) + for driver in self._drivers: + LOG.debug('Trunk plugin loaded with driver %s', driver.name) + self.check_compatibility() + + def check_compatibility(self): + """Fail to load if no compatible driver is found.""" + if not any([driver.is_loaded for driver in self._drivers]): + raise trunk_exc.IncompatibleTrunkPluginConfiguration() + + def set_rpc_backend(self, backend): + self._rpc_backend = backend + + def is_rpc_enabled(self): + return self._rpc_backend is not None + + def register_driver(self, driver): + """Register driver with trunk plugin.""" + if driver.agent_type: + self._agent_types.add(driver.agent_type) + self._interfaces = self._interfaces | set(driver.interfaces) + self._drivers.append(driver) + + @property + def registered_drivers(self): + """The registered drivers.""" + return self._drivers + + @property + def supported_interfaces(self): + """A set of supported interfaces.""" + return self._interfaces + + @property + def supported_agent_types(self): + """A set of supported agent types.""" + return self._agent_types + + def add_segmentation_type(self, segmentation_type, id_validator): + self._segmentation_types[segmentation_type] = id_validator + 
LOG.debug('Added support for segmentation type %s', segmentation_type) + + def validate(self, context, trunk): + """Return a valid trunk or raise an error if unable to do so.""" + trunk_details = trunk + + trunk_validator = rules.TrunkPortValidator(trunk['port_id']) + trunk_details['port_id'] = trunk_validator.validate(context) + + subports_validator = rules.SubPortsValidator( + self._segmentation_types, trunk['sub_ports'], trunk['port_id']) + trunk_details['sub_ports'] = subports_validator.validate(context) + return trunk_details + + def get_plugin_description(self): + return "Trunk port service plugin" + + @classmethod + def get_plugin_type(cls): + return "trunk" + + @db_base_plugin_common.filter_fields + @db_base_plugin_common.convert_result_to_dict + def get_trunk(self, context, trunk_id, fields=None): + """Return information for the specified trunk.""" + return self._get_trunk(context, trunk_id) + + @db_base_plugin_common.filter_fields + @db_base_plugin_common.convert_result_to_dict + def get_trunks(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, page_reverse=False): + """Return information for available trunks.""" + filters = filters or {} + pager = objects_base.Pager(sorts=sorts, limit=limit, + page_reverse=page_reverse, marker=marker) + return trunk_objects.Trunk.get_objects(context, _pager=pager, + **filters) + + @db_base_plugin_common.convert_result_to_dict + def create_trunk(self, context, trunk): + """Create a trunk.""" + trunk = self.validate(context, trunk['trunk']) + sub_ports = [trunk_objects.SubPort( + context=context, + port_id=p['port_id'], + segmentation_id=p['segmentation_id'], + segmentation_type=p['segmentation_type']) + for p in trunk['sub_ports']] + admin_state_up = trunk.get('admin_state_up', True) + # NOTE(status_police): a trunk is created in PENDING status. Depending + # on the nature of the create request, a driver may set the status + # immediately to ACTIVE if no physical provisioning is required. + # Otherwise a transition to BUILD (or ERROR) should be expected + # depending on how the driver reacts. PRECOMMIT failures prevent the + # trunk from being created altogether. + trunk_obj = trunk_objects.Trunk(context=context, + admin_state_up=admin_state_up, + id=uuidutils.generate_uuid(), + name=trunk.get('name', ""), + tenant_id=trunk['tenant_id'], + port_id=trunk['port_id'], + status=constants.PENDING_STATUS, + sub_ports=sub_ports) + with db_api.autonested_transaction(context.session): + trunk_obj.create() + payload = callbacks.TrunkPayload(context, trunk_obj.id, + current_trunk=trunk_obj) + registry.notify( + constants.TRUNK, events.PRECOMMIT_CREATE, self, + payload=payload) + registry.notify( + constants.TRUNK, events.AFTER_CREATE, self, payload=payload) + return trunk_obj + + @db_base_plugin_common.convert_result_to_dict + def update_trunk(self, context, trunk_id, trunk): + """Update information for the specified trunk.""" + trunk_data = trunk['trunk'] + with db_api.autonested_transaction(context.session): + trunk_obj = self._get_trunk(context, trunk_id) + original_trunk = copy.deepcopy(trunk_obj) + # NOTE(status_police): a trunk status should not change during an + # update_trunk(), even in the face of PRECOMMIT failures. This is + # because only name and admin_state_up are being affected, and + # these are DB properties only.
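# (Contrast with add_subports()/remove_subports() below, which force the
# status to PENDING: updating purely DB-backed fields needs no driver
# round-trip, hence no status transition.)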
+ trunk_obj.update_fields(trunk_data, reset_changes=True) + trunk_obj.update() + payload = callbacks.TrunkPayload(context, trunk_id, + original_trunk=original_trunk, + current_trunk=trunk_obj) + registry.notify(constants.TRUNK, events.PRECOMMIT_UPDATE, self, + payload=payload) + registry.notify(constants.TRUNK, events.AFTER_UPDATE, self, + payload=payload) + return trunk_obj + + def delete_trunk(self, context, trunk_id): + """Delete the specified trunk.""" + with db_api.autonested_transaction(context.session): + trunk = self._get_trunk(context, trunk_id) + rules.trunk_can_be_managed(context, trunk) + trunk_port_validator = rules.TrunkPortValidator(trunk.port_id) + if not trunk_port_validator.is_bound(context): + # NOTE(status_police): when a trunk is deleted, the logical + # object disappears from the datastore, therefore there is no + # status transition involved. If PRECOMMIT failures occur, + # the trunk remains in the status where it was. + trunk.delete() + payload = callbacks.TrunkPayload(context, trunk_id, + original_trunk=trunk) + registry.notify(constants.TRUNK, events.PRECOMMIT_DELETE, self, + payload=payload) + else: + raise trunk_exc.TrunkInUse(trunk_id=trunk_id) + registry.notify(constants.TRUNK, events.AFTER_DELETE, self, + payload=payload) + + @db_base_plugin_common.convert_result_to_dict + def add_subports(self, context, trunk_id, subports): + """Add one or more subports to the trunk.""" + # Perform basic validation, since the request body is not + # automatically validated by the API layer. + subports = subports['sub_ports'] + subports_validator = rules.SubPortsValidator( + self._segmentation_types, subports) + subports = subports_validator.validate(context, basic_validation=True) + added_subports = [] + + with db_api.autonested_transaction(context.session): + trunk = self._get_trunk(context, trunk_id) + rules.trunk_can_be_managed(context, trunk) + original_trunk = copy.deepcopy(trunk) + # NOTE(status_police): the trunk status should transition to + # PENDING (and consequently to BUILD and finally to ACTIVE + # or ERROR), only if it is not in ERROR status already. A user + # should attempt to resolve the ERROR condition before adding + # more subports to the trunk. Should a trunk be in PENDING or + # BUILD state (e.g. when dealing with multiple concurrent + # requests), the status is still forced to PENDING and thus + # can potentially overwrite an interleaving state change to + # ACTIVE. Eventually the driver should bring the status back + # to ACTIVE or ERROR.
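# (As an assumed concrete interleaving: request A's driver flips the
# trunk to ACTIVE just as request B forces it back to PENDING; B's own
# driver pass is then expected to settle the final ACTIVE/ERROR state.)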
+ if trunk.status == constants.ERROR_STATUS: + raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id) + else: + trunk.status = constants.PENDING_STATUS + trunk.update() + + for subport in subports: + obj = trunk_objects.SubPort( + context=context, + trunk_id=trunk_id, + port_id=subport['port_id'], + segmentation_type=subport['segmentation_type'], + segmentation_id=subport['segmentation_id']) + obj.create() + trunk['sub_ports'].append(obj) + added_subports.append(obj) + payload = callbacks.TrunkPayload(context, trunk_id, + current_trunk=trunk, + original_trunk=original_trunk, + subports=added_subports) + if added_subports: + registry.notify(constants.SUBPORTS, events.PRECOMMIT_CREATE, + self, payload=payload) + if added_subports: + registry.notify( + constants.SUBPORTS, events.AFTER_CREATE, self, payload=payload) + return trunk + + @db_base_plugin_common.convert_result_to_dict + def remove_subports(self, context, trunk_id, subports): + """Remove one or more subports from the trunk.""" + subports = subports['sub_ports'] + with db_api.autonested_transaction(context.session): + trunk = self._get_trunk(context, trunk_id) + original_trunk = copy.deepcopy(trunk) + rules.trunk_can_be_managed(context, trunk) + + subports_validator = rules.SubPortsValidator( + self._segmentation_types, subports) + # The subports are being removed, so we do not need to enforce + # any specific trunk rules, other than basic validation of the + # request body. + subports = subports_validator.validate( + context, basic_validation=True, + trunk_validation=False) + + current_subports = {p.port_id: p for p in trunk.sub_ports} + removed_subports = [] + + for subport in subports: + subport_obj = current_subports.pop(subport['port_id'], None) + + if not subport_obj: + raise trunk_exc.SubPortNotFound(trunk_id=trunk_id, + port_id=subport['port_id']) + subport_obj.delete() + removed_subports.append(subport_obj) + + del trunk.sub_ports[:] + trunk.sub_ports.extend(current_subports.values()) + # NOTE(status_police): the trunk status should transition to + # PENDING irrespective of the status it is in, to allow + # the user to resolve potential conflicts due to prior add_subports + # operations. + # Should a trunk be in PENDING or BUILD state (e.g. when dealing + # with multiple concurrent requests), the status is still forced + # to PENDING. See add_subports() for more details.
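# A rough reading aid for the lifecycle described by the status notes in
# this module and in constants.py (nothing in the code enforces this
# table):
#
#     ACTIVE  --user request-------------> PENDING
#     PENDING --driver acknowledges------> BUILD
#     BUILD   --provisioning succeeds----> ACTIVE
#     BUILD   --partial failure----------> DEGRADED
#     BUILD   --conflict-----------------> ERROR
#
# PENDING may also jump straight to ACTIVE when no physical provisioning
# is required (see the note in create_trunk()).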
+ trunk.status = constants.PENDING_STATUS + trunk.update() + payload = callbacks.TrunkPayload(context, trunk_id, + current_trunk=trunk, + original_trunk=original_trunk, + subports=removed_subports) + if removed_subports: + registry.notify(constants.SUBPORTS, events.PRECOMMIT_DELETE, + self, payload=payload) + if removed_subports: + registry.notify( + constants.SUBPORTS, events.AFTER_DELETE, self, payload=payload) + return trunk + + @db_base_plugin_common.filter_fields + def get_subports(self, context, trunk_id, fields=None): + """Return subports for the specified trunk.""" + trunk = self.get_trunk(context, trunk_id) + return {'sub_ports': trunk['sub_ports']} + + def _get_trunk(self, context, trunk_id): + """Return the trunk object or raise if not found.""" + obj = trunk_objects.Trunk.get_object(context, id=trunk_id) + if obj is None: + raise trunk_exc.TrunkNotFound(trunk_id=trunk_id) + + return obj diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/rpc/agent.py neutron-9.0.0~b3~dev557/neutron/services/trunk/rpc/agent.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/rpc/agent.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/rpc/agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,101 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +import oslo_messaging + +from neutron.api.rpc.callbacks.consumer import registry +from neutron.api.rpc.callbacks import resources +from neutron.api.rpc.handlers import resources_rpc +from neutron.common import rpc as n_rpc +from neutron.services.trunk.rpc import constants as trunk_consts + +# This module contains stub (client-side) and skeleton (server-side) +# proxy code that executes in the Neutron L2 Agent process space. This +# is needed if trunk service plugin drivers have a remote component +# (e.g. an agent) that needs to communicate with the Neutron Server. + +# The Agent side exposes the following remote methods: +# +# - update methods to learn about a trunk and its subports: these +# methods are used by the server to tell the agent about trunk +# updates; agents may selectively choose to listen to either +# trunk or subports updates or both.
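# (OVSTrunkSkeleton, earlier in this patch, is an example of the
# selective case: its __init__ unsubscribes handle_trunks() and relies on
# local OVSDB events for trunk-level changes, consuming only subport
# updates over RPC.)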
+# +# For server-side stub and skeleton proxy code, please look at server.py + + +class TrunkSkeleton(object): + """Skeleton proxy code for server->agent communication.""" + + def __init__(self): + registry.subscribe(self.handle_trunks, resources.TRUNK) + registry.subscribe(self.handle_subports, resources.SUBPORT) + + self._connection = n_rpc.create_connection() + endpoints = [resources_rpc.ResourcesPushRpcCallback()] + topic = resources_rpc.resource_type_versioned_topic(resources.SUBPORT) + self._connection.create_consumer(topic, endpoints, fanout=True) + topic = resources_rpc.resource_type_versioned_topic(resources.TRUNK) + self._connection.create_consumer(topic, endpoints, fanout=True) + self._connection.consume_in_threads() + + @abc.abstractmethod + def handle_trunks(self, trunks, event_type): + """Handle trunk events.""" + # if common logic may be extracted out, consider making a base + # version of this method that can be overridden by the inherited + # skeleton. + # NOTE: If trunk is not managed by the agent, the notification can + # either be ignored or cached for future use. + + @abc.abstractmethod + def handle_subports(self, subports, event_type): + """Handle subports events.""" + # if common logic may be extracted out, consider making a base + # version of this method that can be overridden by the inherited + # skeleton. + # NOTE: If the subport belongs to a trunk which the agent does not + # manage, the notification should be ignored. + + +class TrunkStub(object): + """Stub proxy code for agent->server communication.""" + # API HISTORY + # 1.0 - initial version + VERSION = '1.0' + + def __init__(self): + self.stub = resources_rpc.ResourcesPullRpcApi() + target = oslo_messaging.Target( + topic=trunk_consts.TRUNK_BASE_TOPIC, + version=self.VERSION, + namespace=trunk_consts.TRUNK_BASE_NAMESPACE) + self.rpc_client = n_rpc.get_client(target) + + def get_trunk_details(self, context, parent_port_id): + """Get information about the trunk for the given parent port.""" + return self.stub.pull(context, resources.TRUNK, parent_port_id) + + def update_trunk_status(self, context, trunk_id, status): + """Update the trunk status to reflect outcome of data plane wiring.""" + return self.rpc_client.prepare().call( + context, 'update_trunk_status', + trunk_id=trunk_id, status=status) + + def update_subport_bindings(self, context, subports): + """Update subport bindings to match parent port host binding.""" + return self.rpc_client.prepare().call( + context, 'update_subport_bindings', subports=subports) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/rpc/backend.py neutron-9.0.0~b3~dev557/neutron/services/trunk/rpc/backend.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/rpc/backend.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/rpc/backend.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,64 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
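A trunk driver with an agent-side component is expected to subclass the TrunkSkeleton from agent.py above and fill in the two handlers. A minimal sketch; the class name and handler bodies are illustrative, not part of this patch:

    from neutron.api.rpc.callbacks import events
    from neutron.services.trunk.rpc import agent as trunk_rpc

    class MyTrunkDriverSkeleton(trunk_rpc.TrunkSkeleton):
        """Hypothetical agent-side consumer of trunk/subport updates."""

        def handle_trunks(self, trunks, event_type):
            for trunk in trunks:
                if event_type == events.DELETED:
                    pass  # e.g. unwire the local bridge for trunk.id
                else:
                    pass  # e.g. (re)wire trunk.port_id on this host

        def handle_subports(self, subports, event_type):
            # Subports of trunks this agent does not manage can be ignored.
            if event_type == events.CREATED:
                pass  # e.g. plug each subport on its trunk bridge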
+ +from oslo_log import log as logging + +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.services.trunk import constants as trunk_consts +from neutron.services.trunk.rpc import server + +LOG = logging.getLogger(__name__) + + +class ServerSideRpcBackend(object): + """The Neutron Server RPC backend.""" + + def __init__(self): + """Initialize an RPC backend for the Neutron Server.""" + self._skeleton = server.TrunkSkeleton() + self._stub = server.TrunkStub() + + # Set up listeners to trunk events: they dispatch RPC messages + # to agents as needed. These are designed to work with any + # agent-based driver that may integrate with the trunk service + # plugin, e.g. linux bridge or ovs. + for event in (events.AFTER_CREATE, events.AFTER_DELETE): + registry.subscribe(self.process_event, + trunk_consts.TRUNK, + event) + registry.subscribe(self.process_event, + trunk_consts.SUBPORTS, + event) + LOG.debug("RPC backend initialized for trunk plugin") + + def process_event(self, resource, event, trunk_plugin, payload): + """Emit RPC notifications to registered subscribers.""" + context = payload.context + LOG.debug("RPC notification needed for trunk %s", payload.trunk_id) + if resource == trunk_consts.SUBPORTS: + payload = payload.subports + method = { + events.AFTER_CREATE: self._stub.subports_added, + events.AFTER_DELETE: self._stub.subports_deleted, + } + elif resource == trunk_consts.TRUNK: + # On AFTER_DELETE event, current_trunk is None + payload = payload.current_trunk or payload.original_trunk + method = { + events.AFTER_CREATE: self._stub.trunk_created, + events.AFTER_DELETE: self._stub.trunk_deleted, + } + LOG.debug("Emitting event %s for resource %s", event, resource) + method[event](context, payload) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/rpc/constants.py neutron-9.0.0~b3~dev557/neutron/services/trunk/rpc/constants.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/rpc/constants.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/rpc/constants.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,15 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +TRUNK_BASE_TOPIC = 'trunk' +TRUNK_BASE_NAMESPACE = 'trunk' diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/rpc/server.py neutron-9.0.0~b3~dev557/neutron/services/trunk/rpc/server.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/rpc/server.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/rpc/server.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,133 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections + +from oslo_log import log as logging +import oslo_messaging + +from neutron.api.rpc.callbacks import events +from neutron.api.rpc.callbacks.producer import registry +from neutron.api.rpc.callbacks import resources +from neutron.api.rpc.handlers import resources_rpc +from neutron.common import rpc as n_rpc +from neutron.db import api as db_api +from neutron.extensions import portbindings +from neutron import manager +from neutron.objects import trunk as trunk_objects +from neutron.services.trunk.rpc import constants + +LOG = logging.getLogger(__name__) + +# This module contains stub (client-side) and skeleton (server-side) +# proxy code that executes in the Neutron server process space. This +# is needed if any of the trunk service plugin drivers has a remote +# component (e.g. agent), that needs to communicate with the Neutron +# Server. + +# The Server side exposes the following remote methods: +# +# - lookup method to retrieve trunk details: used by the agent to learn +# about the trunk. +# - update methods for trunk and its subports: used by the agent to +# inform the server about local trunk status changes. +# +# For agent-side stub and skeleton proxy code, please look at agent.py + + +def trunk_by_port_provider(resource, port_id, context, **kwargs): + """Provider callback to supply trunk information by parent port.""" + return trunk_objects.Trunk.get_object(context, port_id=port_id) + + +class TrunkSkeleton(object): + """Skeleton proxy code for agent->server communication.""" + + # API version history: + # 1.0 Initial version + target = oslo_messaging.Target(version='1.0', + namespace=constants.TRUNK_BASE_NAMESPACE) + + _core_plugin = None + _trunk_plugin = None + + def __init__(self): + # Used to provide trunk lookups for the agent. + registry.provide(trunk_by_port_provider, resources.TRUNK) + self._connection = n_rpc.create_connection() + self._connection.create_consumer( + constants.TRUNK_BASE_TOPIC, [self], fanout=False) + self._connection.consume_in_threads() + + @property + def core_plugin(self): + # TODO(armax): consider getting rid of this property if we + # can get access to the Port object + if not self._core_plugin: + self._core_plugin = manager.NeutronManager.get_plugin() + return self._core_plugin + + def update_subport_bindings(self, context, subports): + """Update subport bindings to match trunk host binding.""" + el = context.elevated() + ports_by_trunk_id = collections.defaultdict(list) + updated_ports = collections.defaultdict(list) + for s in subports: + ports_by_trunk_id[s['trunk_id']].append(s['port_id']) + for trunk_id, subport_ids in ports_by_trunk_id.items(): + trunk = trunk_objects.Trunk.get_object(el, id=trunk_id) + if not trunk: + LOG.debug("Trunk not found. 
id: %s", trunk_id) + continue + trunk_port_id = trunk.port_id + trunk_port = self.core_plugin.get_port(el, trunk_port_id) + trunk_host = trunk_port.get(portbindings.HOST_ID) + for port_id in subport_ids: + updated_port = self.core_plugin.update_port( + el, port_id, {'port': {portbindings.HOST_ID: trunk_host}}) + # NOTE(fitoduarte): consider trimming down the content + # of the port data structure. + updated_ports[trunk_id].append(updated_port) + return updated_ports + + def update_trunk_status(self, context, trunk_id, status): + """Update the trunk status to reflect outcome of data plane wiring.""" + with db_api.autonested_transaction(context.session): + trunk = trunk_objects.Trunk.get_object(context, id=trunk_id) + if trunk: + trunk.status = status + trunk.update() + + +class TrunkStub(object): + """Stub proxy code for server->agent communication.""" + + def __init__(self): + self._resource_rpc = resources_rpc.ResourcesPushRpcApi() + + def trunk_created(self, context, trunk): + """Tell the agent about a trunk being created.""" + self._resource_rpc.push(context, [trunk], events.CREATED) + + def trunk_deleted(self, context, trunk): + """Tell the agent about a trunk being deleted.""" + self._resource_rpc.push(context, [trunk], events.DELETED) + + def subports_added(self, context, subports): + """Tell the agent about new subports to add.""" + self._resource_rpc.push(context, subports, events.CREATED) + + def subports_deleted(self, context, subports): + """Tell the agent about existing subports to remove.""" + self._resource_rpc.push(context, subports, events.DELETED) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/rules.py neutron-9.0.0~b3~dev557/neutron/services/trunk/rules.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/rules.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/rules.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,173 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lib.api import converters +from neutron_lib.api import validators +from neutron_lib import exceptions as n_exc + +from neutron._i18n import _ +from neutron.extensions import portbindings +from neutron import manager +from neutron.objects import trunk as trunk_objects +from neutron.services.trunk import exceptions as trunk_exc +from neutron.services.trunk import utils + + +# This layer is introduced for keeping business logic and +# data persistence decoupled. + + +def trunk_can_be_managed(context, trunk): + """Validate that the trunk can be managed.""" + if not trunk.admin_state_up: + raise trunk_exc.TrunkDisabled(trunk_id=trunk.id) + + +def enforce_port_deletion_rules(resource, event, trigger, **kwargs): + """Prohibit the deletion of a port that's used in a trunk.""" + # NOTE: the ML2 plugin properly catches these exceptions when raised, but + # non-ML2 plugins might not. To address this we should move the callback + # registry notification emitted in the ML2 plugin's delete_port() higher + # up in the plugin hierarchy. 
+ context = kwargs['context'] + port_id = kwargs['port_id'] + subport_obj = trunk_objects.SubPort.get_object(context, port_id=port_id) + if subport_obj: + raise trunk_exc.PortInUseAsSubPort(port_id=port_id, + trunk_id=subport_obj.trunk_id) + trunk_obj = trunk_objects.Trunk.get_object(context, port_id=port_id) + if trunk_obj: + raise trunk_exc.PortInUseAsTrunkParent(port_id=port_id, + trunk_id=trunk_obj.id) + + +class TrunkPortValidator(object): + + def __init__(self, port_id): + self.port_id = port_id + self._port = None + + def validate(self, context): + """Validate that the port can be used in a trunk.""" + # TODO(tidwellr): there is a chance of a race between the + # time these checks are performed and the time the trunk + # creation is executed. To be revisited, if it bites. + + # Validate that the given port_id is not used by a subport. + subports = trunk_objects.SubPort.get_objects( + context, port_id=self.port_id) + if subports: + raise trunk_exc.TrunkPortInUse(port_id=self.port_id) + + # Validate that the given port_id is not used by a trunk. + trunks = trunk_objects.Trunk.get_objects(context, port_id=self.port_id) + if trunks: + raise trunk_exc.ParentPortInUse(port_id=self.port_id) + + if not self.can_be_trunked(context): + raise trunk_exc.ParentPortInUse(port_id=self.port_id) + + return self.port_id + + def is_bound(self, context): + """Return true if the port is bound, false otherwise.""" + # Validate that the given port_id does not have a port binding. + core_plugin = manager.NeutronManager.get_plugin() + self._port = core_plugin.get_port(context, self.port_id) + return bool(self._port.get(portbindings.HOST_ID)) + + def can_be_trunked(self, context): + """Return true if a port can be trunked.""" + if not self.is_bound(context): + # An unbound port can be trunked, always. + return True + + trunk_plugin = manager.NeutronManager.get_service_plugins()['trunk'] + vif_type = self._port.get(portbindings.VIF_TYPE) + binding_host = self._port.get(portbindings.HOST_ID) + + # Determine the driver that will be in charge of the trunk: this + # can be determined based on the vif type, whether or not the + # driver is agent-based, and whether the host is running the agent + # associated to the driver itself. + drivers = [ + driver for driver in trunk_plugin.registered_drivers + if utils.is_driver_compatible( + context, driver, vif_type, binding_host) + ] + if len(drivers) > 1: + raise trunk_exc.TrunkPluginDriverConflict() + elif len(drivers) == 1: + return drivers[0].can_trunk_bound_port + else: + return False + + +class SubPortsValidator(object): + + def __init__(self, segmentation_types, subports, trunk_port_id=None): + self._segmentation_types = segmentation_types + self.subports = subports + self.trunk_port_id = trunk_port_id + + def validate(self, context, + basic_validation=False, trunk_validation=True): + """Validate that subports can be used in a trunk.""" + # Perform basic validation on subports, in case subports + # are not automatically screened by the API layer. + if basic_validation: + msg = validators.validate_subports(self.subports) + if msg: + raise n_exc.InvalidInput(error_message=msg) + if trunk_validation: + return [self._validate(context, s) for s in self.subports] + else: + return self.subports + + def _validate(self, context, subport): + # Check that the subport doesn't reference the same port_id as a + # trunk we may be in the middle of trying to create, in other words + # make the validation idiot proof.
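Stepping back from _validate's internals: callers build the validator with the segmentation-type map registered at plugin init plus the raw request body, exactly as the plugin's add_subports() does. A usage sketch; context and the two port IDs are assumed to be in scope:

    from neutron.plugins.common import utils as plugin_utils
    from neutron.services.trunk import rules

    seg_types = {'vlan': plugin_utils.is_valid_vlan_tag}
    subports = [{'port_id': subport_port_id,
                 'segmentation_type': 'vlan',
                 'segmentation_id': 100}]
    validator = rules.SubPortsValidator(seg_types, subports, parent_port_id)
    # Raises InvalidInput or a trunk exception on any violation:
    validated_subports = validator.validate(context)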
+ if subport['port_id'] == self.trunk_port_id: + raise trunk_exc.ParentPortInUse(port_id=subport['port_id']) + + # If the segmentation details are missing, we will need to + # figure out defaults when the time comes to support Ironic. + # We can reasonably expect segmentation details to be provided + # in all other cases for now. + try: + segmentation_type = subport["segmentation_type"] + segmentation_id = ( + converters.convert_to_int(subport["segmentation_id"])) + except KeyError: + msg = _("Invalid subport details '%s': missing segmentation " + "information. Must specify both segmentation_id and " + "segmentation_type") % subport + raise n_exc.InvalidInput(error_message=msg) + except n_exc.InvalidInput: + msg = _("Invalid subport details: segmentation_id '%s' is " + "not an integer") % subport["segmentation_id"] + raise n_exc.InvalidInput(error_message=msg) + + if segmentation_type not in self._segmentation_types: + msg = _("Unknown segmentation_type '%s'") % segmentation_type + raise n_exc.InvalidInput(error_message=msg) + + if not self._segmentation_types[segmentation_type](segmentation_id): + msg = _("Segmentation ID '%s' is not in range") % segmentation_id + raise n_exc.InvalidInput(error_message=msg) + + trunk_validator = TrunkPortValidator(subport['port_id']) + trunk_validator.validate(context) + return subport diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/utils.py neutron-9.0.0~b3~dev557/neutron/services/trunk/utils.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/utils.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/utils.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,62 @@ +# (c) Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lib import constants + +from neutron.common import utils +from neutron import manager +from neutron.services.trunk.drivers.openvswitch import constants as ovs_const + + +def gen_trunk_br_name(trunk_id): + return ((ovs_const.TRUNK_BR_PREFIX + trunk_id) + [:constants.DEVICE_NAME_MAX_LEN - 1]) + + +def are_agent_types_available_on_host(context, agent_types, host): + """Return true if agent types are present on the host.""" + core_plugin = manager.NeutronManager.get_plugin() + if utils.is_extension_supported(core_plugin, 'agent'): + return bool(core_plugin.get_agents( + context.elevated(), + filters={'host': [host], 'agent_type': agent_types})) + return False + + +def is_driver_compatible(context, driver, interface, binding_host): + """Return true if the driver is compatible with the interface and host. + + There may be edge cases where a stale view or the deployment may make the + following test fail to detect the right driver in charge of the bound port. + """ + + # NOTE(armax): this logic stems from the fact that the way Neutron is + # architected we do not have a univocal mapping between VIF type and the + # Driver serving it, in that the same vif type can be supported by + # multiple drivers. 
A practical example of this is OVS and OVN in the + # same deployment. In order to uniquely identify the driver, we cannot + # simply look at the vif type, and we need to look at whether the host + # to which the port is bound is actually managed by one driver or the + # other. + is_interface_compatible = driver.is_interface_compatible(interface) + + # For an agentless driver, only interface compatibility is required. + if not driver.agent_type: + return is_interface_compatible + + # For an agent-based driver, both interface and agent compat is required. + return ( + is_interface_compatible and + are_agent_types_available_on_host( + context, [driver.agent_type], binding_host)) diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/validators/__init__.py neutron-9.0.0~b3~dev557/neutron/services/trunk/validators/__init__.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/validators/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/validators/__init__.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,18 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.services.trunk.validators import vlan + +# Register segmentation_type validation drivers +vlan.register() diff -Nru neutron-9.0.0~b2~dev280/neutron/services/trunk/validators/vlan.py neutron-9.0.0~b3~dev557/neutron/services/trunk/validators/vlan.py --- neutron-9.0.0~b2~dev280/neutron/services/trunk/validators/vlan.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/services/trunk/validators/vlan.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,34 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
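gen_trunk_br_name in utils.py above derives a length-capped OVS bridge name from the trunk UUID. A quick worked example, assuming the openvswitch driver prefix is 'tbr-' and the usual 15-character Linux device-name limit:

    from neutron.services.trunk import utils

    utils.gen_trunk_br_name('f6a4ed5d-20dc-4fbe-a4c0-53b6abd2efc6')
    # -> 'tbr-f6a4ed5d-2', i.e. DEVICE_NAME_MAX_LEN - 1 == 14 characters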
+ +from oslo_log import log as logging + +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.plugins.common import utils +from neutron.services.trunk import constants as trunk_consts + +LOG = logging.getLogger(__name__) + + +def register(): + registry.subscribe(handler, trunk_consts.TRUNK_PLUGIN, + events.AFTER_INIT) + LOG.debug('Registering for trunk support') + + +def handler(resource, event, trigger, **kwargs): + trigger.add_segmentation_type( + trunk_consts.VLAN, utils.is_valid_vlan_tag) + LOG.debug('Registration complete') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/base.py neutron-9.0.0~b3~dev557/neutron/tests/base.py --- neutron-9.0.0~b2~dev280/neutron/tests/base.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/base.py 2016-08-03 20:10:34.000000000 +0000 @@ -52,7 +52,7 @@ CONF = cfg.CONF -CONF.import_opt('state_path', 'neutron.common.config') +CONF.import_opt('state_path', 'neutron.conf.common') ROOTDIR = os.path.dirname(__file__) ETCDIR = os.path.join(ROOTDIR, 'etc') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/common/agents/l2_extensions.py neutron-9.0.0~b3~dev557/neutron/tests/common/agents/l2_extensions.py --- neutron-9.0.0~b2~dev280/neutron/tests/common/agents/l2_extensions.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/common/agents/l2_extensions.py 2016-08-03 20:10:34.000000000 +0000 @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. -from neutron.agent.linux import utils as agent_utils +from neutron.common import utils as common_utils def extract_mod_nw_tos_action(flows): @@ -36,7 +36,7 @@ expected = rule.max_kbps, rule.max_burst_kbps return bw_rule == expected - agent_utils.wait_until_true(_bandwidth_limit_rule_applied) + common_utils.wait_until_true(_bandwidth_limit_rule_applied) def wait_until_dscp_marking_rule_applied(bridge, port_vif, rule): @@ -51,4 +51,4 @@ expected = rule << 2 return dscp_mark == expected - agent_utils.wait_until_true(_dscp_marking_rule_applied) + common_utils.wait_until_true(_dscp_marking_rule_applied) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/common/base.py neutron-9.0.0~b3~dev557/neutron/tests/common/base.py --- neutron-9.0.0~b2~dev280/neutron/tests/common/base.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/common/base.py 2016-08-03 20:10:34.000000000 +0000 @@ -14,7 +14,6 @@ import functools from neutron_lib import constants as n_const -from oslo_db.sqlalchemy import test_base import testtools.testcase import unittest2.case @@ -69,21 +68,3 @@ 'is enabled, skip reason: %s' % (wrapped.__name__, e)) raise return wrapper - - -class MySQLTestCase(test_base.MySQLOpportunisticTestCase): - """Base test class for MySQL tests. - - If the MySQL db is unavailable then this test is skipped, unless - OS_FAIL_ON_MISSING_DEPS is enabled. - """ - SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS') - - -class PostgreSQLTestCase(test_base.PostgreSQLOpportunisticTestCase): - """Base test class for PostgreSQL tests. - - If the PostgreSQL db is unavailable then this test is skipped, unless - OS_FAIL_ON_MISSING_DEPS is enabled. 
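The vlan module above doubles as the template for any future segmentation type: register a callback for the trunk plugin's AFTER_INIT event and hand it a type name plus a validation function. A hypothetical second validator; the 'gre' type and its range check are invented for illustration:

    from neutron.callbacks import events
    from neutron.callbacks import registry
    from neutron.services.trunk import constants as trunk_consts

    def handler(resource, event, trigger, **kwargs):
        # trigger is the trunk plugin emitting AFTER_INIT
        trigger.add_segmentation_type('gre', lambda seg_id: 0 < seg_id < 2 ** 32)

    def register():
        registry.subscribe(handler, trunk_consts.TRUNK_PLUGIN,
                           events.AFTER_INIT)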
- """ - SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/common/conn_testers.py neutron-9.0.0~b3~dev557/neutron/tests/common/conn_testers.py --- neutron-9.0.0~b2~dev280/neutron/tests/common/conn_testers.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/common/conn_testers.py 2016-08-29 20:05:49.000000000 +0000 @@ -64,6 +64,7 @@ ARP = n_consts.ETHERTYPE_NAME_ARP INGRESS = firewall.INGRESS_DIRECTION EGRESS = firewall.EGRESS_DIRECTION + ICMP_COUNT = 1 def __init__(self, ip_cidr): self.ip_cidr = ip_cidr @@ -157,7 +158,8 @@ icmp_timeout = ICMP_VERSION_TIMEOUTS[ip_version] try: net_helpers.assert_ping(src_namespace, ip_address, - timeout=icmp_timeout) + timeout=icmp_timeout, + count=self.ICMP_COUNT) except RuntimeError: raise ConnectionTesterException( "ICMP packets can't get from %s namespace to %s address" % ( @@ -317,7 +319,28 @@ 'sending icmp packets to %s' % destination) -class OVSConnectionTester(ConnectionTester): +class OVSBaseConnectionTester(ConnectionTester): + + @property + def peer_port_id(self): + return self._peer.port.id + + @property + def vm_port_id(self): + return self._vm.port.id + + @staticmethod + def set_tag(port_name, bridge, tag): + bridge.set_db_attribute('Port', port_name, 'tag', tag) + other_config = bridge.db_get_val( + 'Port', port_name, 'other_config') + other_config['tag'] = tag + bridge.set_db_attribute( + 'Port', port_name, 'other_config', other_config) + + +class OVSConnectionTester(OVSBaseConnectionTester): + """Tester with OVS bridge in the middle The endpoints are created as OVS ports attached to the OVS bridge. @@ -347,27 +370,103 @@ for column, value in attrs: self.bridge.set_db_attribute('Interface', port.name, column, value) - @property - def peer_port_id(self): - return self._peer.port.id + def set_vm_tag(self, tag): + self.set_tag(self._vm.port.name, self.bridge, tag) - @property - def vm_port_id(self): - return self._vm.port.id + def set_peer_tag(self, tag): + self.set_tag(self._peer.port.name, self.bridge, tag) - def set_tag(self, port_name, tag): - self.bridge.set_db_attribute('Port', port_name, 'tag', tag) - other_config = self.bridge.db_get_val( - 'Port', port_name, 'other_config') - other_config['tag'] = tag - self.bridge.set_db_attribute( - 'Port', port_name, 'other_config', other_config) + +class OVSTrunkConnectionTester(OVSBaseConnectionTester): + """Tester with OVS bridge and a trunk bridge + + Two endpoints: one is a VM that is connected to a port associated with a + trunk (the port is created on the trunk bridge), the other is a VM on the + same network (the port is on the integration bridge). + + NOTE: The OVS ports are connected from the namespace. 
This connection is + currently not supported in OVS and may lead to unpredicted behavior: + https://bugzilla.redhat.com/show_bug.cgi?id=1160340 + + """ + ICMP_COUNT = 3 + + def __init__(self, ip_cidr, br_trunk_name): + super(OVSTrunkConnectionTester, self).__init__(ip_cidr) + self._br_trunk_name = br_trunk_name + + def _setUp(self): + super(OVSTrunkConnectionTester, self)._setUp() + self.bridge = self.useFixture( + net_helpers.OVSBridgeFixture()).bridge + self.br_trunk = self.useFixture( + net_helpers.OVSTrunkBridgeFixture(self._br_trunk_name)).bridge + self._peer = self.useFixture(machine_fixtures.FakeMachine( + self.bridge, self.ip_cidr)) + ip_cidr = net_helpers.increment_ip_cidr(self.ip_cidr, 1) + + self._vm = self.useFixture(machine_fixtures.FakeMachine( + self.br_trunk, ip_cidr)) + + def add_vlan_interface_and_peer(self, vlan, ip_cidr): + """Create a sub_port and a peer + + We create a sub_port that uses vlan as segmentation ID. In the vm + namespace we create a vlan subinterface on the same vlan. + A peer on the same network is created. When pinging from the peer + to the sub_port packets will be tagged using the internal vlan ID + of the network. The sub_port will remove that vlan tag and push the + vlan specified in the segmentation ID. The packets will finally reach + the vlan subinterface in the vm namespace. + + """ + + ip_wrap = ip_lib.IPWrapper(self._vm.namespace) + dev_name = self._vm.port.name + ".%d" % vlan + ip_wrap.add_vlan(dev_name, self._vm.port.name, vlan) + dev = ip_wrap.device(dev_name) + dev.addr.add(ip_cidr) + dev.link.set_up() + self._ip_vlan = ip_cidr.partition('/')[0] + ip_cidr = net_helpers.increment_ip_cidr(ip_cidr, 1) + self._peer2 = self.useFixture(machine_fixtures.FakeMachine( + self.bridge, ip_cidr)) def set_vm_tag(self, tag): - self.set_tag(self._vm.port.name, tag) + self.set_tag(self._vm.port.name, self.br_trunk, tag) def set_peer_tag(self, tag): - self.set_tag(self._peer.port.name, tag) + self.set_tag(self._peer.port.name, self.bridge, tag) + + def _get_subport_namespace_and_address(self, direction): + if direction == self.INGRESS: + return self._peer2.namespace, self._ip_vlan + return self._vm.namespace, self._peer2.ip + + def test_sub_port_icmp_connectivity(self, direction): + + src_namespace, ip_address = self._get_subport_namespace_and_address( + direction) + ip_version = ip_lib.get_ip_version(ip_address) + icmp_timeout = ICMP_VERSION_TIMEOUTS[ip_version] + try: + net_helpers.assert_ping(src_namespace, ip_address, + timeout=icmp_timeout, + count=self.ICMP_COUNT) + except RuntimeError: + raise ConnectionTesterException( + "ICMP packets can't get from %s namespace to %s address" % ( + src_namespace, ip_address)) + + def test_sub_port_icmp_no_connectivity(self, direction): + try: + self.test_sub_port_icmp_connectivity(direction) + except ConnectionTesterException: + pass + else: + raise ConnectionTesterException( + 'Established %s connection with protocol ICMP, ' % ( + direction)) class LinuxBridgeConnectionTester(ConnectionTester): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/common/helpers.py neutron-9.0.0~b3~dev557/neutron/tests/common/helpers.py --- neutron-9.0.0~b2~dev280/neutron/tests/common/helpers.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/common/helpers.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,6 +14,7 @@ import datetime import os +import random from neutron_lib import constants from oslo_utils import timeutils @@ -21,7 +22,6 @@ import testtools import neutron -from neutron.common import constants 
as n_const from neutron.common import topics from neutron import context from neutron.db import agents_db @@ -78,7 +78,7 @@ admin_context, agent['agent_type'], agent['host']) -def register_l3_agent(host=HOST, agent_mode=n_const.L3_AGENT_MODE_LEGACY, +def register_l3_agent(host=HOST, agent_mode=constants.L3_AGENT_MODE_LEGACY, internal_only=True, ext_net_id='', ext_bridge='', az=DEFAULT_AZ): agent = _get_l3_agent_dict(host, agent_mode, internal_only, ext_net_id, @@ -212,3 +212,11 @@ def requires_py3(testcase): return testtools.skipUnless(six.PY3, "requires python 3.x")(testcase) + + +def get_not_used_vlan(bridge, vlan_range): + port_vlans = bridge.ovsdb.db_find( + 'Port', ('tag', '!=', []), columns=['tag']).execute() + used_vlan_tags = {val['tag'] for val in port_vlans} + available_vlans = vlan_range - used_vlan_tags + return random.choice(list(available_vlans)) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/common/l3_test_common.py neutron-9.0.0~b3~dev557/neutron/tests/common/l3_test_common.py --- neutron-9.0.0~b2~dev280/neutron/tests/common/l3_test_common.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/common/l3_test_common.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,7 +15,7 @@ import copy import netaddr -from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants from oslo_utils import uuidutils from six import moves @@ -33,7 +33,7 @@ subnet_id = _uuid() return {'admin_state_up': True, 'device_id': _uuid(), - 'device_owner': l3_constants.DEVICE_OWNER_ROUTER_HA_INTF, + 'device_owner': lib_constants.DEVICE_OWNER_ROUTER_HA_INTF, 'fixed_ips': [{'ip_address': ip, 'prefixlen': 18, 'subnet_id': subnet_id}], @@ -103,12 +103,12 @@ router = { 'id': router_id, 'distributed': False, - l3_constants.INTERFACE_KEY: [], + lib_constants.INTERFACE_KEY: [], 'routes': routes, 'gw_port': ex_gw_port} if enable_floating_ip: - router[l3_constants.FLOATINGIP_KEY] = [{ + router[lib_constants.FLOATINGIP_KEY] = [{ 'id': _uuid(), 'port_id': _uuid(), 'status': 'DOWN', @@ -120,7 +120,7 @@ if enable_ha: router['ha'] = True router['ha_vr_id'] = 1 - router[l3_constants.HA_INTERFACE_KEY] = (get_ha_interface()) + router[lib_constants.HA_INTERFACE_KEY] = (get_ha_interface()) if enable_snat is not None: router['enable_snat'] = enable_snat @@ -133,7 +133,7 @@ def router_append_interface(router, count=1, ip_version=4, ra_mode=None, addr_mode=None, dual_stack=False): - interfaces = router[l3_constants.INTERFACE_KEY] + interfaces = router[lib_constants.INTERFACE_KEY] current = sum( [netaddr.IPNetwork(subnet['cidr']).version == ip_version for p in interfaces for subnet in p['subnets']]) @@ -203,7 +203,7 @@ else: raise ValueError("Invalid ip_version: %s" % ip_version) - interfaces = copy.deepcopy(router.get(l3_constants.INTERFACE_KEY, [])) + interfaces = copy.deepcopy(router.get(lib_constants.INTERFACE_KEY, [])) if interface_id: try: interface = next(i for i in interfaces @@ -248,11 +248,11 @@ 'fixed_ips': fixed_ips, 'subnets': subnets}) - router[l3_constants.INTERFACE_KEY] = interfaces + router[lib_constants.INTERFACE_KEY] = interfaces def router_append_pd_enabled_subnet(router, count=1): - interfaces = router[l3_constants.INTERFACE_KEY] + interfaces = router[lib_constants.INTERFACE_KEY] current = sum(netaddr.IPNetwork(subnet['cidr']).version == 6 for p in interfaces for subnet in p['subnets']) @@ -271,8 +271,8 @@ 'subnets': [{'id': subnet_id, 'cidr': n_const.PROVISIONAL_IPV6_PD_PREFIX, 'gateway_ip': '::1', - 'ipv6_ra_mode': n_const.IPV6_SLAAC, - 
'subnetpool_id': l3_constants.IPV6_PD_POOL_ID}]} + 'ipv6_ra_mode': lib_constants.IPV6_SLAAC, + 'subnetpool_id': lib_constants.IPV6_PD_POOL_ID}]} interfaces.append(intf) pd_intfs.append(intf) mac_address.value += 1 diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/common/machine_fixtures.py neutron-9.0.0~b3~dev557/neutron/tests/common/machine_fixtures.py --- neutron-9.0.0~b2~dev280/neutron/tests/common/machine_fixtures.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/common/machine_fixtures.py 2016-08-03 20:10:34.000000000 +0000 @@ -18,7 +18,7 @@ import fixtures from neutron.agent.linux import ip_lib -from neutron.agent.linux import utils +from neutron.common import utils from neutron.tests.common import net_helpers diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/common/net_helpers.py neutron-9.0.0~b3~dev557/neutron/tests/common/net_helpers.py --- neutron-9.0.0~b2~dev280/neutron/tests/common/net_helpers.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/common/net_helpers.py 2016-08-29 20:05:49.000000000 +0000 @@ -24,6 +24,7 @@ import shlex import signal import subprocess +import time import fixtures import netaddr @@ -37,7 +38,9 @@ from neutron.agent.linux import bridge_lib from neutron.agent.linux import interface from neutron.agent.linux import ip_lib +from neutron.agent.linux import iptables_firewall from neutron.agent.linux import utils +from neutron.common import utils as common_utils from neutron.db import db_base_plugin_common from neutron.plugins.ml2.drivers.linuxbridge.agent import \ linuxbridge_neutron_agent as linuxbridge_agent @@ -95,7 +98,7 @@ port_dev.route.add_gateway(gateway_ip) -def assert_ping(src_namespace, dst_ip, timeout=1, count=1): +def assert_ping(src_namespace, dst_ip, timeout=1, count=3): ipversion = netaddr.IPAddress(dst_ip).version ping_command = 'ping' if ipversion == 4 else 'ping6' ns_ip_wrapper = ip_lib.IPWrapper(src_namespace) @@ -103,10 +106,28 @@ dst_ip]) +def assert_async_ping(src_namespace, dst_ip, timeout=1, count=1, interval=1): + ipversion = netaddr.IPAddress(dst_ip).version + ping_command = 'ping' if ipversion == 4 else 'ping6' + ns_ip_wrapper = ip_lib.IPWrapper(src_namespace) + + # See bug 1588731 for explanation why using -c count ping option + # cannot be used and it needs to be done using the following workaround. 
+ for _index in range(count): + start_time = time.time() + ns_ip_wrapper.netns.execute([ping_command, '-c', '1', '-W', timeout, + dst_ip]) + end_time = time.time() + diff = end_time - start_time + if 0 < diff < interval: + # wait at most "interval" seconds between individual pings + time.sleep(interval - diff) + + @contextlib.contextmanager def async_ping(namespace, ips): with futures.ThreadPoolExecutor(max_workers=len(ips)) as executor: - fs = [executor.submit(assert_ping, namespace, ip, count=10) + fs = [executor.submit(assert_async_ping, namespace, ip, count=10) for ip in ips] yield lambda: all(f.done() for f in fs) futures.wait(fs) @@ -159,7 +180,12 @@ return ports -def get_unused_port(used, start=1024, end=65535): +def get_unused_port(used, start=1024, end=None): + if end is None: + port_range = utils.execute( + ['sysctl', '-n', 'net.ipv4.ip_local_port_range']) + end = int(port_range.split()[0]) - 1 + candidates = set(range(start, end + 1)) return random.choice(list(candidates - used)) @@ -180,7 +206,7 @@ elif protocol == n_const.PROTO_NAME_UDP: param = '-una' else: - raise ValueError("Unsupported procotol %s" % protocol) + raise ValueError("Unsupported protocol %s" % protocol) ip_wrapper = ip_lib.IPWrapper(namespace=namespace) output = ip_wrapper.netns.execute(['ss', param]) @@ -237,7 +263,7 @@ poller = select.poll() poller.register(stream.fileno()) poll_predicate = functools.partial(poller.poll, 1) - utils.wait_until_true(poll_predicate, timeout, 0.1, + common_utils.wait_until_true(poll_predicate, timeout, 0.1, RuntimeError( 'No output in %.2f seconds' % timeout)) return stream.readline() @@ -254,7 +280,7 @@ if utils.pid_invoked_with_cmdline(child_pid, self.cmd): return True - utils.wait_until_true( + common_utils.wait_until_true( child_is_running, timeout, exception=RuntimeError("Process %s hasn't been spawned " @@ -304,7 +330,7 @@ def _wait_for_death(self): is_dead = lambda: self.proc.poll() is not None - utils.wait_until_true( + common_utils.wait_until_true( is_dead, timeout=self.TIMEOUT, exception=RuntimeError( "Ping command hasn't ended after %d seconds." 
% self.TIMEOUT)) @@ -343,7 +369,7 @@ self._wait_for_death() self._parse_stats() else: - raise RuntimeError("Pinger is running infinitelly, use stop() " + raise RuntimeError("Pinger is running infinitely, use stop() " "first") @@ -628,10 +654,11 @@ self.bridge = self.useFixture(self._create_bridge_fixture()).bridge @classmethod - def get(cls, bridge, namespace=None, mac=None, port_id=None): + def get(cls, bridge, namespace=None, mac=None, port_id=None, + hybrid_plug=False): """Deduce PortFixture class from bridge type and instantiate it.""" if isinstance(bridge, ovs_lib.OVSBridge): - return OVSPortFixture(bridge, namespace, mac, port_id) + return OVSPortFixture(bridge, namespace, mac, port_id, hybrid_plug) if isinstance(bridge, bridge_lib.BridgeDevice): return LinuxBridgePortFixture(bridge, namespace, mac, port_id) if isinstance(bridge, VethBridge): @@ -658,7 +685,21 @@ self.addCleanup(self.bridge.destroy) +class OVSTrunkBridgeFixture(OVSBridgeFixture): + """This bridge doesn't generate the name.""" + def _setUp(self): + ovs = ovs_lib.BaseOVS() + self.bridge = ovs.add_bridge(self.prefix) + self.addCleanup(self.bridge.destroy) + + class OVSPortFixture(PortFixture): + NIC_NAME_LEN = 14 + + def __init__(self, bridge=None, namespace=None, mac=None, port_id=None, + hybrid_plug=False): + super(OVSPortFixture, self).__init__(bridge, namespace, mac, port_id) + self.hybrid_plug = hybrid_plug def _create_bridge_fixture(self): return OVSBridgeFixture() @@ -666,10 +707,6 @@ def _setUp(self): super(OVSPortFixture, self)._setUp() - interface_config = cfg.ConfigOpts() - interface_config.register_opts(interface.OPTS) - ovs_interface = interface.OVSInterfaceDriver(interface_config) - # because in some tests this port can be used to providing connection # between linuxbridge agents and vlan_id can be also added to this # device name it has to be max LB_DEVICE_NAME_MAX_LEN long @@ -677,6 +714,19 @@ LB_DEVICE_NAME_MAX_LEN, PORT_PREFIX ) + + if self.hybrid_plug: + self.hybrid_plug_port(port_name) + else: + self.plug_port(port_name) + + def plug_port(self, port_name): + # TODO(jlibosva): Don't use interface driver for fullstack fake + # machines as the port should be treated by OVS agent and not by + # external party + interface_config = cfg.ConfigOpts() + interface_config.register_opts(interface.OPTS) + ovs_interface = interface.OVSInterfaceDriver(interface_config) ovs_interface.plug_new( None, self.port_id, @@ -687,6 +737,52 @@ self.addCleanup(self.bridge.delete_port, port_name) self.port = ip_lib.IPDevice(port_name, self.namespace) + def hybrid_plug_port(self, port_name): + """Plug port with linux bridge in the middle. + + """ + ip_wrapper = ip_lib.IPWrapper(self.namespace) + qvb_name, qvo_name = self._get_veth_pair_names(self.port_id) + qvb, qvo = self.useFixture(NamedVethFixture(qvb_name, qvo_name)).ports + qvb.link.set_up() + qvo.link.set_up() + qbr_name = self._get_br_name(self.port_id) + self.qbr = self.useFixture( + LinuxBridgeFixture(qbr_name, + namespace=None, + prefix_is_full_name=True)).bridge + self.qbr.link.set_up() + self.qbr.setfd(0) + self.qbr.disable_stp() + self.qbr.addif(qvb_name) + qvo_attrs = ('external_ids', {'iface-id': self.port_id, + 'iface-status': 'active', + 'attached-mac': self.mac}) + self.bridge.add_port(qvo_name, qvo_attrs) + + # NOTE(jlibosva): Create fake vm port, instead of tap device, we use + # veth pair here in order to be able to attach it to linux bridge in + # root namespace. 
Name with tap is in root namespace and its peer is in + # the namespace + hybrid_port_name = iptables_firewall.get_hybrid_port_name(self.port_id) + bridge_port, self.port = self.useFixture( + NamedVethFixture(hybrid_port_name)).ports + self.addCleanup(self.port.link.delete) + ip_wrapper.add_device_to_namespace(self.port) + bridge_port.link.set_up() + self.qbr.addif(bridge_port) + + self.port.link.set_address(self.mac) + self.port.link.set_up() + + # NOTE(jlibosva): Methods below are taken from nova.virt.libvirt.vif + def _get_br_name(self, iface_id): + return ("qbr" + iface_id)[:self.NIC_NAME_LEN] + + def _get_veth_pair_names(self, iface_id): + return (("qvb%s" % iface_id)[:self.NIC_NAME_LEN], + ("qvo%s" % iface_id)[:self.NIC_NAME_LEN]) + class LinuxBridgeFixture(fixtures.Fixture): """Create a linux bridge. diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/contrib/gate_hook.sh neutron-9.0.0~b3~dev557/neutron/tests/contrib/gate_hook.sh --- neutron-9.0.0~b2~dev280/neutron/tests/contrib/gate_hook.sh 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/contrib/gate_hook.sh 2016-08-29 20:05:49.000000000 +0000 @@ -44,16 +44,16 @@ configure_host_for_func_testing - if [[ "$VENV" =~ "dsvm-functional" ]]; then - # The OVS_BRANCH variable is used by git checkout. In the case below - # we use a commit on branch-2.5 that fixes compilation with the - # latest ubuntu trusty kernel. - OVS_BRANCH=8c0b419a0b9ac0141d6973dcc80306dfc6a83d31 - remove_ovs_packages - compile_ovs True /usr /var - start_new_ovs - fi + # The OVS_BRANCH variable is used by git checkout. In the case below, + # we use a current (2016-08-19) HEAD commit from branch-2.5 that contains + # a fix for usage of VXLAN tunnels on a single node: + # https://github.com/openvswitch/ovs/commit/741f47cf35df2bfc7811b2cff75c9bb8d05fd26f + OVS_BRANCH=042326c3fcf61e8638fa15926f984ce5ae142f4b + remove_ovs_packages + compile_ovs True /usr /var + start_new_ovs + load_conf_hook iptables_verify # Make the workspace owned by the stack user sudo chown -R $STACK_USER:$STACK_USER $BASE ;; @@ -63,10 +63,12 @@ # NOTE(ihrachys): note the order of hook post-* sections is significant: [quotas] hook should # go before other hooks modifying [DEFAULT]. See LP#1583214 for details. 
load_conf_hook quotas - load_conf_hook sorting - load_conf_hook pagination load_rc_hook qos + load_rc_hook trunk load_conf_hook osprofiler + if [[ "$VENV" =~ "dsvm-scenario" ]]; then + load_conf_hook iptables_verify + fi if [[ "$VENV" =~ "pecan" ]]; then load_conf_hook pecan fi diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/contrib/hooks/api_extensions neutron-9.0.0~b3~dev557/neutron/tests/contrib/hooks/api_extensions --- neutron-9.0.0~b2~dev280/neutron/tests/contrib/hooks/api_extensions 2016-05-23 21:19:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/contrib/hooks/api_extensions 2016-08-29 20:05:49.000000000 +0000 @@ -21,17 +21,22 @@ net-mtu, \ network-ip-availability, \ network_availability_zone, \ + pagination, \ port-security, \ provider, \ qos, \ quotas, \ rbac-policies, \ + revisions, \ router, \ router_availability_zone, \ security-group, \ service-type, \ + sorting, \ standard-attr-description, \ subnet_allocation, \ tag, \ - timestamp_core" + timestamp_core, \ + trunk, \ + trunk-details" NETWORK_API_EXTENSIONS="$(echo $NETWORK_API_EXTENSIONS | tr -d ' ')" diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/contrib/hooks/iptables_verify neutron-9.0.0~b3~dev557/neutron/tests/contrib/hooks/iptables_verify --- neutron-9.0.0~b2~dev280/neutron/tests/contrib/hooks/iptables_verify 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/contrib/hooks/iptables_verify 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,4 @@ +[[post-config|/etc/neutron/neutron.conf]] + +[AGENT] +debug_iptables_rules=True diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/contrib/hooks/pagination neutron-9.0.0~b3~dev557/neutron/tests/contrib/hooks/pagination --- neutron-9.0.0~b2~dev280/neutron/tests/contrib/hooks/pagination 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/contrib/hooks/pagination 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -[[post-extra|$TEMPEST_CONFIG]] - -[neutron_plugin_options] -validate_pagination = True - -[[post-config|$NEUTRON_CONF]] - -[DEFAULT] -allow_pagination=True diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/contrib/hooks/sorting neutron-9.0.0~b3~dev557/neutron/tests/contrib/hooks/sorting --- neutron-9.0.0~b2~dev280/neutron/tests/contrib/hooks/sorting 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/contrib/hooks/sorting 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -[[post-extra|$TEMPEST_CONFIG]] - -[neutron_plugin_options] -validate_sorting = True - -[[post-config|$NEUTRON_CONF]] - -[DEFAULT] -allow_sorting=True diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/contrib/hooks/trunk neutron-9.0.0~b3~dev557/neutron/tests/contrib/hooks/trunk --- neutron-9.0.0~b2~dev280/neutron/tests/contrib/hooks/trunk 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/contrib/hooks/trunk 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1 @@ +enable_service q-trunk diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/etc/policy.json neutron-9.0.0~b3~dev557/neutron/tests/etc/policy.json --- neutron-9.0.0~b2~dev280/neutron/tests/etc/policy.json 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/etc/policy.json 2016-08-29 20:05:49.000000000 +0000 @@ -17,9 +17,11 @@ "create_subnet": "rule:admin_or_network_owner", "create_subnet:segment_id": "rule:admin_only", + "create_subnet:service_types": "rule:admin_only", "get_subnet": "rule:admin_or_owner or rule:shared", "get_subnet:segment_id": "rule:admin_only", "update_subnet": "rule:admin_or_network_owner", + 
"update_subnet:service_types": "rule:admin_only", "delete_subnet": "rule:admin_or_network_owner", "create_subnetpool": "", @@ -218,5 +220,12 @@ "create_flavor_service_profile": "rule:admin_only", "delete_flavor_service_profile": "rule:admin_only", "get_flavor_service_profile": "rule:regular_user", - "get_auto_allocated_topology": "rule:admin_or_owner" + "get_auto_allocated_topology": "rule:admin_or_owner", + + "create_trunk": "rule:regular_user", + "get_trunk": "rule:admin_or_owner", + "delete_trunk": "rule:admin_or_owner", + "get_subports": "", + "add_subports": "rule:admin_or_owner", + "remove_subports": "rule:admin_or_owner" } diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/fullstack/base.py neutron-9.0.0~b3~dev557/neutron/tests/fullstack/base.py --- neutron-9.0.0~b2~dev280/neutron/tests/fullstack/base.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/fullstack/base.py 2016-08-03 20:10:34.000000000 +0000 @@ -15,14 +15,12 @@ import os from oslo_config import cfg -from oslo_db.sqlalchemy import test_base -from neutron.db.migration import cli as migration from neutron.tests import base as tests_base -from neutron.tests.common import base from neutron.tests.common import helpers from neutron.tests.fullstack.resources import client as client_resource from neutron.tests import tools +from neutron.tests.unit import testlib_api # This is the directory from which infra fetches log files for fullstack tests @@ -30,20 +28,34 @@ 'dsvm-fullstack-logs') -class BaseFullStackTestCase(base.MySQLTestCase): +class BaseFullStackTestCase(testlib_api.MySQLTestCaseMixin, + testlib_api.SqlTestCase): """Base test class for full-stack tests.""" + BUILD_WITH_MIGRATIONS = True + def setUp(self, environment): super(BaseFullStackTestCase, self).setUp() tests_base.setup_test_logging( cfg.CONF, DEFAULT_LOG_DIR, '%s.txt' % self.get_name()) + # NOTE(zzzeek): the opportunistic DB fixtures have built for + # us a per-test (or per-process) database. Set the URL of this + # database in CONF as the full stack tests need to actually run a + # neutron server against this database. + _orig_db_url = cfg.CONF.database.connection + cfg.CONF.set_override( + 'connection', str(self.engine.url), group='database') + self.addCleanup( + cfg.CONF.set_override, + "connection", _orig_db_url, group="database" + ) + # NOTE(ihrachys): seed should be reset before environment fixture below # since the latter starts services that may rely on generated port # numbers tools.reset_random_seed() - self.create_db_tables() self.environment = environment self.environment.test_name = self.get_name() self.useFixture(self.environment) @@ -54,35 +66,3 @@ def get_name(self): class_name, test_name = self.id().split(".")[-2:] return "%s.%s" % (class_name, test_name) - - def create_db_tables(self): - """Populate the new database. - - MySQLTestCase creates a new database for each test, but these need to - be populated with the appropriate tables. Before we can do that, we - must change the 'connection' option which the Neutron code knows to - look at. - - Currently, the username and password options are hard-coded by - oslo.db and neutron/tests/functional/contrib/gate_hook.sh. Also, - we only support MySQL for now, but the groundwork for adding Postgres - is already laid. 
- """ - conn = ("mysql+pymysql://%(username)s:%(password)s" - "@127.0.0.1/%(db_name)s" % { - 'username': test_base.DbFixture.USERNAME, - 'password': test_base.DbFixture.PASSWORD, - 'db_name': self.engine.url.database}) - - alembic_config = migration.get_neutron_config() - alembic_config.neutron_config = cfg.CONF - self.original_conn = cfg.CONF.database.connection - self.addCleanup(self._revert_connection_address) - cfg.CONF.set_override('connection', conn, group='database') - - migration.do_alembic_command(alembic_config, 'upgrade', 'heads') - - def _revert_connection_address(self): - cfg.CONF.set_override('connection', - self.original_conn, - group='database') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/fullstack/resources/config.py neutron-9.0.0~b3~dev557/neutron/tests/fullstack/resources/config.py --- neutron-9.0.0~b2~dev280/neutron/tests/fullstack/resources/config.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/fullstack/resources/config.py 2016-08-29 20:05:49.000000000 +0000 @@ -65,7 +65,7 @@ 'lock_path': '$state_path/lock', 'api_paste_config': self._generate_api_paste(), 'policy_file': self._generate_policy_json(), - 'core_plugin': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'core_plugin': 'ml2', 'service_plugins': ','.join(service_plugins), 'auth_strategy': 'noauth', 'debug': 'True', @@ -264,8 +264,6 @@ def _prepare_config_with_ovs_agent(self, integration_bridge): self.config.update({ 'DEFAULT': { - 'l3_agent_manager': ('neutron.agent.l3_agent.' - 'L3NATAgentWithStateReport'), 'interface_driver': ('neutron.agent.linux.interface.' 'OVSInterfaceDriver'), 'ovs_integration_bridge': integration_bridge, diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/fullstack/resources/environment.py neutron-9.0.0~b3~dev557/neutron/tests/fullstack/resources/environment.py --- neutron-9.0.0~b2~dev280/neutron/tests/fullstack/resources/environment.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/fullstack/resources/environment.py 2016-08-03 20:10:34.000000000 +0000 @@ -18,7 +18,6 @@ from oslo_config import cfg from neutron.agent.linux import ip_lib -from neutron.agent.linux import utils from neutron.common import utils as common_utils from neutron.plugins.ml2.drivers.linuxbridge.agent import \ linuxbridge_neutron_agent as lb_agent @@ -284,7 +283,7 @@ self.hosts = [] def wait_until_env_is_up(self): - utils.wait_until_true(self._processes_are_ready) + common_utils.wait_until_true(self._processes_are_ready) def _processes_are_ready(self): try: diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/fullstack/resources/machine.py neutron-9.0.0~b3~dev557/neutron/tests/fullstack/resources/machine.py --- neutron-9.0.0~b2~dev280/neutron/tests/fullstack/resources/machine.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/fullstack/resources/machine.py 2016-08-03 20:10:34.000000000 +0000 @@ -16,7 +16,8 @@ from neutron_lib import constants -from neutron.agent.linux import utils +from neutron.common import utils +from neutron.extensions import portbindings as pbs from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers @@ -43,11 +44,13 @@ tenant_id=self.tenant_id, hostname=self.host.hostname) mac_address = self.neutron_port['mac_address'] + hybrid_plug = self.neutron_port[pbs.VIF_DETAILS].get( + pbs.OVS_HYBRID_PLUG, False) self.port = self.useFixture( net_helpers.PortFixture.get( self.bridge, self.namespace, mac_address, - self.neutron_port['id'])).port + self.neutron_port['id'], 
hybrid_plug)).port for fixed_ip in self.neutron_port['fixed_ips']: self._configure_ipaddress(fixed_ip) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/fullstack/resources/process.py neutron-9.0.0~b3~dev557/neutron/tests/fullstack/resources/process.py --- neutron-9.0.0~b2~dev280/neutron/tests/fullstack/resources/process.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/fullstack/resources/process.py 2016-08-03 20:10:34.000000000 +0000 @@ -118,7 +118,7 @@ config_filenames=config_filenames, kill_signal=signal.SIGTERM)) - utils.wait_until_true(self.server_is_live) + common_utils.wait_until_true(self.server_is_live) def server_is_live(self): try: diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/fullstack/test_connectivity.py neutron-9.0.0~b3~dev557/neutron/tests/fullstack/test_connectivity.py --- neutron-9.0.0~b2~dev280/neutron/tests/fullstack/test_connectivity.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/fullstack/test_connectivity.py 2016-08-03 20:10:34.000000000 +0000 @@ -19,9 +19,9 @@ from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment from neutron.tests.fullstack.resources import machine +from neutron.tests.unit import testlib_api - -load_tests = testscenarios.load_tests_apply_scenarios +load_tests = testlib_api.module_load_tests class BaseConnectivitySameNetworkTest(base.BaseFullStackTestCase): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/fullstack/test_l3_agent.py neutron-9.0.0~b3~dev557/neutron/tests/fullstack/test_l3_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/fullstack/test_l3_agent.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/fullstack/test_l3_agent.py 2016-08-03 20:10:34.000000000 +0000 @@ -21,13 +21,15 @@ from neutron.agent.l3 import agent as l3_agent from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib -from neutron.agent.linux import utils from neutron.common import utils as common_utils from neutron.tests.common.exclusive_resources import ip_network from neutron.tests.common import machine_fixtures from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment from neutron.tests.fullstack.resources import machine +from neutron.tests.unit import testlib_api + +load_tests = testlib_api.module_load_tests class TestL3Agent(base.BaseFullStackTestCase): @@ -47,7 +49,7 @@ def is_port_status_active(): port = self.client.show_port(port_id) return port['port']['status'] == 'ACTIVE' - utils.wait_until_true(lambda: is_port_status_active(), sleep=1) + common_utils.wait_until_true(lambda: is_port_status_active(), sleep=1) def _create_net_subnet_and_vm(self, tenant_id, subnet_cidrs, host, router): network = self.safe_client.create_network(tenant_id) @@ -87,7 +89,7 @@ def _assert_namespace_exists(self, ns_name): ip = ip_lib.IPWrapper(ns_name) - utils.wait_until_true(lambda: ip.netns.exists(ns_name)) + common_utils.wait_until_true(lambda: ip.netns.exists(ns_name)) def test_namespace_exists(self): tenant_id = uuidutils.generate_uuid() @@ -173,7 +175,7 @@ self.assertEqual(2, len(agents['agents']), 'HA router must be scheduled to both nodes') - utils.wait_until_true( + common_utils.wait_until_true( functools.partial( self._is_ha_router_active_on_one_agent, router['id']), diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/fullstack/test_qos.py neutron-9.0.0~b3~dev557/neutron/tests/fullstack/test_qos.py --- neutron-9.0.0~b2~dev280/neutron/tests/fullstack/test_qos.py 2016-06-17 
15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/fullstack/test_qos.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,28 +16,27 @@ from neutron_lib import constants from oslo_utils import uuidutils -import testscenarios from neutron.agent.common import ovs_lib from neutron.agent.linux import bridge_lib from neutron.agent.linux import tc_lib -from neutron.agent.linux import utils +from neutron.common import utils from neutron.services.qos import qos_consts from neutron.tests.common.agents import l2_extensions from neutron.tests.fullstack import base from neutron.tests.fullstack.resources import environment from neutron.tests.fullstack.resources import machine +from neutron.tests.unit import testlib_api -from neutron.plugins.ml2.drivers.linuxbridge.agent.common import \ - config as linuxbridge_agent_config +from neutron.conf.plugins.ml2.drivers import linuxbridge as \ + linuxbridge_agent_config from neutron.plugins.ml2.drivers.linuxbridge.agent import \ linuxbridge_neutron_agent as linuxbridge_agent from neutron.plugins.ml2.drivers.openvswitch.mech_driver import \ mech_openvswitch as mech_ovs -load_tests = testscenarios.load_tests_apply_scenarios - +load_tests = testlib_api.module_load_tests BANDWIDTH_BURST = 100 BANDWIDTH_LIMIT = 500 @@ -191,7 +190,7 @@ rule = self.safe_client.create_dscp_marking_rule( self.tenant_id, qos_policy_id, dscp_mark) # Make it consistent with GET reply - rule['type'] = qos_consts.RULE_TYPE_DSCP_MARK + rule['type'] = qos_consts.RULE_TYPE_DSCP_MARKING rule['qos_policy_id'] = qos_policy_id qos_policy['rules'].append(rule) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l2/base.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l2/base.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l2/base.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l2/base.py 2016-08-03 20:10:34.000000000 +0000 @@ -24,12 +24,11 @@ from neutron.agent.common import config as agent_config from neutron.agent.common import ovs_lib -from neutron.agent.l2.extensions import manager as ext_manager +from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager from neutron.agent.linux import interface from neutron.agent.linux import polling -from neutron.agent.linux import utils as agent_utils -from neutron.common import config as common_config from neutron.common import utils +from neutron.conf import common as common_config from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.openvswitch.agent.common import config \ as ovs_config @@ -156,7 +155,7 @@ self._mock_get_events(agent, polling_manager, ports) self.addCleanup(polling_manager.stop) polling_manager.start() - agent_utils.wait_until_true( + utils.wait_until_true( polling_manager._monitor.is_active) agent.check_ovs_status = mock.Mock( return_value=constants.OVS_NORMAL) @@ -213,7 +212,7 @@ 'physical_network': network.get('physical_network', 'physnet'), 'segmentation_id': network.get('segmentation_id', 1), 'fixed_ips': port['fixed_ips'], - 'device_owner': 'compute', + 'device_owner': n_const.DEVICE_OWNER_COMPUTE_PREFIX, 'port_security_enabled': True, 'security_groups': ['default'], 'admin_state_up': True} @@ -228,9 +227,9 @@ return agent.int_br.db_get_val( 'Interface', port, 'options', check_error=True) - agent_utils.wait_until_true( + utils.wait_until_true( lambda: get_peer(self.patch_int) == {'peer': self.patch_tun}) - agent_utils.wait_until_true( + utils.wait_until_true( lambda: get_peer(self.patch_tun) 
== {'peer': self.patch_int}) def assert_bridge_ports(self): @@ -361,7 +360,7 @@ def wait_until_ports_state(self, ports, up, timeout=60): port_ids = [p['id'] for p in ports] - agent_utils.wait_until_true( + utils.wait_until_true( lambda: self._expected_plugin_rpc_call( self.agent.plugin_rpc.update_device_list, port_ids, up), timeout=timeout) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l2/extensions/test_ovs_agent_qos_extension.py 2016-08-03 20:10:34.000000000 +0000 @@ -229,7 +229,7 @@ policy_copy.rules[0].max_kbps = 500 policy_copy.rules[0].max_burst_kbps = 5 policy_copy.rules[1].dscp_mark = TEST_DSCP_MARK_2 - consumer_reg.push(resources.QOS_POLICY, policy_copy, events.UPDATED) + consumer_reg.push(resources.QOS_POLICY, [policy_copy], events.UPDATED) self.wait_until_bandwidth_limit_rule_applied(self.ports[0], policy_copy.rules[0]) self._assert_bandwidth_limit_rule_is_set(self.ports[0], @@ -265,6 +265,6 @@ policy_copy = copy.deepcopy(self.qos_policies[TEST_POLICY_ID1]) policy_copy.rules = list() - consumer_reg.push(resources.QOS_POLICY, policy_copy, events.UPDATED) + consumer_reg.push(resources.QOS_POLICY, [policy_copy], events.UPDATED) self.wait_until_bandwidth_limit_rule_applied(port_dict, None) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l3/framework.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l3/framework.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l3/framework.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l3/framework.py 2016-08-29 20:05:49.000000000 +0000 @@ -17,7 +17,7 @@ import mock import netaddr -from neutron_lib import constants as l3_constants +from neutron_lib import constants from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils @@ -29,10 +29,8 @@ from neutron.agent import l3_agent as l3_agent_main from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib -from neutron.agent.linux import utils -from neutron.common import config as common_config -from neutron.common import constants as n_const from neutron.common import utils as common_utils +from neutron.conf import common as common_config from neutron.tests.common import l3_test_common from neutron.tests.common import net_helpers from neutron.tests.functional import base @@ -124,13 +122,13 @@ router_info = self.generate_router_info(enable_ha=ha) router = self.manage_router(self.agent, router_info) - port = net_helpers.get_free_namespace_port(l3_constants.PROTO_NAME_TCP, - router.ns_name) + port = net_helpers.get_free_namespace_port( + constants.PROTO_NAME_TCP, router.ns_name) client_address = '19.4.4.3' server_address = '35.4.0.4' def clean_fips(router): - router.router[l3_constants.FLOATINGIP_KEY] = [] + router.router[constants.FLOATINGIP_KEY] = [] clean_fips(router) self._add_fip(router, client_address, fixed_address=server_address) @@ -149,7 +147,7 @@ n, len([line for line in out.strip().split('\n') if line])) if ha: - utils.wait_until_true(lambda: router.ha_state == 'master') + common_utils.wait_until_true(lambda: router.ha_state == 'master') with self.assert_max_execution_time(100): 
assert_num_of_conntrack_rules(0) @@ -171,8 +169,8 @@ # Assert that every defined FIP is updated via RPC expected_fips = set([ - (fip['id'], l3_constants.FLOATINGIP_STATUS_ACTIVE) for fip in - router.router[l3_constants.FLOATINGIP_KEY]]) + (fip['id'], constants.FLOATINGIP_STATUS_ACTIVE) for fip in + router.router[constants.FLOATINGIP_KEY]]) call = [args[0] for args in rpc.call_args_list][0] actual_fips = set( [(fip_id, status) for fip_id, status in call[2].items()]) @@ -189,7 +187,7 @@ def ha_router_dev_name_getter(not_used): return router.get_ha_device_name() self.assertTrue(self.device_exists_with_ips_and_mac( - router.router[l3_constants.HA_INTERFACE_KEY], + router.router[constants.HA_INTERFACE_KEY], ha_router_dev_name_getter, router.ns_name)) def _assert_gateway(self, router, v6_ext_gw_with_sub=True): @@ -210,6 +208,15 @@ external_port, router.get_external_device_name, router.ns_name)) + def _assert_ipv6_accept_ra(self, router): + external_port = router.get_ex_gw_port() + external_device_name = router.get_external_device_name( + external_port['id']) + ip_wrapper = ip_lib.IPWrapper(namespace=router.ns_name) + ra_state = ip_wrapper.netns.execute(['sysctl', '-b', + 'net.ipv6.conf.%s.accept_ra' % external_device_name]) + self.assertEqual('2', ra_state) + def _router_lifecycle(self, enable_ha, ip_version=4, dual_stack=False, v6_ext_gw_with_sub=True): router_info = self.generate_router_info(enable_ha, ip_version, @@ -219,7 +226,7 @@ router = self.manage_router(self.agent, router_info) # Add multiple-IPv6-prefix internal router port - slaac = n_const.IPV6_SLAAC + slaac = constants.IPV6_SLAAC slaac_mode = {'ra_mode': slaac, 'address_mode': slaac} subnet_modes = [slaac_mode] * 2 self._add_internal_interface_by_subnet(router.router, @@ -233,22 +240,22 @@ interface_name = router.get_external_device_name(port['id']) self._assert_no_ip_addresses_on_interface(router.ns_name, interface_name) - utils.wait_until_true(lambda: router.ha_state == 'master') + common_utils.wait_until_true(lambda: router.ha_state == 'master') # Keepalived notifies of a state transition when it starts, # not when it ends. Thus, we have to wait until keepalived finishes # configuring everything. We verify this by waiting until the last # device has an IP address. - device = router.router[l3_constants.INTERFACE_KEY][-1] + device = router.router[constants.INTERFACE_KEY][-1] device_exists = functools.partial( self.device_exists_with_ips_and_mac, device, router.get_internal_device_name, router.ns_name) - utils.wait_until_true(device_exists) + common_utils.wait_until_true(device_exists) self.assertTrue(self._namespace_exists(router.ns_name)) - utils.wait_until_true( + common_utils.wait_until_true( lambda: self._metadata_proxy_exists(self.agent.conf, router)) self._assert_internal_devices(router) self._assert_external_device(router) @@ -271,13 +278,7 @@ # when IPv6 is enabled and no IPv6 gateway is configured. 
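The hunk just below swaps an inline sysctl read for the _assert_ipv6_accept_ra helper added above. For context, net.ipv6.conf.<dev>.accept_ra set to '2' means the kernel accepts Router Advertisements on that device even when forwarding is enabled, which is what a router's external gateway port needs. A rough standalone sketch of reading the flag inside a namespace, using the same ip_lib call as the helper (the function name is illustrative):

    from neutron.agent.linux import ip_lib

    def accept_ra_state(namespace, device_name):
        # Returns the raw sysctl value as a string: '0', '1' or '2'.
        ip_wrapper = ip_lib.IPWrapper(namespace=namespace)
        return ip_wrapper.netns.execute(
            ['sysctl', '-b', 'net.ipv6.conf.%s.accept_ra' % device_name])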
if router.use_ipv6 and not v6_ext_gw_with_sub: if not self.agent.conf.ipv6_gateway: - external_port = router.get_ex_gw_port() - external_device_name = router.get_external_device_name( - external_port['id']) - ip_wrapper = ip_lib.IPWrapper(namespace=router.ns_name) - ra_state = ip_wrapper.netns.execute(['sysctl', '-b', - 'net.ipv6.conf.%s.accept_ra' % external_device_name]) - self.assertEqual('2', ra_state) + self._assert_ipv6_accept_ra(router) if enable_ha: self._assert_ha_device(router) @@ -306,7 +307,7 @@ 'fixed_ip_address': fixed_address, 'host': host, 'fixed_ip_address_scope': fixed_ip_address_scope} - router.router[l3_constants.FLOATINGIP_KEY].append(fip) + router.router[constants.FLOATINGIP_KEY].append(fip) def _add_internal_interface_by_subnet(self, router, count=1, ip_version=4, @@ -350,7 +351,7 @@ external_device_name = router.get_external_device_name( external_port['id']) external_device_cidr = self._port_first_ip_cidr(external_port) - internal_port = router.router[l3_constants.INTERFACE_KEY][0] + internal_port = router.router[constants.INTERFACE_KEY][0] int_port_ipv6 = ip_lib.get_ipv6_lladdr(internal_port['mac_address']) internal_device_name = router.get_internal_device_name( internal_port['id']) @@ -408,7 +409,7 @@ # then the devices and iptable rules have also been deleted, # so there's no need to check that explicitly. self.assertFalse(self._namespace_exists(router.ns_name)) - utils.wait_until_true( + common_utils.wait_until_true( lambda: not self._metadata_proxy_exists(self.agent.conf, router)) def _assert_snat_chains(self, router): @@ -440,7 +441,7 @@ metadata_port_filter)) def _assert_internal_devices(self, router): - internal_devices = router.router[l3_constants.INTERFACE_KEY] + internal_devices = router.router[constants.INTERFACE_KEY] self.assertTrue(len(internal_devices)) for device in internal_devices: self.assertTrue(self.device_exists_with_ips_and_mac( @@ -479,7 +480,7 @@ assert_ovs_bridge_empty(self.agent.conf.external_network_bridge) def floating_ips_configured(self, router): - floating_ips = router.router[l3_constants.FLOATINGIP_KEY] + floating_ips = router.router[constants.FLOATINGIP_KEY] external_port = router.get_ex_gw_port() return len(floating_ips) and all( ip_lib.device_exists_with_ips_and_mac( diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l3/test_dvr_router.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l3/test_dvr_router.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l3/test_dvr_router.py 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l3/test_dvr_router.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,20 +13,22 @@ # License for the specific language governing permissions and limitations # under the License. 
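The test_dvr_router.py changes that follow begin by importing copy; the new gateway-update tests deep-copy port dictionaries so the agent compares a genuinely different object rather than a mutated alias. A tiny self-contained illustration of why the deep copy matters:

    import copy

    old_port = {'subnets': [{'gateway_ip': '10.0.0.1'}]}

    alias = old_port                 # same object: mutations leak back
    fresh = copy.deepcopy(old_port)  # independent nested structure
    fresh['subnets'][0]['gateway_ip'] = None

    assert old_port['subnets'][0]['gateway_ip'] == '10.0.0.1'
    assert fresh['subnets'][0]['gateway_ip'] is None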
+import copy import functools import mock import netaddr -from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants import testtools from neutron.agent.l3 import agent as neutron_l3_agent +from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager -from neutron.agent.linux import utils from neutron.common import constants as n_const +from neutron.common import utils from neutron.extensions import portbindings from neutron.tests.common import l3_test_common from neutron.tests.common import machine_fixtures @@ -34,7 +36,7 @@ from neutron.tests.functional.agent.l3 import framework -DEVICE_OWNER_COMPUTE = l3_constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' +DEVICE_OWNER_COMPUTE = lib_constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' class TestDvrRouter(framework.L3AgentTestFramework): @@ -55,10 +57,6 @@ self.agent.conf.agent_mode = 'dvr' self._test_update_floatingip_statuses(self.generate_dvr_router_info()) - def test_dvr_router_lifecycle_ha_with_snat_with_fips_nmtu(self): - self._dvr_router_lifecycle(enable_ha=True, enable_snat=True, - use_port_mtu=True) - def test_dvr_router_lifecycle_without_ha_without_snat_with_fips(self): self._dvr_router_lifecycle(enable_ha=False, enable_snat=False) @@ -84,6 +82,86 @@ self._assert_dvr_floating_ips(router) self._assert_snat_namespace_does_not_exist(router) + def test_dvr_router_fips_stale_gw_port(self): + self.agent.conf.agent_mode = 'dvr' + + # Create the router with external net + dvr_router_kwargs = {'ip_address': '19.4.4.3', + 'subnet_cidr': '19.4.4.0/24', + 'gateway_ip': '19.4.4.1', + 'gateway_mac': 'ca:fe:de:ab:cd:ef'} + router_info = self.generate_dvr_router_info(**dvr_router_kwargs) + external_gw_port = router_info['gw_port'] + ext_net_id = router_info['_floatingips'][0]['floating_network_id'] + self.mock_plugin_api.get_external_network_id.return_value(ext_net_id) + + # Create the fip namespace up front + stale_fip_ns = dvr_fip_ns.FipNamespace(ext_net_id, + self.agent.conf, + self.agent.driver, + self.agent.use_ipv6) + stale_fip_ns.create() + + # Add a stale fg port to the namespace + fixed_ip = external_gw_port['fixed_ips'][0] + float_subnet = external_gw_port['subnets'][0] + fip_gw_port_ip = str(netaddr.IPAddress(fixed_ip['ip_address']) + 10) + prefixlen = netaddr.IPNetwork(float_subnet['cidr']).prefixlen + stale_agent_gw_port = { + 'subnets': [{'cidr': float_subnet['cidr'], + 'gateway_ip': float_subnet['gateway_ip'], + 'id': fixed_ip['subnet_id']}], + 'network_id': external_gw_port['network_id'], + 'device_owner': lib_constants.DEVICE_OWNER_AGENT_GW, + 'mac_address': 'fa:16:3e:80:8f:89', + portbindings.HOST_ID: self.agent.conf.host, + 'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'], + 'ip_address': fip_gw_port_ip, + 'prefixlen': prefixlen}], + 'id': framework._uuid(), + 'device_id': framework._uuid()} + stale_fip_ns.create_gateway_port(stale_agent_gw_port) + + stale_dev_exists = self.device_exists_with_ips_and_mac( + stale_agent_gw_port, + stale_fip_ns.get_ext_device_name, + stale_fip_ns.get_name()) + self.assertTrue(stale_dev_exists) + + # Create the router, this shouldn't allow the duplicate port to stay + router = self.manage_router(self.agent, router_info) + + # Assert the device no longer exists + stale_dev_exists = self.device_exists_with_ips_and_mac( + stale_agent_gw_port, + stale_fip_ns.get_ext_device_name, + stale_fip_ns.get_name()) + 
self.assertFalse(stale_dev_exists) + + # Validate things are looking good and clean up + self._validate_fips_for_external_network( + router, router.fip_ns.get_name()) + ext_gateway_port = router_info['gw_port'] + self._delete_router(self.agent, router.router_id) + self._assert_fip_namespace_deleted(ext_gateway_port) + + def test_dvr_unused_snat_ns_deleted_when_agent_restarts_after_move(self): + """Test to validate the stale snat namespace delete with snat move. + + This test validates the stale snat namespace cleanup when + the agent restarts after the gateway port has been moved + from the agent. + """ + self.agent.conf.agent_mode = 'dvr_snat' + router_info = self.generate_dvr_router_info() + router1 = self.manage_router(self.agent, router_info) + self._assert_snat_namespace_exists(router1) + restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( + self.agent.host, self.agent.conf) + router1.router['gw_port_host'] = "my-new-host" + restarted_router = self.manage_router(restarted_agent, router1.router) + self._assert_snat_namespace_does_not_exist(restarted_router) + def test_dvr_router_fips_for_multiple_ext_networks(self): agent_mode = 'dvr' # Create the first router fip with external net1 @@ -109,9 +187,7 @@ self._validate_fips_for_external_network(router2, fip2_ns) def _dvr_router_lifecycle(self, enable_ha=False, enable_snat=False, - custom_mtu=2000, use_port_mtu=False, - ip_version=4, - dual_stack=False): + custom_mtu=2000, ip_version=4, dual_stack=False): '''Test dvr router lifecycle :param enable_ha: sets the ha value for the router. @@ -127,15 +203,13 @@ # We get the router info particular to a dvr router router_info = self.generate_dvr_router_info( enable_ha, enable_snat, extra_routes=True) - if use_port_mtu: - for key in ('_interfaces', '_snat_router_interfaces', - '_floatingip_agent_interfaces'): - for port in router_info[key]: - port['mtu'] = custom_mtu - router_info['gw_port']['mtu'] = custom_mtu + for key in ('_interfaces', '_snat_router_interfaces', + '_floatingip_agent_interfaces'): + for port in router_info[key]: + port['mtu'] = custom_mtu + router_info['gw_port']['mtu'] = custom_mtu + if enable_ha: router_info['_ha_interface']['mtu'] = custom_mtu - else: - self.agent.conf.network_device_mtu = custom_mtu # We need to mock the get_agent_gateway_port return value # because the whole L3PluginApi is mocked and we need the port @@ -165,7 +239,7 @@ # not when it ends. Thus, we have to wait until keepalived finishes # configuring everything. We verify this by waiting until the last # device has an IP address. 
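Just below, the keepalived wait is built from functools.partial: wait_until_true needs a zero-argument predicate, and partial binds the device arguments up front. A self-contained sketch of the same adaptation (the predicate and its arguments are hypothetical stand-ins):

    import functools

    def device_has_ip(device, namespace):
        # Hypothetical predicate; the real check also verifies IPs and MAC.
        return True

    check = functools.partial(device_has_ip, {'id': 'port-1'}, 'qrouter-ns')
    assert check() is True  # now callable with no arguments by the poller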
- device = router.router[l3_constants.INTERFACE_KEY][-1] + device = router.router[lib_constants.INTERFACE_KEY][-1] device_exists = functools.partial( self.device_exists_with_ips_and_mac, device, @@ -224,7 +298,7 @@ num_internal_ports=2, enable_gw=enable_gw, **kwargs) - internal_ports = router.get(l3_constants.INTERFACE_KEY, []) + internal_ports = router.get(lib_constants.INTERFACE_KEY, []) router['distributed'] = True router['gw_port_host'] = agent.conf.host @@ -265,7 +339,7 @@ 'gateway_ip': float_subnet['gateway_ip'], 'id': fixed_ip['subnet_id']}], 'network_id': external_gw_port['network_id'], - 'device_owner': l3_constants.DEVICE_OWNER_AGENT_GW, + 'device_owner': lib_constants.DEVICE_OWNER_AGENT_GW, 'mac_address': 'fa:16:3e:80:8d:89', portbindings.HOST_ID: self.agent.conf.host, 'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'], @@ -296,7 +370,7 @@ 'gateway_ip': snat_subnet['gateway_ip'], 'id': fixed_ip['subnet_id']}], 'network_id': port['network_id'], - 'device_owner': l3_constants.DEVICE_OWNER_ROUTER_SNAT, + 'device_owner': lib_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:89', 'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'], 'ip_address': snat_ip, @@ -383,7 +457,7 @@ # in the fip namespace: # Check that the fg- (floatingip_agent_gateway) # is created with the ip address of the external gateway port - floating_ips = router.router[l3_constants.FLOATINGIP_KEY] + floating_ips = router.router[lib_constants.FLOATINGIP_KEY] self.assertTrue(floating_ips) # We need to fetch the floatingip agent gateway port info # from the router_info @@ -430,7 +504,7 @@ self.assertTrue(self._namespace_exists(fip_ns)) restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) - router1.router[l3_constants.FLOATINGIP_KEY] = [] + router1.router[lib_constants.FLOATINGIP_KEY] = [] self.manage_router(restarted_agent, router1.router) self._assert_dvr_snat_gateway(router1) self.assertTrue(self._namespace_exists(fip_ns)) @@ -439,7 +513,7 @@ self.agent.conf.agent_mode = 'dvr' router_info = self.generate_dvr_router_info() router = self.manage_router(self.agent, router_info) - floating_ips = router.router[l3_constants.FLOATINGIP_KEY] + floating_ips = router.router[lib_constants.FLOATINGIP_KEY] router_ns = router.ns_name fip_rule_prio_1 = self._get_fixed_ip_rule_priority( router_ns, floating_ips[0]['fixed_ip_address']) @@ -452,6 +526,22 @@ router_ns, floating_ips[0]['fixed_ip_address']) self.assertNotEqual(fip_rule_prio_1, fip_rule_prio_2) + def test_dvr_router_floating_ip_moved(self): + self.agent.conf.agent_mode = 'dvr' + router_info = self.generate_dvr_router_info() + router = self.manage_router(self.agent, router_info) + floating_ips = router.router[lib_constants.FLOATINGIP_KEY] + router_ns = router.ns_name + fixed_ip = floating_ips[0]['fixed_ip_address'] + self.assertTrue(self._fixed_ip_rule_exists(router_ns, fixed_ip)) + # Floating IP reassigned to another fixed IP + new_fixed_ip = '10.0.0.2' + self.assertNotEqual(new_fixed_ip, fixed_ip) + floating_ips[0]['fixed_ip_address'] = new_fixed_ip + self.agent._process_updated_router(router.router) + self.assertFalse(self._fixed_ip_rule_exists(router_ns, fixed_ip)) + self.assertTrue(self._fixed_ip_rule_exists(router_ns, new_fixed_ip)) + def _assert_iptables_rules_exist( self, router_iptables_manager, table_name, expected_rules): rules = router_iptables_manager.get_rules_for_table(table_name) @@ -486,6 +576,17 @@ info = iprule.rule._parse_line(4, line) return info['priority'] + def _fixed_ip_rule_exists(self, 
namespace, ip): + iprule = ip_lib.IPRule(namespace) + lines = iprule.rule._as_root([4], ['show']).splitlines() + for line in lines: + if ip in line: + info = iprule.rule._parse_line(4, line) + if info['from'] == ip: + return True + + return False + def test_dvr_router_add_internal_network_set_arp_cache(self): # Check that, when the router is set up and there are # existing ports on the uplinked subnet, the ARP @@ -629,7 +730,7 @@ self.assertEqual(router1.snat_ports, snat_internal_port) # Now let us not pass INTERFACE_KEY, to emulate # the interface has been removed. - router1.router[l3_constants.INTERFACE_KEY] = [] + router1.router[lib_constants.INTERFACE_KEY] = [] # Now let us not pass the SNAT_ROUTER_INTF_KEY, to emulate # that the server did not send it, since the interface has been # removed. @@ -778,6 +879,34 @@ self._assert_extra_routes(router_updated, namespace=snat_ns_name) self._assert_extra_routes(router_updated) + def test_dvr_router_gateway_update_to_none(self): + self.agent.conf.agent_mode = 'dvr_snat' + router_info = self.generate_dvr_router_info(enable_snat=True) + router = self.manage_router(self.agent, router_info) + gw_port = router.get_ex_gw_port() + ex_gw_port_name = router.get_external_device_name(gw_port['id']) + ex_gw_device = ip_lib.IPDevice(ex_gw_port_name, + namespace=router.snat_namespace.name) + fg_port = router.fip_ns.agent_gateway_port + fg_port_name = router.fip_ns.get_ext_device_name(fg_port['id']) + fg_device = ip_lib.IPDevice(fg_port_name, + namespace=router.fip_ns.name) + self.assertIn('gateway', ex_gw_device.route.get_gateway()) + self.assertIn('gateway', fg_device.route.get_gateway()) + + # Make this copy to make agent think gw_port changed. + router.ex_gw_port = copy.deepcopy(router.ex_gw_port) + for subnet in gw_port['subnets']: + subnet['gateway_ip'] = None + new_fg_port = copy.deepcopy(fg_port) + for subnet in new_fg_port['subnets']: + subnet['gateway_ip'] = None + + router.router[n_const.FLOATINGIP_AGENT_INTF_KEY] = [new_fg_port] + router.process(self.agent) + self.assertIsNone(ex_gw_device.route.get_gateway()) + self.assertIsNone(fg_device.route.get_gateway()) + def _assert_fip_namespace_deleted(self, ext_gateway_port): ext_net_id = ext_gateway_port['network_id'] fip_ns = self.agent.get_fip_ns(ext_net_id) @@ -794,20 +923,20 @@ internal_address_scope2, gw_address_scope=None): router_info = self.generate_dvr_router_info(enable_snat=True) address_scope1 = { - str(l3_constants.IP_VERSION_4): internal_address_scope1} + str(lib_constants.IP_VERSION_4): internal_address_scope1} address_scope2 = { - str(l3_constants.IP_VERSION_4): internal_address_scope2} + str(lib_constants.IP_VERSION_4): internal_address_scope2} if gw_address_scope: router_info['gw_port']['address_scopes'] = { - str(l3_constants.IP_VERSION_4): gw_address_scope} - router_info[l3_constants.INTERFACE_KEY][0]['address_scopes'] = ( + str(lib_constants.IP_VERSION_4): gw_address_scope} + router_info[lib_constants.INTERFACE_KEY][0]['address_scopes'] = ( address_scope1) - router_info[l3_constants.INTERFACE_KEY][1]['address_scopes'] = ( + router_info[lib_constants.INTERFACE_KEY][1]['address_scopes'] = ( address_scope2) # Renew the address scope router_info[n_const.SNAT_ROUTER_INTF_KEY] = [] self._add_snat_port_info_to_router( - router_info, router_info[l3_constants.INTERFACE_KEY]) + router_info, router_info[lib_constants.INTERFACE_KEY]) router = self.manage_router(self.agent, router_info) router_ip_cidr1 = self._port_first_ip_cidr(router.internal_ports[0]) @@ -836,8 +965,8 @@ 'scope1', 'scope1') 
# Internal networks that are in the same address scope can connect to # each other - net_helpers.assert_ping(test_machine1.namespace, test_machine2.ip, 5) - net_helpers.assert_ping(test_machine2.namespace, test_machine1.ip, 5) + net_helpers.assert_ping(test_machine1.namespace, test_machine2.ip) + net_helpers.assert_ping(test_machine2.namespace, test_machine1.ip) def test_connection_from_diff_address_scope(self): self.agent.conf.agent_mode = 'dvr_snat' @@ -854,7 +983,7 @@ (machine_same_scope, machine_diff_scope, router) = self._setup_address_scope('scope1', 'scope2', 'scope1') - router.router[l3_constants.FLOATINGIP_KEY] = [] + router.router[lib_constants.FLOATINGIP_KEY] = [] fip_same_scope = '19.4.4.10' self._add_fip(router, fip_same_scope, fixed_address=machine_same_scope.ip, @@ -872,8 +1001,8 @@ src_machine = self.useFixture( machine_fixtures.FakeMachine(br_ex, '19.4.4.12/24')) # Floating ip should work regardless of address scope - net_helpers.assert_ping(src_machine.namespace, fip_same_scope, 5) - net_helpers.assert_ping(src_machine.namespace, fip_diff_scope, 5) + net_helpers.assert_ping(src_machine.namespace, fip_same_scope) + net_helpers.assert_ping(src_machine.namespace, fip_diff_scope) def test_direct_route_for_address_scope(self): self.agent.conf.agent_mode = 'dvr_snat' @@ -889,8 +1018,7 @@ machine_fixtures.FakeMachine(br_ex, '19.4.4.12/24', gw_ip)) # For the internal networks that are in the same address scope as # the external network, they can route to the external network directly - net_helpers.assert_ping( - src_machine.namespace, machine_same_scope.ip, 5) + net_helpers.assert_ping(src_machine.namespace, machine_same_scope.ip) # For the internal networks that are not in the same address scope as # the external network, SNAT will be used. A direct route will not work # here. 
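A change that recurs throughout these test diffs is the move of wait_until_true from neutron.agent.linux.utils to neutron.common.utils; the call sites keep the same predicate/timeout/sleep/exception keywords. A simplified sketch of what such an eventlet-based poller typically looks like (the body is illustrative, not the exact implementation):

    import eventlet

    def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
        # Re-evaluate the predicate until it holds or the timeout fires.
        try:
            with eventlet.Timeout(timeout):
                while not predicate():
                    eventlet.sleep(sleep)
        except eventlet.Timeout:
            if exception is not None:
                raise exception
            raise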
diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l3/test_ha_router.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l3/test_ha_router.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l3/test_ha_router.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l3/test_ha_router.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,14 +16,14 @@ import copy import mock -from neutron_lib import constants as l3_constants +from neutron_lib import constants import six +import testtools from neutron.agent.l3 import agent as neutron_l3_agent from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib -from neutron.agent.linux import utils -from neutron.common import constants +from neutron.common import ipv6_utils from neutron.common import utils as common_utils from neutron.tests.common import l3_test_common from neutron.tests.common import net_helpers @@ -41,12 +41,12 @@ self.agent, 'enqueue_state_change').start() router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) - utils.wait_until_true(lambda: router.ha_state == 'master') + common_utils.wait_until_true(lambda: router.ha_state == 'master') self.fail_ha_router(router) - utils.wait_until_true(lambda: router.ha_state == 'backup') + common_utils.wait_until_true(lambda: router.ha_state == 'backup') - utils.wait_until_true(lambda: enqueue_mock.call_count == 3) + common_utils.wait_until_true(lambda: enqueue_mock.call_count == 3) calls = [args[0] for args in enqueue_mock.call_args_list] self.assertEqual((router.router_id, 'backup'), calls[0]) self.assertEqual((router.router_id, 'master'), calls[1]) @@ -71,9 +71,9 @@ router_info = self.generate_router_info(enable_ha=True) router2 = self.manage_router(self.agent, router_info) - utils.wait_until_true(lambda: router1.ha_state == 'backup') - utils.wait_until_true(lambda: router2.ha_state == 'master') - utils.wait_until_true( + common_utils.wait_until_true(lambda: router1.ha_state == 'backup') + common_utils.wait_until_true(lambda: router2.ha_state == 'master') + common_utils.wait_until_true( lambda: self._expected_rpc_report( {router1.router_id: 'standby', router2.router_id: 'active'})) @@ -98,6 +98,20 @@ self._router_lifecycle(enable_ha=True, dual_stack=True, v6_ext_gw_with_sub=False) + @testtools.skipUnless(ipv6_utils.is_enabled(), "IPv6 is not enabled") + def test_ipv6_router_advts_after_router_state_change(self): + # Schedule router to l3 agent, and then add router gateway. Verify + # that router gw interface is configured to receive Router Advts. 
+ router_info = l3_test_common.prepare_router_data( + enable_snat=True, enable_ha=True, dual_stack=True, enable_gw=False) + router = self.manage_router(self.agent, router_info) + common_utils.wait_until_true(lambda: router.ha_state == 'master') + _ext_dev_name, ex_port = l3_test_common.prepare_ext_gw_test( + mock.Mock(), router) + router_info['gw_port'] = ex_port + router.process(self.agent) + self._assert_ipv6_accept_ra(router) + def test_keepalived_configuration(self): router_info = self.generate_router_info(enable_ha=True) router = self.manage_router(self.agent, router_info) @@ -151,7 +165,8 @@ restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport( self.agent.host, self.agent.conf) self.manage_router(restarted_agent, router1.router) - utils.wait_until_true(lambda: self.floating_ips_configured(router1)) + common_utils.wait_until_true( + lambda: self.floating_ips_configured(router1)) self.assertIn( router1._get_primary_vip(), self._get_addresses_on_device( @@ -161,11 +176,11 @@ def test_ha_router_ipv6_radvd_status(self): router_info = self.generate_router_info(ip_version=6, enable_ha=True) router1 = self.manage_router(self.agent, router_info) - utils.wait_until_true(lambda: router1.ha_state == 'master') - utils.wait_until_true(lambda: router1.radvd.enabled) + common_utils.wait_until_true(lambda: router1.ha_state == 'master') + common_utils.wait_until_true(lambda: router1.radvd.enabled) def _check_lla_status(router, expected): - internal_devices = router.router[l3_constants.INTERFACE_KEY] + internal_devices = router.router[constants.INTERFACE_KEY] for device in internal_devices: lladdr = ip_lib.get_ipv6_lladdr(device['mac_address']) exists = ip_lib.device_exists_with_ips_and_mac( @@ -179,8 +194,9 @@ ha_device = ip_lib.IPDevice(device_name, namespace=router1.ns_name) ha_device.link.set_down() - utils.wait_until_true(lambda: router1.ha_state == 'backup') - utils.wait_until_true(lambda: not router1.radvd.enabled, timeout=10) + common_utils.wait_until_true(lambda: router1.ha_state == 'backup') + common_utils.wait_until_true( + lambda: not router1.radvd.enabled, timeout=10) _check_lla_status(router1, False) def test_ha_router_process_ipv6_subnets_to_existing_port(self): @@ -193,7 +209,7 @@ for ip_addr in ip_cidrs: self.assertIn(ip_addr, config) - interface_id = router.router[l3_constants.INTERFACE_KEY][0]['id'] + interface_id = router.router[constants.INTERFACE_KEY][0]['id'] slaac = constants.IPV6_SLAAC slaac_mode = {'ra_mode': slaac, 'address_mode': slaac} @@ -202,11 +218,11 @@ ip_version=6, ipv6_subnet_modes=[slaac_mode], interface_id=interface_id) router.process(self.agent) - utils.wait_until_true(lambda: router.ha_state == 'master') + common_utils.wait_until_true(lambda: router.ha_state == 'master') # Verify that router internal interface is present and is configured # with IP address from both the subnets. 
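Earlier in this file's diff, _check_lla_status derives each internal device's expected link-local address from its MAC with ip_lib.get_ipv6_lladdr. As background, such a helper can be built on netaddr's EUI-64 support; a sketch under that assumption (not necessarily the exact implementation):

    import netaddr

    def ipv6_lladdr_from_mac(mac):
        # EUI-64 expansion of the MAC, placed in the fe80::/64 prefix.
        return '%s/64' % netaddr.EUI(mac).ipv6_link_local()

    # e.g. '00:16:3e:33:44:55' -> 'fe80::216:3eff:fe33:4455/64'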
- internal_iface = router.router[l3_constants.INTERFACE_KEY][0] + internal_iface = router.router[constants.INTERFACE_KEY][0] self.assertEqual(2, len(internal_iface['fixed_ips'])) self._assert_internal_devices(router) @@ -215,16 +231,16 @@ # Remove one subnet from the router internal iface interfaces = copy.deepcopy(router.router.get( - l3_constants.INTERFACE_KEY, [])) + constants.INTERFACE_KEY, [])) fixed_ips, subnets = [], [] fixed_ips.append(interfaces[0]['fixed_ips'][0]) subnets.append(interfaces[0]['subnets'][0]) interfaces[0].update({'fixed_ips': fixed_ips, 'subnets': subnets}) - router.router[l3_constants.INTERFACE_KEY] = interfaces + router.router[constants.INTERFACE_KEY] = interfaces router.process(self.agent) # Verify that router internal interface has a single ipaddress - internal_iface = router.router[l3_constants.INTERFACE_KEY][0] + internal_iface = router.router[constants.INTERFACE_KEY][0] self.assertEqual(1, len(internal_iface['fixed_ips'])) self._assert_internal_devices(router) @@ -236,7 +252,7 @@ router = self.manage_router(self.agent, router_info) self.fail_ha_router(router) - utils.wait_until_true(lambda: router.ha_state == 'backup') + common_utils.wait_until_true(lambda: router.ha_state == 'backup') # The purpose of the test is to simply make sure no exception is raised port = router.get_ex_gw_port() @@ -248,10 +264,10 @@ router = self.manage_router(self.agent, router_info) ex_gw_port = router.get_ex_gw_port() interface_name = router.get_external_device_interface_name(ex_gw_port) - utils.wait_until_true(lambda: router.ha_state == 'master') + common_utils.wait_until_true(lambda: router.ha_state == 'master') self._add_fip(router, '172.168.1.20', fixed_address='10.0.0.3') router.process(self.agent) - router.router[l3_constants.FLOATINGIP_KEY] = [] + router.router[constants.FLOATINGIP_KEY] = [] # The purpose of the test is to simply make sure no exception is raised # Because router.process will consume the FloatingIpSetupException, # call the configure_fip_addresses directly here @@ -285,7 +301,7 @@ router1 = self.manage_router(self.agent, router_info) router_info_2 = copy.deepcopy(router_info) - router_info_2[l3_constants.HA_INTERFACE_KEY] = ( + router_info_2[constants.HA_INTERFACE_KEY] = ( l3_test_common.get_ha_interface(ip='169.254.192.2', mac='22:22:22:22:22:22')) @@ -294,10 +310,10 @@ self.NESTED_NAMESPACE_SEPARATOR, self.failover_agent.host) router2 = self.manage_router(self.failover_agent, router_info_2) - utils.wait_until_true(lambda: router1.ha_state == 'master') - utils.wait_until_true(lambda: router2.ha_state == 'backup') + common_utils.wait_until_true(lambda: router1.ha_state == 'master') + common_utils.wait_until_true(lambda: router2.ha_state == 'backup') self.fail_ha_router(router1) - utils.wait_until_true(lambda: router2.ha_state == 'master') - utils.wait_until_true(lambda: router1.ha_state == 'backup') + common_utils.wait_until_true(lambda: router2.ha_state == 'master') + common_utils.wait_until_true(lambda: router1.ha_state == 'backup') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l3/test_keepalived_state_change.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l3/test_keepalived_state_change.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l3/test_keepalived_state_change.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l3/test_keepalived_state_change.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,12 +15,11 @@ import os import mock -from oslo_config import cfg from 
oslo_config import fixture as fixture_config from oslo_utils import uuidutils -from neutron._i18n import _ from neutron.agent.l3 import keepalived_state_change +from neutron.conf.agent.l3 import keepalived as kd from neutron.tests.functional import base @@ -28,12 +27,7 @@ def setUp(self): super(TestKeepalivedStateChange, self).setUp() self.conf_fixture = self.useFixture(fixture_config.Config()) - self.conf_fixture.register_opt( - cfg.StrOpt('metadata_proxy_socket', - default='$state_path/metadata_proxy', - help=_('Location of Metadata Proxy UNIX domain ' - 'socket'))) - + kd.register_l3_agent_keepalived_opts(self.conf_fixture) self.router_id = uuidutils.generate_uuid() self.conf_dir = self.get_default_temp_dir().path self.cidr = '169.254.128.1/24' diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l3/test_legacy_router.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l3/test_legacy_router.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/l3/test_legacy_router.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/l3/test_legacy_router.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,9 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. +import copy import mock -from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants from neutron.agent.l3 import namespace_manager from neutron.agent.l3 import namespaces @@ -23,6 +24,7 @@ from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources +from neutron.tests import base as tests_base from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l3 import framework @@ -78,6 +80,56 @@ self._router_lifecycle(enable_ha=False, dual_stack=True, v6_ext_gw_with_sub=False) + def test_legacy_router_gateway_update_to_none(self): + router_info = self.generate_router_info(False) + router = self.manage_router(self.agent, router_info) + gw_port = router.get_ex_gw_port() + interface_name = router.get_external_device_name(gw_port['id']) + device = ip_lib.IPDevice(interface_name, namespace=router.ns_name) + self.assertIn('gateway', device.route.get_gateway()) + + # Make this copy, so that the agent will think there is a change in + # the external gateway port. + router.ex_gw_port = copy.deepcopy(router.ex_gw_port) + for subnet in gw_port['subnets']: + subnet['gateway_ip'] = None + router.process(self.agent) + + self.assertIsNone(device.route.get_gateway()) + + def _make_bridge(self): + bridge = framework.get_ovs_bridge(tests_base.get_rand_name()) + bridge.create() + self.addCleanup(bridge.destroy) + return bridge + + def test_external_network_bridge_change(self): + bridge1, bridge2 = self._make_bridge(), self._make_bridge() + self.agent.conf.set_override('external_network_bridge', + bridge1.br_name) + router_info = self.generate_router_info(False) + router = self.manage_router(self.agent, router_info) + gw_port = router.router['gw_port'] + gw_inf_name = router.get_external_device_name(gw_port['id']) + + self.assertIn(gw_inf_name, + [v.port_name for v in bridge1.get_vif_ports()]) + # changing the external_network_bridge should have no impact since + # the interface exists. 
+ self.agent.conf.set_override('external_network_bridge', + bridge2.br_name) + self.manage_router(self.agent, router_info) + self.assertIn(gw_inf_name, + [v.port_name for v in bridge1.get_vif_ports()]) + self.assertNotIn(gw_inf_name, + [v.port_name for v in bridge2.get_vif_ports()]) + namespaces.Namespace.delete(router.router_namespace) + self.manage_router(self.agent, router_info) + self.assertIn(gw_inf_name, + [v.port_name for v in bridge2.get_vif_ports()]) + self.assertNotIn(gw_inf_name, + [v.port_name for v in bridge1.get_vif_ports()]) + def test_legacy_router_ns_rebuild(self): router_info = self.generate_router_info(False) router = self.manage_router(self.agent, router_info) @@ -85,7 +137,7 @@ gw_inf_name = router.get_external_device_name(gw_port['id']) gw_device = ip_lib.IPDevice(gw_inf_name, namespace=router.ns_name) router_ports = [gw_device] - for i_port in router_info.get(l3_constants.INTERFACE_KEY, []): + for i_port in router_info.get(lib_constants.INTERFACE_KEY, []): interface_name = router.get_internal_device_name(i_port['id']) router_ports.append( ip_lib.IPDevice(interface_name, namespace=router.ns_name)) @@ -213,7 +265,7 @@ router_ip)).machines dst_fip = '19.4.4.10' - router.router[l3_constants.FLOATINGIP_KEY] = [] + router.router[lib_constants.FLOATINGIP_KEY] = [] self._add_fip(router, dst_fip, fixed_address=dst_machine.ip) router.process(self.agent) @@ -228,7 +280,7 @@ src_machine, dst_machine, dst_fip = ( self._setup_fip_with_fixed_ip_from_same_subnet(enable_snat=True)) protocol_port = net_helpers.get_free_namespace_port( - l3_constants.PROTO_NAME_TCP, dst_machine.namespace) + lib_constants.PROTO_NAME_TCP, dst_machine.namespace) # client sends to fip netcat = net_helpers.NetcatTester( src_machine.namespace, dst_machine.namespace, @@ -252,15 +304,15 @@ router_info = self.generate_router_info(enable_ha=False, num_internal_ports=2) address_scope1 = { - str(l3_constants.IP_VERSION_4): internal_address_scope1} + str(lib_constants.IP_VERSION_4): internal_address_scope1} address_scope2 = { - str(l3_constants.IP_VERSION_4): internal_address_scope2} + str(lib_constants.IP_VERSION_4): internal_address_scope2} if gw_address_scope: router_info['gw_port']['address_scopes'] = { - str(l3_constants.IP_VERSION_4): gw_address_scope} - router_info[l3_constants.INTERFACE_KEY][0]['address_scopes'] = ( + str(lib_constants.IP_VERSION_4): gw_address_scope} + router_info[lib_constants.INTERFACE_KEY][0]['address_scopes'] = ( address_scope1) - router_info[l3_constants.INTERFACE_KEY][1]['address_scopes'] = ( + router_info[lib_constants.INTERFACE_KEY][1]['address_scopes'] = ( address_scope2) router = self.manage_router(self.agent, router_info) @@ -289,8 +341,8 @@ 'scope1', 'scope1') # Internal networks that are in the same address scope can connect to # each other - net_helpers.assert_ping(test_machine1.namespace, test_machine2.ip, 5) - net_helpers.assert_ping(test_machine2.namespace, test_machine1.ip, 5) + net_helpers.assert_ping(test_machine1.namespace, test_machine2.ip) + net_helpers.assert_ping(test_machine2.namespace, test_machine1.ip) def test_connection_from_diff_address_scope(self): test_machine1, test_machine2, _ = self._setup_address_scope( @@ -304,7 +356,7 @@ (machine_same_scope, machine_diff_scope, router) = self._setup_address_scope('scope1', 'scope2', 'scope1') - router.router[l3_constants.FLOATINGIP_KEY] = [] + router.router[lib_constants.FLOATINGIP_KEY] = [] fip_same_scope = '19.4.4.10' self._add_fip(router, fip_same_scope, fixed_address=machine_same_scope.ip, @@ -320,8 +372,8 @@ 
src_machine = self.useFixture( machine_fixtures.FakeMachine(br_ex, '19.4.4.12/24')) # Floating ip should work regardless of address scope - net_helpers.assert_ping(src_machine.namespace, fip_same_scope, 5) - net_helpers.assert_ping(src_machine.namespace, fip_diff_scope, 5) + net_helpers.assert_ping(src_machine.namespace, fip_same_scope) + net_helpers.assert_ping(src_machine.namespace, fip_diff_scope) def test_direct_route_for_address_scope(self): (machine_same_scope, machine_diff_scope, @@ -336,8 +388,7 @@ machine_fixtures.FakeMachine(br_ex, '19.4.4.12/24', gw_ip)) # For the internal networks that are in the same address scope as # the external network, they can route to the external network directly - net_helpers.assert_ping( - src_machine.namespace, machine_same_scope.ip, 5) + net_helpers.assert_ping(src_machine.namespace, machine_same_scope.ip) # For the internal networks that are not in the same address scope as # the external network, SNAT will be used. A direct route will not work # here. @@ -347,7 +398,7 @@ (machine_same_scope, machine_diff_scope, router) = self._setup_address_scope('scope1', 'scope2', 'scope1') - router.router[l3_constants.FLOATINGIP_KEY] = [] + router.router[lib_constants.FLOATINGIP_KEY] = [] fip = '19.4.4.11' self._add_fip(router, fip, fixed_address=machine_diff_scope.ip, @@ -356,8 +407,8 @@ # For the internal networks that are in the same address scope as # the external network, they should be able to reach the floating ip - net_helpers.assert_ping(machine_same_scope.namespace, fip, 5) + net_helpers.assert_ping(machine_same_scope.namespace, fip) # For the port with the fip, it should be able to reach the internal # networks that are in the same address scope as the external network net_helpers.assert_ping(machine_diff_scope.namespace, - machine_same_scope.ip, 5) + machine_same_scope.ip) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_async_process.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_async_process.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_async_process.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_async_process.py 2016-08-03 20:10:34.000000000 +0000 @@ -18,6 +18,7 @@ from neutron._i18n import _ from neutron.agent.linux import async_process from neutron.agent.linux import utils +from neutron.common import utils as common_utils from neutron.tests import base @@ -72,7 +73,7 @@ self._check_stdout(proc) pid = proc.pid utils.execute(['kill', '-9', pid]) - utils.wait_until_true( + common_utils.wait_until_true( lambda: proc.is_active() and pid != proc.pid, timeout=5, sleep=0.01, diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_bridge_lib.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_bridge_lib.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_bridge_lib.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_bridge_lib.py 2016-08-29 20:05:49.000000000 +0000 @@ -50,7 +50,7 @@ t2 = bridge_lib.get_interface_bridged_time(port) self.assertIsNotNone(t1) self.assertIsNotNone(t2) - self.assertGreater(t2, t1) + self.assertGreaterEqual(t2, t1) def test_get_interface_bridge(self): bridge = bridge_lib.BridgeDevice.get_interface_bridge( diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_dhcp.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_dhcp.py --- 
neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_dhcp.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_dhcp.py 2016-08-03 20:10:34.000000000 +0000 @@ -16,11 +16,12 @@ from oslo_config import cfg from neutron.agent.common import config -from neutron.agent.dhcp import config as dhcp_conf from neutron.agent.linux import dhcp from neutron.agent.linux import interface from neutron.agent.linux import ip_lib -from neutron.common import config as common_conf +from neutron.common import utils as common_utils +from neutron.conf.agent import dhcp as dhcp_conf +from neutron.conf import common as common_conf from neutron.tests import base as tests_base from neutron.tests.common import net_helpers from neutron.tests.functional import base as functional_base @@ -79,7 +80,10 @@ self.assertEqual(2, len(devices)) # setting up dhcp for the network dev_mgr.setup(tests_base.AttributeDict(network)) + common_utils.wait_until_true( + lambda: 1 == len(ipw.get_devices(exclude_loopback=True)), + timeout=5, + sleep=0.1, + exception=RuntimeError("only one non-loopback device must remain")) devices = ipw.get_devices(exclude_loopback=True) - # only one non-loopback device should remain - self.assertEqual(1, len(devices)) self.assertEqual("tapfoo_port_id", devices[0].name) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_keepalived.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_keepalived.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_keepalived.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_keepalived.py 2016-08-03 20:10:34.000000000 +0000 @@ -19,6 +19,7 @@ from neutron.agent.linux import external_process from neutron.agent.linux import keepalived from neutron.agent.linux import utils +from neutron.common import utils as common_utils from neutron.tests.functional.agent.linux import helpers from neutron.tests.functional import base from neutron.tests.unit.agent.linux import test_keepalived @@ -42,7 +43,7 @@ def _spawn_keepalived(self, keepalived_manager): keepalived_manager.spawn() process = keepalived_manager.get_process() - utils.wait_until_true( + common_utils.wait_until_true( lambda: process.active, timeout=5, sleep=0.01, @@ -63,7 +64,7 @@ # Exit the process, and see that when it comes back # It's indeed a different process utils.execute(['kill', exit_code, pid], run_as_root=True) - utils.wait_until_true( + common_utils.wait_until_true( lambda: process.active and pid != process.pid, timeout=5, sleep=0.01, diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py 2016-06-06 16:54:53.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py 2016-08-03 20:10:34.000000000 +0000 @@ -27,7 +27,7 @@ from neutron.agent.common import ovs_lib from neutron.agent.linux import ovsdb_monitor -from neutron.agent.linux import utils +from neutron.common import utils from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base as linux_base from neutron.tests.functional import base as functional_base diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_process_monitor.py 
neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_process_monitor.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_process_monitor.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_process_monitor.py 2016-08-03 20:10:34.000000000 +0000 @@ -18,7 +18,7 @@ from six import moves from neutron.agent.linux import external_process -from neutron.agent.linux import utils +from neutron.common import utils from neutron.tests import base from neutron.tests.functional.agent.linux import simple_daemon diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_tc_lib.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_tc_lib.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_tc_lib.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_tc_lib.py 2016-08-03 20:10:34.000000000 +0000 @@ -13,14 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. -from oslo_log import log as logging - from neutron.agent.linux import ip_lib from neutron.agent.linux import tc_lib from neutron.tests.functional import base as functional_base -LOG = logging.getLogger(__name__) - TEST_HZ_VALUE = 250 LATENCY = 50 BW_LIMIT = 1024 diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_utils.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_utils.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/linux/test_utils.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/linux/test_utils.py 2016-08-03 20:10:34.000000000 +0000 @@ -14,11 +14,9 @@ import functools -import eventlet -import testtools - from neutron.agent.linux import async_process from neutron.agent.linux import utils +from neutron.common import utils as common_utils from neutron.tests.functional.agent.linux import test_async_process from neutron.tests.functional import base as functional_base @@ -35,13 +33,6 @@ self.assertTrue(utils.pid_invoked_with_cmdline(pid, cmd)) self.assertEqual([], utils.get_cmdline_from_pid(-1)) - def test_wait_until_true_predicate_succeeds(self): - utils.wait_until_true(lambda: True) - - def test_wait_until_true_predicate_fails(self): - with testtools.ExpectedException(eventlet.timeout.Timeout): - utils.wait_until_true(lambda: False, 2) - class TestGetRootHelperChildPid(functional_base.BaseSudoTestCase): def _addcleanup_sleep_process(self, parent_pid): @@ -56,7 +47,7 @@ def test_get_root_helper_child_pid_returns_first_child(self): """Test that the first child, not lowest child pid is returned. - Test creates following proccess tree: + Test creates following process tree: sudo + | +--rootwrap + @@ -83,7 +74,7 @@ # don't want to use proc.start(block=True) as that uses # get_root_helper_child_pid (The method under test) internally. 
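The docstring above lays out the sudo -> rootwrap -> sleep tree that get_root_helper_child_pid has to walk, returning the first child rather than the lowest pid. A standalone sketch of listing direct children the way such helpers typically do, via ps (the function name is illustrative):

    import subprocess

    def direct_children(pid):
        # 'ps --ppid <pid> -o pid=' prints only the direct children's pids.
        out = subprocess.check_output(
            ['ps', '--ppid', str(pid), '-o', 'pid=']).decode()
        return [int(child) for child in out.split()]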
sudo_pid = proc._process.pid - utils.wait_until_true( + common_utils.wait_until_true( functools.partial( wait_for_sleep_is_spawned, sudo_pid), diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/ovsdb/test_impl_idl.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/ovsdb/test_impl_idl.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/ovsdb/test_impl_idl.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/ovsdb/test_impl_idl.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,75 @@ +# Copyright (c) 2016 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.agent.common import ovs_lib +from neutron.agent.ovsdb import api +from neutron.agent.ovsdb import impl_idl +from neutron.tests import base as test_base +from neutron.tests.common import net_helpers +from neutron.tests.functional import base + + +# NOTE(twilson) functools.partial does not work for this +def trpatch(*args, **kwargs): + def wrapped(fn): + return mock.patch.object(impl_idl.NeutronOVSDBTransaction, + *args, **kwargs)(fn) + return wrapped + + +class ImplIdlTestCase(base.BaseSudoTestCase): + def setUp(self): + super(ImplIdlTestCase, self).setUp() + self.config(group='OVS', ovsdb_interface='native') + self.ovs = ovs_lib.BaseOVS() + self.brname = test_base.get_rand_device_name(net_helpers.BR_PREFIX) + # Make sure exceptions pass through by calling do_post_commit directly + mock.patch.object( + impl_idl.NeutronOVSDBTransaction, "post_commit", + side_effect=impl_idl.NeutronOVSDBTransaction.do_post_commit, + autospec=True).start() + + def _add_br(self): + # NOTE(twilson) we will be raising exceptions with add_br, so schedule + # cleanup before that. 
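(The setUp above routes NeutronOVSDBTransaction.post_commit straight to do_post_commit with mock.patch.object, so exceptions raised during commit reach the test instead of being swallowed. A condensed sketch of that pattern on a stand-in class; Widget and its methods are hypothetical, not part of neutron:

import mock

class Widget(object):
    def run(self):
        try:
            self._do_run()  # errors are normally swallowed here
        except Exception:
            pass

    def _do_run(self):
        raise RuntimeError("boom")

# autospec=True keeps the original signature (including `self`), and
# side_effect forwards each call, so Widget().run() now raises.
mock.patch.object(Widget, "run",
                  side_effect=Widget._do_run, autospec=True).start()
)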
+ self.addCleanup(self.ovs.delete_bridge, self.brname) + ovsdb = self.ovs.ovsdb + with ovsdb.transaction(check_error=True) as tr: + tr.add(ovsdb.add_br(self.brname)) + return tr + + def _add_br_and_test(self): + self._add_br() + ofport = self.ovs.db_get_val("Interface", self.brname, "ofport") + self.assertTrue(int(ofport)) + self.assertTrue(ofport > -1) + + def test_post_commit_vswitchd_completed_no_failures(self): + self._add_br_and_test() + + @trpatch("vswitchd_has_completed", return_value=True) + @trpatch("post_commit_failed_interfaces", return_value=["failed_if1"]) + @trpatch("timeout_exceeded", return_value=False) + def test_post_commit_vswitchd_completed_failures(self, *args): + self.assertRaises(impl_idl.VswitchdInterfaceAddException, self._add_br) + + @trpatch("vswitchd_has_completed", return_value=False) + def test_post_commit_vswitchd_incomplete_timeout(self, *args): + # Due to timing issues we may rarely hit the global timeout, which + # raises RuntimeError to match the vsctl implementation + self.ovs.vsctl_timeout = 3 + self.assertRaises((api.TimeoutException, RuntimeError), self._add_br) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/test_dhcp_agent.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/test_dhcp_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/test_dhcp_agent.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/test_dhcp_agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -32,7 +32,6 @@ from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.agent.linux import utils -from neutron.common import constants from neutron.common import utils as common_utils from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import helpers @@ -112,7 +111,7 @@ "ipv6_ra_mode": None, "ipv6_address_mode": None}) if ip_version == 6: - sn_dict['ipv6_address_mode'] = constants.DHCPV6_STATEFUL + sn_dict['ipv6_address_mode'] = lib_const.DHCPV6_STATEFUL return sn_dict def create_port_dict(self, network_id, subnet_id, mac_address, @@ -201,7 +200,7 @@ predicate = lambda: len( self._ip_list_for_vif(vif_name, network.namespace)) - utils.wait_until_true(predicate, 10) + common_utils.wait_until_true(predicate, 10) ip_list = self._ip_list_for_vif(vif_name, network.namespace) cidr = ip_list[0].get('cidr') @@ -286,7 +285,7 @@ self.addCleanup(self.agent.disable_isolated_metadata_proxy, network) self.configure_dhcp_for_network(network=network) pm = self._get_metadata_proxy_process(network) - utils.wait_until_true( + common_utils.wait_until_true( lambda: pm.active, timeout=5, sleep=0.01, @@ -298,7 +297,7 @@ old_pid = pm.pid utils.execute(['kill', '-9', old_pid], run_as_root=True) - utils.wait_until_true( + common_utils.wait_until_true( lambda: pm.active and pm.pid != old_pid, timeout=5, sleep=0.1, @@ -309,7 +308,7 @@ self.conf.set_override('enable_isolated_metadata', False) self.configure_dhcp_for_network(network=network) - utils.wait_until_true( + common_utils.wait_until_true( lambda: not pm.active, timeout=5, sleep=0.1, @@ -325,7 +324,7 @@ self.agent.start_ready_ports_loop() self.configure_dhcp_for_network(network) ports_to_send = {p.id for p in network.ports} - utils.wait_until_true( + common_utils.wait_until_true( lambda: self.mock_plugin_api.dhcp_ready_on_ports.called, timeout=1, sleep=0.1, diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/test_firewall.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/test_firewall.py --- 
neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/test_firewall.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/test_firewall.py 2016-08-29 20:05:49.000000000 +0000 @@ -19,7 +19,6 @@ import copy import functools -import random import netaddr from neutron_lib import constants @@ -30,11 +29,13 @@ from neutron.agent import firewall from neutron.agent.linux import iptables_firewall from neutron.agent.linux import openvswitch_firewall -from neutron.agent import securitygroups_rpc as sg_cfg from neutron.cmd.sanity import checks +from neutron.conf.agent import securitygroups_rpc as security_config from neutron.tests.common import conn_testers +from neutron.tests.common import helpers from neutron.tests.functional.agent.linux import base as linux_base from neutron.tests.functional import base +from neutron.tests.functional import constants as test_constants LOG = logging.getLogger(__name__) @@ -50,7 +51,6 @@ conn_testers.ConnectionTester.UDP: conn_testers.ConnectionTester.TCP} DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' -VLAN_COUNT = 4096 def skip_if_firewall(firewall_name): @@ -91,10 +91,10 @@ scenarios = scenarios_iptables + scenarios_ovs_fw_interfaces ip_cidr = None - vlan_range = set(range(VLAN_COUNT)) + vlan_range = set(range(test_constants.VLAN_COUNT)) def setUp(self): - cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP') + security_config.register_securitygroups_opts() super(BaseFirewallTestCase, self).setUp() self.tester, self.firewall = getattr(self, self.initialize)() if self.firewall_name == "openvswitch": @@ -124,7 +124,7 @@ self.skipTest("Open vSwitch with conntrack is not installed " "on this machine. To run tests for OVS/CT firewall," " please meet the requirements (kernel>=4.3, " - "OVS>=2.5. More info at" + "OVS>=2.5). 
More info at " "https://github.com/openvswitch/ovs/blob/master/" "FAQ.md") tester = self.useFixture( @@ -133,18 +133,12 @@ return tester, firewall_drv def assign_vlan_to_peers(self): - vlan = self.get_not_used_vlan() + vlan = helpers.get_not_used_vlan(self.firewall.int_br.br, + self.vlan_range) LOG.debug("Using %d vlan tag for this test", vlan) self.tester.set_vm_tag(vlan) self.tester.set_peer_tag(vlan) - def get_not_used_vlan(self): - port_vlans = self.firewall.int_br.br.ovsdb.db_find( - 'Port', ('tag', '!=', []), columns=['tag']).execute() - used_vlan_tags = {val['tag'] for val in port_vlans} - available_vlans = self.vlan_range - used_vlan_tags - return random.choice(list(available_vlans)) - @staticmethod def _create_port_description(port_id, ip_addresses, mac_address, sg_ids): return {'admin_state_up': True, diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/test_l2_ovs_agent.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/test_l2_ovs_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/test_l2_ovs_agent.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/test_l2_ovs_agent.py 2016-08-03 20:10:34.000000000 +0000 @@ -17,7 +17,8 @@ import time from eventlet.timeout import Timeout -from neutron.agent.linux import utils as agent_utils + +from neutron.common import utils from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.tests.common import net_helpers from neutron.tests.functional.agent.l2 import base @@ -60,7 +61,7 @@ if port.port_name in portnames] #wait until ports are marked dead, with drop flow - agent_utils.wait_until_true( + utils.wait_until_true( lambda: num_ports_with_drop_flows( ofports, self.agent.int_br.dump_flows( diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/test_ovs_flows.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/test_ovs_flows.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/test_ovs_flows.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/test_ovs_flows.py 2016-08-29 20:05:49.000000000 +0000 @@ -182,18 +182,18 @@ self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) - net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) + net_helpers.assert_ping(self.src_namespace, self.dst_addr) def test_mac_spoof_blocks_wrong_mac(self): self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr]) self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) - net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) + net_helpers.assert_ping(self.src_namespace, self.dst_addr) # changing the allowed mac should stop the port from working self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr], mac='00:11:22:33:44:55') - net_helpers.assert_no_ping(self.src_namespace, self.dst_addr, count=2) + net_helpers.assert_no_ping(self.src_namespace, self.dst_addr) def test_arp_spoof_doesnt_block_ipv6(self): self.src_addr = '2000::1' @@ -205,7 +205,7 @@ # make sure the IPv6 addresses are ready before pinging self.src_p.addr.wait_until_address_ready(self.src_addr) self.dst_p.addr.wait_until_address_ready(self.dst_addr) - net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) + net_helpers.assert_ping(self.src_namespace, self.dst_addr) def 
test_arp_spoof_blocks_response(self): # this will prevent the destination from responding to the ARP @@ -250,7 +250,7 @@ self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) - net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) + net_helpers.assert_ping(self.src_namespace, self.dst_addr) def test_arp_spoof_icmpv6_neigh_advt_allowed_address_pairs(self): self.src_addr = '2000::1' @@ -262,14 +262,14 @@ # make sure the IPv6 addresses are ready before pinging self.src_p.addr.wait_until_address_ready(self.src_addr) self.dst_p.addr.wait_until_address_ready(self.dst_addr) - net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) + net_helpers.assert_ping(self.src_namespace, self.dst_addr) def test_arp_spoof_allowed_address_pairs_0cidr(self): self._setup_arp_spoof_for_port(self.dst_p.name, ['9.9.9.9/0', '1.2.3.4']) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) - net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) + net_helpers.assert_ping(self.src_namespace, self.dst_addr) def test_arp_spoof_disable_port_security(self): # block first and then disable port security to make sure old rules @@ -279,7 +279,7 @@ psec=False) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) - net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) + net_helpers.assert_ping(self.src_namespace, self.dst_addr) def test_arp_spoof_disable_network_port(self): # block first and then disable port security to make sure old rules @@ -290,7 +290,7 @@ device_owner=n_const.DEVICE_OWNER_ROUTER_GW) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) - net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) + net_helpers.assert_ping(self.src_namespace, self.dst_addr) def _setup_arp_spoof_for_port(self, port, addrs, psec=True, device_owner='nobody', mac=None): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/test_ovs_lib.py neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/test_ovs_lib.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/agent/test_ovs_lib.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/agent/test_ovs_lib.py 2016-08-29 20:05:49.000000000 +0000 @@ -380,16 +380,20 @@ port_name = base.get_rand_name(prefix=net_helpers.PORT_PREFIX) br.add_port(port_name) self.ovs.set_db_attribute('Port', port_name, 'tag', 42) - tags = self.ovs.ovsdb.db_list('Port', columns=['tag']).execute() + + # wrap list/find in transaction so we get a single isolated snapshot + with self.ovs.ovsdb.transaction(check_error=True) as txn: + tags = txn.add(self.ovs.ovsdb.db_list('Port', columns=['tag'])) + len_0_list = txn.add(self.ovs.ovsdb.db_find( + 'Port', ('tag', '!=', []), columns=['tag'])) + single_value = txn.add(self.ovs.ovsdb.db_find( + 'Port', ('tag', '=', 42), columns=['tag'])) + # Make sure that there is data to query. 
# It should be, but let's be a little paranoid here as otherwise # the test makes no sense - tags_present = [t for t in tags if t['tag'] != []] + tags_present = [t for t in tags.result if t['tag'] != []] self.assertTrue(tags_present) tags_42 = [t for t in tags_present if t['tag'] == 42] - single_value = self.ovs.ovsdb.db_find( - 'Port', ('tag', '=', 42), columns=['tag']).execute() - self.assertEqual(tags_42, single_value) - len_0_list = self.ovs.ovsdb.db_find( - 'Port', ('tag', '!=', []), columns=['tag']).execute() - self.assertEqual(tags_present, len_0_list) + self.assertEqual(tags_42, single_value.result) + self.assertItemsEqual(len_0_list.result, tags_present) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/cmd/test_ipset_cleanup.py neutron-9.0.0~b3~dev557/neutron/tests/functional/cmd/test_ipset_cleanup.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/cmd/test_ipset_cleanup.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/cmd/test_ipset_cleanup.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,31 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.agent.linux import ipset_manager +from neutron.cmd import ipset_cleanup +from neutron.conf.agent import cmd +from neutron.tests import base + + +class TestIPSetCLIConfig(base.BaseTestCase): + + def setup_config(self, args=None): + self.conf = ipset_cleanup.setup_conf() + super(TestIPSetCLIConfig, self).setup_config(args=args) + + def test_ipset_opts_registration(self): + self.assertFalse(self.conf.allsets) + self.assertFalse(self.conf.force) + self.assertEqual(ipset_manager.NET_PREFIX, self.conf.prefix) + # to unregister opts + self.conf.reset() + self.conf.unregister_opts(cmd.ip_opts) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/cmd/test_netns_cleanup.py neutron-9.0.0~b3~dev557/neutron/tests/functional/cmd/test_netns_cleanup.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/cmd/test_netns_cleanup.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/cmd/test_netns_cleanup.py 2016-08-03 20:10:34.000000000 +0000 @@ -19,6 +19,8 @@ from neutron.agent.linux import dhcp from neutron.agent.linux import ip_lib from neutron.cmd import netns_cleanup +from neutron.conf.agent import cmd +from neutron.tests import base as basetest from neutron.tests.common import net_helpers from neutron.tests.functional import base @@ -64,3 +66,17 @@ namespaces_now = ip_lib.IPWrapper.get_namespaces() self.assertNotIn(l3_namespace, namespaces_now) self.assertNotIn(dhcp_namespace, namespaces_now) + + +class TestNETNSCLIConfig(basetest.BaseTestCase): + + def setup_config(self, args=None): + self.conf = netns_cleanup.setup_conf() + super(TestNETNSCLIConfig, self).setup_config(args=args) + + def test_netns_opts_registration(self): + self.assertFalse(self.conf.force) + self.assertIsNone(self.conf.get('agent_type')) + # to unregister opts + self.conf.reset() + self.conf.unregister_opts(cmd.netns_opts) diff -Nru 
neutron-9.0.0~b2~dev280/neutron/tests/functional/cmd/test_ovs_cleanup.py neutron-9.0.0~b3~dev557/neutron/tests/functional/cmd/test_ovs_cleanup.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/cmd/test_ovs_cleanup.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/cmd/test_ovs_cleanup.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,28 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.cmd import ovs_cleanup +from neutron.conf.agent import cmd +from neutron.tests import base + + +class TestOVSCLIConfig(base.BaseTestCase): + + def setup_config(self, args=None): + self.conf = ovs_cleanup.setup_conf() + super(TestOVSCLIConfig, self).setup_config(args=args) + + def test_ovs_opts_registration(self): + self.assertFalse(self.conf.ovs_all_ports) + # to unregister opts + self.conf.reset() + self.conf.unregister_opts(cmd.ovs_opts) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/common/test_utils.py neutron-9.0.0~b3~dev557/neutron/tests/functional/common/test_utils.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/common/test_utils.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/common/test_utils.py 2016-08-03 20:10:34.000000000 +0000 @@ -10,8 +10,10 @@ # License for the specific language governing permissions and limitations # under the License. +import eventlet import os.path import stat +import testtools from neutron.common import utils from neutron.tests import base @@ -49,3 +51,12 @@ file_mode = 0o777 utils.replace_file(self.file_name, self.data, file_mode) self._verify_result(file_mode) + + +class TestWaitUntilTrue(base.BaseTestCase): + def test_wait_until_true_predicate_succeeds(self): + utils.wait_until_true(lambda: True) + + def test_wait_until_true_predicate_fails(self): + with testtools.ExpectedException(eventlet.timeout.Timeout): + utils.wait_until_true(lambda: False, 2) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/constants.py neutron-9.0.0~b3~dev557/neutron/tests/functional/constants.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/constants.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/constants.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,13 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
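(The three cleanup-CLI suites above — ipset_cleanup, netns_cleanup, ovs_cleanup — follow one template: build the tool's configuration via its setup_conf() inside setup_config(), assert the registered defaults, then reset and unregister the opts so later tests can register them again. A generic sketch of that shape; some_cleanup and some_opts are placeholders, not real neutron names:

from neutron.conf.agent import cmd
from neutron.tests import base

class TestSomeCLIConfig(base.BaseTestCase):

    def setup_config(self, args=None):
        self.conf = some_cleanup.setup_conf()  # placeholder module
        super(TestSomeCLIConfig, self).setup_config(args=args)

    def test_opts_registration(self):
        self.assertFalse(self.conf.force)
        # unregister the opts so other tests may re-register them
        self.conf.reset()
        self.conf.unregister_opts(cmd.some_opts)  # placeholder opt list
)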
+ +VLAN_COUNT = 4096 diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/db/migrations/test_3b935b28e7a0_migrate_to_pluggable_ipam.py neutron-9.0.0~b3~dev557/neutron/tests/functional/db/migrations/test_3b935b28e7a0_migrate_to_pluggable_ipam.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/db/migrations/test_3b935b28e7a0_migrate_to_pluggable_ipam.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/db/migrations/test_3b935b28e7a0_migrate_to_pluggable_ipam.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,139 @@ +# Copyright 2016 Infoblox Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from oslo_db.sqlalchemy import utils as db_utils +from oslo_utils import uuidutils + +from neutron.tests.functional.db import test_migrations + + +class MigrationToPluggableIpamMixin(object): + """Validates data migration to Pluggable IPAM.""" + + _standard_attribute_id = 0 + + def _gen_attr_id(self, engine, type): + self._standard_attribute_id += 1 + standardattributes = db_utils.get_table(engine, 'standardattributes') + engine.execute(standardattributes.insert().values({ + 'id': self._standard_attribute_id, 'resource_type': type})) + return self._standard_attribute_id + + def _create_subnets(self, engine, data): + """Create subnets and save subnet ids in data""" + networks = db_utils.get_table(engine, 'networks') + subnets = db_utils.get_table(engine, 'subnets') + pools = db_utils.get_table(engine, 'ipallocationpools') + allocations = db_utils.get_table(engine, 'ipallocations') + + for cidr in data: + ip_version = 6 if ':' in cidr else 4 + # Save generated id in incoming dict to simplify validations + network_id = uuidutils.generate_uuid() + network_dict = dict( + id=network_id, + standard_attr_id=self._gen_attr_id(engine, 'networks')) + engine.execute(networks.insert().values(network_dict)) + + data[cidr]['id'] = uuidutils.generate_uuid() + subnet_dict = dict(id=data[cidr]['id'], + cidr=cidr, + ip_version=ip_version, + standard_attr_id=self._gen_attr_id(engine, + 'subnets')) + engine.execute(subnets.insert().values(subnet_dict)) + + if data[cidr].get('pools'): + for pool in data[cidr]['pools']: + pool_dict = dict(id=uuidutils.generate_uuid(), + first_ip=pool['first_ip'], + last_ip=pool['last_ip'], + subnet_id=data[cidr]['id']) + engine.execute(pools.insert().values(pool_dict)) + + if data[cidr].get('allocations'): + for ip in data[cidr]['allocations']: + ip_dict = dict(ip_address=ip, + subnet_id=data[cidr]['id'], + network_id=network_id) + engine.execute(allocations.insert().values(ip_dict)) + + def _pre_upgrade_3b935b28e7a0(self, engine): + data = { + '172.23.0.0/16': { + 'pools': [{'first_ip': '172.23.0.2', + 'last_ip': '172.23.255.254'}], + 'allocations': ('172.23.0.2', '172.23.245.2')}, + '192.168.40.0/24': { + 'pools': [{'first_ip': '192.168.40.2', + 'last_ip': '192.168.40.100'}, + {'first_ip': '192.168.40.105', + 'last_ip': '192.168.40.150'}, + {'first_ip': '192.168.40.155', + 'last_ip': '192.168.40.157'}, + ], + 'allocations': 
('192.168.40.2', '192.168.40.3', + '192.168.40.15', '192.168.40.60')}, 'fafc:babc::/64': { 'pools': [{'first_ip': 'fafc:babc::2', + 'last_ip': 'fafc:babc::6:fe00', + }], + 'allocations': ('fafc:babc::3',)}} self._create_subnets(engine, data) + return data + + def _check_3b935b28e7a0(self, engine, data): + subnets = db_utils.get_table(engine, 'ipamsubnets') + pools = db_utils.get_table(engine, 'ipamallocationpools') + allocations = db_utils.get_table(engine, 'ipamallocations') + + ipam_subnets = engine.execute(subnets.select()).fetchall() + # Count of ipam subnets should match count of usual subnets + self.assertEqual(len(data), len(ipam_subnets)) + neutron_to_ipam_id = {subnet.neutron_subnet_id: subnet.id + for subnet in ipam_subnets} + for cidr in data: + self.assertIn(data[cidr]['id'], neutron_to_ipam_id) + + ipam_subnet_id = neutron_to_ipam_id[data[cidr]['id']] + # Validate ip allocations are migrated correctly + ipam_allocations = engine.execute(allocations.select().where( + allocations.c.ipam_subnet_id == ipam_subnet_id)).fetchall() + for ipam_allocation in ipam_allocations: + self.assertIn(ipam_allocation.ip_address, + data[cidr]['allocations']) + self.assertEqual(len(data[cidr]['allocations']), + len(ipam_allocations)) + + # Validate allocation pools are migrated correctly + ipam_pools = engine.execute(pools.select().where( + pools.c.ipam_subnet_id == ipam_subnet_id)).fetchall() + # Convert to dict for easier lookup + pool_dict = {pool.first_ip: pool.last_ip for pool in ipam_pools} + for p in data[cidr]['pools']: + self.assertIn(p['first_ip'], pool_dict) + self.assertEqual(p['last_ip'], pool_dict[p['first_ip']]) + self.assertEqual(len(data[cidr]['pools']), + len(ipam_pools)) + + +class TestMigrationToPluggableIpamMysql(MigrationToPluggableIpamMixin, + test_migrations.TestWalkMigrationsMysql): + pass + + +class TestMigrationToPluggableIpamPsql(MigrationToPluggableIpamMixin, + test_migrations.TestWalkMigrationsPsql): + pass diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/db/migrations/test_a8b517cff8ab_add_routerport_bindings_for_ha.py neutron-9.0.0~b3~dev557/neutron/tests/functional/db/migrations/test_a8b517cff8ab_add_routerport_bindings_for_ha.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/db/migrations/test_a8b517cff8ab_add_routerport_bindings_for_ha.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/db/migrations/test_a8b517cff8ab_add_routerport_bindings_for_ha.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,102 @@ +# Copyright 2016 Business Cat is Very Serious +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
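(Both the pluggable-IPAM test above and the HA-router-port test that follows plug into the walk-migrations framework purely by naming convention: _pre_upgrade_<revision> seeds data before that alembic revision is applied and returns it, and _check_<revision> validates the result afterwards. Roughly how the framework dispatches, as a sketch consistent with the _migrate_up helper shown later in this diff:

def _migrate_up(self, config, engine, dest, curr, with_data=False):
    if with_data:
        data = None
        # look up the optional per-revision seeding hook
        pre_upgrade = getattr(self, "_pre_upgrade_%s" % dest, None)
        if pre_upgrade:
            data = pre_upgrade(engine)
    migration.do_alembic_command(config, 'upgrade', dest)
    if with_data:
        # look up the optional per-revision validation hook
        check = getattr(self, "_check_%s" % dest, None)
        if check and data:
            check(engine, data)
)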
+# + +from neutron_lib import constants +from oslo_db.sqlalchemy import utils as db_utils +from oslo_utils import uuidutils + +from neutron.tests.functional.db import test_migrations + + +class HARouterPortMigrationMixin(object): + """Validates HA port to router port migration.""" + + def _create_so(self, o_type, values): + """create standard attr object.""" + stan = db_utils.get_table(self.engine, 'standardattributes') + # find next available id taking into account existing records + rec_ids = [r.id for r in self.engine.execute(stan.select()).fetchall()] + next_id = max([0] + rec_ids) + 1 + self.engine.execute(stan.insert().values({'id': next_id, + 'resource_type': o_type})) + values['standard_attr_id'] = next_id + return self._create_rec(o_type, values) + + def _create_rec(self, o_type, values): + otable = db_utils.get_table(self.engine, o_type) + self.engine.execute(otable.insert().values(values)) + + def _make_router_agents_and_ports(self, router_id, network_id, + add_binding): + self._create_so('routers', {'id': router_id}) + # each router gets a couple of agents + for _ in range(2): + port_id = uuidutils.generate_uuid() + self._create_so('ports', {'id': port_id, 'network_id': network_id, + 'mac_address': port_id[0:31], + 'admin_state_up': True, + 'device_id': router_id, + 'device_owner': 'network', + 'status': 'ACTIVE'}) + agent_id = uuidutils.generate_uuid() + timestamp = '2000-04-06T14:34:23' + self._create_rec('agents', {'id': agent_id, 'topic': 'x', + 'agent_type': 'L3', + 'binary': 'x', + 'host': agent_id, + 'created_at': timestamp, + 'started_at': timestamp, + 'heartbeat_timestamp': timestamp, + 'configurations': ''}) + self._create_rec('ha_router_agent_port_bindings', + {'port_id': port_id, 'router_id': router_id, + 'l3_agent_id': agent_id}) + if add_binding: + ptype = constants.DEVICE_OWNER_ROUTER_HA_INTF + self._create_rec('routerports', + {'router_id': router_id, 'port_id': port_id, + 'port_type': ptype}) + + def _create_ha_routers_with_ports(self, engine): + network_id = uuidutils.generate_uuid() + self._create_so('networks', {'id': network_id}) + unpatched_router_ids = [uuidutils.generate_uuid() for i in range(10)] + for rid in unpatched_router_ids: + self._make_router_agents_and_ports(rid, network_id, False) + # make half of the routers already have routerport bindings to simulate + # a back-port of Ifd3e007aaf2a2ed8123275aa3a9f540838e3c003 + patched_router_ids = [uuidutils.generate_uuid() for i in range(10)] + for rid in patched_router_ids: + self._make_router_agents_and_ports(rid, network_id, True) + + def _pre_upgrade_a8b517cff8ab(self, engine): + self._create_ha_routers_with_ports(engine) + return True # return True so check function is invoked after migrate + + def _check_a8b517cff8ab(self, engine, data): + rp = db_utils.get_table(engine, 'routerports') + # just ensuring the correct count of routerport records is enough. 
+ # 20 routers * 2 ports per router + self.assertEqual(40, len(engine.execute(rp.select()).fetchall())) + + +class TestHARouterPortMigrationMysql(HARouterPortMigrationMixin, + test_migrations.TestWalkMigrationsMysql): + pass + + +class TestHARouterPortMigrationPsql(HARouterPortMigrationMixin, + test_migrations.TestWalkMigrationsPsql): + pass diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/db/test_ipam.py neutron-9.0.0~b3~dev557/neutron/tests/functional/db/test_ipam.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/db/test_ipam.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/db/test_ipam.py 2016-08-03 20:10:34.000000000 +0000 @@ -22,12 +22,18 @@ from neutron.db import db_base_plugin_v2 as base_plugin from neutron.db import models_v2 from neutron.ipam.drivers.neutrondb_ipam import db_models as ipam_models -from neutron.tests import base -from neutron.tests.common import base as common_base from neutron.tests.unit import testlib_api -class IpamTestCase(base.BaseTestCase): +# required in order for testresources to optimize same-backend +# tests together +load_tests = testlib_api.module_load_tests +# FIXME(zzzeek): needs to be provided by oslo.db, current version +# is not working +# load_tests = test_base.optimize_db_test_loader(__file__) + + +class IpamTestCase(testlib_api.SqlTestCase): """ Base class for tests that aim to test ip allocation. """ @@ -36,7 +42,6 @@ def setUp(self): super(IpamTestCase, self).setUp() cfg.CONF.set_override('notify_nova_on_port_status_changes', False) - self.useFixture(testlib_api.SqlFixture()) if self.use_pluggable_ipam: self._turn_on_pluggable_ipam() else: @@ -124,7 +129,7 @@ 'admin_state_up': True, 'status': constants.PORT_STATUS_ACTIVE, 'device_id': 'test_dev_id', - 'device_owner': 'compute', + 'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX, 'fixed_ips': port_fixed_ips} self.plugin.create_port(self.cxt, {'port': port}) @@ -155,17 +160,17 @@ self._create_port(self.port_id) -class TestIpamMySql(common_base.MySQLTestCase, IpamTestCase): +class TestIpamMySql(testlib_api.MySQLTestCaseMixin, IpamTestCase): pass -class TestIpamPsql(common_base.PostgreSQLTestCase, IpamTestCase): +class TestIpamPsql(testlib_api.PostgreSQLTestCaseMixin, IpamTestCase): pass -class TestPluggableIpamMySql(common_base.MySQLTestCase, IpamTestCase): +class TestPluggableIpamMySql(testlib_api.MySQLTestCaseMixin, IpamTestCase): use_pluggable_ipam = True -class TestPluggableIpamPsql(common_base.PostgreSQLTestCase, IpamTestCase): +class TestPluggableIpamPsql(testlib_api.PostgreSQLTestCaseMixin, IpamTestCase): use_pluggable_ipam = True diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/db/test_migrations.conf neutron-9.0.0~b3~dev557/neutron/tests/functional/db/test_migrations.conf --- neutron-9.0.0~b2~dev280/neutron/tests/functional/db/test_migrations.conf 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/db/test_migrations.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -[migration_dbs] -# Migration DB details are listed separately as they can't be connected to -# concurrently. These databases can't be the same as above - -# Note, sqlite:// is in-memory and unique each time it is spawned. -# However file sqlite's are not unique. 
- -#sqlite=sqlite:// -#sqlitefile=sqlite:///test_migrations.db -#mysql=mysql+mysqldb://user:pass@localhost/test_migrations -#postgresql=postgresql+psycopg2://user:pass@localhost/test_migrations - -[walk_style] -snake_walk=yes diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/db/test_migrations.py neutron-9.0.0~b3~dev557/neutron/tests/functional/db/test_migrations.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/db/test_migrations.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/db/test_migrations.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,36 +14,25 @@ import collections -import abc from alembic.ddl import base as alembic_ddl from alembic import script as alembic_script from contextlib import contextmanager -import os from oslo_config import cfg from oslo_config import fixture as config_fixture -from oslo_db.sqlalchemy import session -from oslo_db.sqlalchemy import test_base from oslo_db.sqlalchemy import test_migrations -from oslo_db.sqlalchemy import utils as oslo_utils +from oslotest import base as oslotest_base import six -from six.moves import configparser -from six.moves.urllib import parse import sqlalchemy from sqlalchemy import event from sqlalchemy.sql import ddl as sqla_ddl -import sqlalchemy.sql.expression as expr -import sqlalchemy.types as types import subprocess from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.db.migration.models import head as head_models -from neutron.tests import base as base_tests -from neutron.tests.common import base +from neutron.tests.unit import testlib_api -cfg.CONF.import_opt('core_plugin', 'neutron.common.config') - -CORE_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin' +cfg.CONF.import_opt('core_plugin', 'neutron.conf.common') CREATION_OPERATIONS = { 'sqla': (sqla_ddl.CreateIndex, @@ -64,6 +53,12 @@ } +def upgrade(engine, alembic_config, branch_name='heads'): + cfg.CONF.set_override('connection', engine.url, group='database') + migration.do_alembic_command(alembic_config, 'upgrade', + branch_name) + + class _TestModelsMigrations(test_migrations.ModelsMigrationsSync): '''Test for checking equality of models state and migrations. @@ -127,18 +122,23 @@ - existing correct column parameters, - right value, - wrong value. + + This class also contains tests for branches, e.g. that the correct + operations are used in the contract and expand branches. 
+ ''' + BUILD_SCHEMA = False + def setUp(self): super(_TestModelsMigrations, self).setUp() self.cfg = self.useFixture(config_fixture.Config()) - self.cfg.config(core_plugin=CORE_PLUGIN) + self.cfg.config(core_plugin='ml2') self.alembic_config = migration.get_neutron_config() self.alembic_config.neutron_config = cfg.CONF def db_sync(self, engine): - cfg.CONF.set_override('connection', engine.url, group='database') - migration.do_alembic_command(self.alembic_config, 'upgrade', 'heads') + upgrade(engine, self.alembic_config) def get_engine(self): return self.engine @@ -157,30 +157,6 @@ def filter_metadata_diff(self, diff): return list(filter(self.remove_unrelated_errors, diff)) - # TODO(akamyshnikova):when bug 1569262 fixed in oslo.db this won't be - # needed - @oslo_utils.DialectFunctionDispatcher.dispatch_for_dialect("*") - def _compare_server_default(bind, meta_col, insp_def, meta_def): - pass - - @_compare_server_default.dispatch_for('mysql') - def _compare_server_default(bind, meta_col, insp_def, meta_def): - if isinstance(meta_col.type, sqlalchemy.Boolean): - if meta_def is None or insp_def is None: - return meta_def != insp_def - return not ( - isinstance(meta_def.arg, expr.True_) and insp_def == "'1'" or - isinstance(meta_def.arg, expr.False_) and insp_def == "'0'" - ) - - impl_type = meta_col.type - if isinstance(impl_type, types.Variant): - impl_type = impl_type.load_dialect_impl(bind.dialect) - if isinstance(impl_type, (sqlalchemy.Integer, sqlalchemy.BigInteger)): - if meta_def is None or insp_def is None: - return meta_def != insp_def - return meta_def.arg != insp_def.split("'")[1] - # Remove some difference that are not mistakes just specific of # dialects, etc def remove_unrelated_errors(self, element): @@ -206,9 +182,18 @@ return False return True + def test_upgrade_expand_branch(self): + # Verify that "command neutron-db-manage upgrade --expand" works + # without errors. Check this for both MySQL and PostgreSQL. + upgrade(self.engine, self.alembic_config, + branch_name='%s@head' % migration.EXPAND_BRANCH) + + def test_upgrade_contract_branch(self): + # Verify that "command neutron-db-manage upgrade --contract" works + # without errors. Check this for both MySQL and PostgreSQL. 
+ upgrade(self.engine, self.alembic_config, + branch_name='%s@head' % migration.CONTRACT_BRANCH) -class TestModelsMigrationsMysql(_TestModelsMigrations, - base.MySQLTestCase): @contextmanager def _listener(self, engine, listener_func): try: @@ -298,7 +283,7 @@ "command") find_migration_exceptions() - engine = self.get_engine() + engine = self.engine cfg.CONF.set_override('connection', engine.url, group='database') with engine.begin() as connection: self.alembic_config.attributes['connection'] = connection @@ -315,6 +300,25 @@ self.alembic_config, 'upgrade', '%s@head' % migration.CONTRACT_BRANCH) + def _test_has_offline_migrations(self, revision, expected): + engine = self.get_engine() + cfg.CONF.set_override('connection', engine.url, group='database') + migration.do_alembic_command(self.alembic_config, 'upgrade', revision) + self.assertEqual(expected, + migration.has_offline_migrations(self.alembic_config, + 'unused')) + + def test_has_offline_migrations_pending_contract_scripts(self): + self._test_has_offline_migrations('kilo', True) + + def test_has_offline_migrations_all_heads_upgraded(self): + self._test_has_offline_migrations('heads', False) + + +class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin, + _TestModelsMigrations, + testlib_api.SqlTestCaseLight): + def test_check_mysql_engine(self): engine = self.get_engine() cfg.CONF.set_override('connection', engine.url, group='database') @@ -332,27 +336,15 @@ and table != 'alembic_version'] self.assertEqual(0, len(res), "%s non InnoDB tables created" % res) - def _test_has_offline_migrations(self, revision, expected): - engine = self.get_engine() - cfg.CONF.set_override('connection', engine.url, group='database') - migration.do_alembic_command(self.alembic_config, 'upgrade', revision) - self.assertEqual(expected, - migration.has_offline_migrations(self.alembic_config, - 'unused')) - - def test_has_offline_migrations_pending_contract_scripts(self): - self._test_has_offline_migrations('kilo', True) - - def test_has_offline_migrations_all_heads_upgraded(self): - self._test_has_offline_migrations('heads', False) - -class TestModelsMigrationsPsql(_TestModelsMigrations, - base.PostgreSQLTestCase): +class TestModelsMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin, + _TestModelsMigrations, + testlib_api.SqlTestCaseLight): pass -class TestSanityCheck(test_base.DbTestCase): +class TestSanityCheck(testlib_api.SqlTestCaseLight): + BUILD_SCHEMA = False def setUp(self): super(TestSanityCheck, self).setUp() @@ -380,8 +372,29 @@ self.assertRaises(script.DuplicateL3HARouterAgentPortBinding, script.check_sanity, conn) + def test_check_sanity_030a959ceafa(self): + routerports = sqlalchemy.Table( + 'routerports', sqlalchemy.MetaData(), + sqlalchemy.Column('router_id', sqlalchemy.String(36)), + sqlalchemy.Column('port_id', sqlalchemy.String(36)), + sqlalchemy.Column('port_type', sqlalchemy.String(255))) -class TestWalkDowngrade(test_base.DbTestCase): + with self.engine.connect() as conn: + routerports.create(conn) + conn.execute(routerports.insert(), [ + {'router_id': '1234', 'port_id': '12345', + 'port_type': '123'}, + {'router_id': '12343', 'port_id': '12345', + 'port_type': '1232'} + ]) + script_dir = alembic_script.ScriptDirectory.from_config( + self.alembic_config) + script = script_dir.get_revision("030a959ceafa").module + self.assertRaises(script.DuplicatePortRecordinRouterPortdatabase, + script.check_sanity, conn) + + +class TestWalkDowngrade(oslotest_base.BaseTestCase): def setUp(self): super(TestWalkDowngrade, self).setUp() @@ -400,84 
+413,16 @@ if failed_revisions: self.fail('Migrations %s have downgrade' % failed_revisions) - - -def _is_backend_avail(backend, - user="openstack_citest", - passwd="openstack_citest", - database="openstack_citest"): - # is_backend_avail will be soon deprecated from oslo_db - # thats why its added here - try: - connect_uri = oslo_utils.get_connect_string(backend, user=user, - passwd=passwd, - database=database) - engine = session.create_engine(connect_uri) - connection = engine.connect() - except Exception: - # intentionally catch all to handle exceptions even if we don't - # have any backend code loaded. - return False - else: - connection.close() - engine.dispose() return True -@six.add_metaclass(abc.ABCMeta) -class _TestWalkMigrations(base_tests.BaseTestCase, test_base.DbTestCase): +class _TestWalkMigrations(object): '''This will add a framework for testing schema migrations for different backends. - Right now it supports pymysql and postgresql backends. Pymysql - and postgresql commands are executed to walk between to do updates. - For upgrade and downgrade migrate_up and migrate down functions - have been added. ''' - DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), - 'test_migrations.conf') - CONFIG_FILE_PATH = os.environ.get('NEUTRON_TEST_MIGRATIONS_CONF', - DEFAULT_CONFIG_FILE) - - def setUp(self): - if not _is_backend_avail(self.BACKEND): - self.skipTest("%s not available" % self.BACKEND) - - super(_TestWalkMigrations, self).setUp() - - self.snake_walk = False - self.test_databases = {} - - if os.path.exists(self.CONFIG_FILE_PATH): - cp = configparser.RawConfigParser() - try: - cp.read(self.CONFIG_FILE_PATH) - options = cp.options('migration_dbs') - for key in options: - self.test_databases[key] = cp.get('migration_dbs', key) - self.snake_walk = cp.getboolean('walk_style', 'snake_walk') - except configparser.ParsingError as e: - self.fail("Failed to read test_migrations.conf config " - "file. Got error: %s" % e) - else: - self.fail("Failed to find test_migrations.conf config " - "file.") - - self.engines = {} - for key, value in self.test_databases.items(): - self.engines[key] = sqlalchemy.create_engine(value) - - # We start each test case with a completely blank slate. 
- self._reset_databases() - - def assertColumnInTable(self, engine, table_name, column): - table = oslo_utils.get_table(engine, table_name) - self.assertIn(column, table.columns) - - def assertColumnNotInTables(self, engine, table_name, column): - table = oslo_utils.get_table(engine, table_name) - self.assertNotIn(column, table.columns) + BUILD_SCHEMA = False def execute_cmd(self, cmd=None): proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, @@ -486,23 +431,6 @@ self.assertEqual(0, proc.returncode, 'Command failed with ' 'output:\n%s' % output) - @abc.abstractproperty - def BACKEND(self): - pass - - @abc.abstractmethod - def _database_recreate(self, user, password, database, host): - pass - - def _reset_databases(self): - for key, engine in self.engines.items(): - conn_string = self.test_databases[key] - conn_pieces = parse.urlparse(conn_string) - engine.dispose() - user, password, database, host = oslo_utils.get_db_connection_info( - conn_pieces) - self._database_recreate(user, password, database, host) - def _get_alembic_config(self, uri): db_config = migration.get_neutron_config() self.script_dir = alembic_script.ScriptDirectory.from_config(db_config) @@ -512,71 +440,18 @@ group='database') return db_config - def _revisions(self, downgrade=False): + def _revisions(self): """Provides revisions and its parent revisions. - :param downgrade: whether to include downgrade behavior or not. - :type downgrade: Bool :return: List of tuples. Every tuple contains revision and its parent revision. """ revisions = list(self.script_dir.walk_revisions("base", "heads")) - if not downgrade: - revisions = list(reversed(revisions)) + revisions = list(reversed(revisions)) for rev in revisions: - if downgrade: - # Destination, current - yield rev.down_revision, rev.revision - else: - # Destination, current - yield rev.revision, rev.down_revision - - def _walk_versions(self, config, engine, downgrade=True, snake_walk=False): - """Test migrations ability to upgrade and downgrade. - - :param downgrade: whether to include downgrade behavior or not. - :type downgrade: Bool - :snake_walk: enable mode when at every upgrade revision will be - downgraded and upgraded in previous state at upgrade and backward at - downgrade. - :type snake_walk: Bool - """ - - revisions = self._revisions() - for dest, curr in revisions: - self._migrate_up(config, engine, dest, curr, with_data=True) - - if snake_walk and dest != 'None': - # NOTE(I159): Pass reversed arguments into `_migrate_down` - # method because we have been upgraded to a destination - # revision and now we going to downgrade back. 
- self._migrate_down(config, engine, curr, dest, with_data=True) - self._migrate_up(config, engine, dest, curr, with_data=True) - - if downgrade: - revisions = self._revisions(downgrade) - for dest, curr in revisions: - self._migrate_down(config, engine, dest, curr, with_data=True) - if snake_walk: - self._migrate_up(config, engine, curr, - dest, with_data=True) - self._migrate_down(config, engine, dest, - curr, with_data=True) - - def _migrate_down(self, config, engine, dest, curr, with_data=False): - # First upgrade it to current to do downgrade - if dest: - migration.do_alembic_command(config, 'downgrade', dest) - else: - meta = sqlalchemy.MetaData(bind=engine) - meta.drop_all() - - if with_data: - post_downgrade = getattr( - self, "_post_downgrade_%s" % curr, None) - if post_downgrade: - post_downgrade(engine) + # Destination, current + yield rev.revision, rev.down_revision def _migrate_up(self, config, engine, dest, curr, with_data=False): if with_data: @@ -591,71 +466,24 @@ if check and data: check(engine, data) + def test_walk_versions(self): + """Test migrations ability to upgrade and downgrade. + + """ + engine = self.engine + config = self._get_alembic_config(engine.url) + revisions = self._revisions() + for dest, curr in revisions: + self._migrate_up(config, engine, dest, curr, with_data=True) + -class TestWalkMigrationsMysql(_TestWalkMigrations): +class TestWalkMigrationsMysql(testlib_api.MySQLTestCaseMixin, + _TestWalkMigrations, + testlib_api.SqlTestCaseLight): + pass - BACKEND = 'mysql+pymysql' - def _database_recreate(self, user, password, database, host): - # We can execute the MySQL client to destroy and re-create - # the MYSQL database, which is easier and less error-prone - # than using SQLAlchemy to do this via MetaData...trust me. - sql = ("drop database if exists %(database)s; create " - "database %(database)s;") % {'database': database} - cmd = ("mysql -u \"%(user)s\" -p%(password)s -h %(host)s " - "-e \"%(sql)s\"") % {'user': user, 'password': password, - 'host': host, 'sql': sql} - self.execute_cmd(cmd) - - def test_mysql_opportunistically(self): - connect_string = oslo_utils.get_connect_string(self.BACKEND, - "openstack_citest", user="openstack_citest", - passwd="openstack_citest") - engine = session.create_engine(connect_string) - config = self._get_alembic_config(connect_string) - self.engines["mysqlcitest"] = engine - self.test_databases["mysqlcitest"] = connect_string - - # build a fully populated mysql database with all the tables - self._reset_databases() - self._walk_versions(config, engine, False, False) - - -class TestWalkMigrationsPsql(_TestWalkMigrations): - - BACKEND = 'postgresql' - - def _database_recreate(self, user, password, database, host): - os.environ['PGPASSWORD'] = password - os.environ['PGUSER'] = user - # note(boris-42): We must create and drop database, we can't - # drop database which we have connected to, so for such - # operations there is a special database template1. 
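(The hand-rolled drop-and-recreate helpers being removed here are superseded by the testlib_api backend mixins: a backend-specific suite is now declared by stacking a backend mixin with the shared scenario mixin, as the replacement TestWalkMigrations* classes in this hunk do. The general shape, with MyScenarioMixin as a placeholder name:

from neutron.tests.unit import testlib_api

class MyScenarioMixin(object):
    """Migration test logic shared across database backends."""

class TestMyScenarioMysql(testlib_api.MySQLTestCaseMixin,
                          MyScenarioMixin,
                          testlib_api.SqlTestCaseLight):
    pass

class TestMyScenarioPsql(testlib_api.PostgreSQLTestCaseMixin,
                         MyScenarioMixin,
                         testlib_api.SqlTestCaseLight):
    pass
)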
- sqlcmd = ("psql -w -U %(user)s -h %(host)s -c" - " '%(sql)s' -d template1") - sql = "drop database if exists %(database)s;" - sql = sql % {'database': database} - droptable = sqlcmd % {'user': user, 'host': host, - 'sql': sql} - self.execute_cmd(droptable) - sql = "create database %(database)s;" - sql = sql % {'database': database} - createtable = sqlcmd % {'user': user, 'host': host, - 'sql': sql} - self.execute_cmd(createtable) - - def test_postgresql_opportunistically(self): - # add this to the global lists to make reset work with it, it's removed - # automatically in tearDown so no need to clean it up here. - connect_string = oslo_utils.get_connect_string(self.BACKEND, - "openstack_citest", - "openstack_citest", - "openstack_citest") - engine = session.create_engine(connect_string) - config = self._get_alembic_config(connect_string) - self.engines["postgresqlcitest"] = engine - self.test_databases["postgresqlcitest"] = connect_string - - # build a fully populated postgresql database with all the tables - self._reset_databases() - self._walk_versions(config, engine, False, False) +class TestWalkMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin, + _TestWalkMigrations, + testlib_api.SqlTestCaseLight): + pass diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/pecan_wsgi/test_controllers.py neutron-9.0.0~b3~dev557/neutron/tests/functional/pecan_wsgi/test_controllers.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/pecan_wsgi/test_controllers.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/pecan_wsgi/test_controllers.py 2016-08-03 20:10:34.000000000 +0000 @@ -10,6 +10,8 @@ # License for the specific language governing permissions and limitations # under the License. +import uuid + import mock from neutron_lib import constants as n_const from oslo_config import cfg @@ -365,6 +367,122 @@ self._test_method_returns_code('delete', 405) +class TestPaginationAndSorting(test_functional.PecanFunctionalTest): + + RESOURCE_COUNT = 6 + + def setUp(self): + cfg.CONF.set_override('allow_pagination', True) + cfg.CONF.set_override('allow_sorting', True) + super(TestPaginationAndSorting, self).setUp() + self.plugin = manager.NeutronManager.get_plugin() + self.ctx = context.get_admin_context() + self._create_networks(self.RESOURCE_COUNT) + self.networks = self._get_collection()['networks'] + + def _create_networks(self, count=1): + network_ids = [] + for index in range(count): + network = {'name': 'pecannet-%d' % index, 'tenant_id': 'tenid', + 'shared': False, 'admin_state_up': True, + 'status': 'ACTIVE'} + network_id = self.plugin.create_network( + self.ctx, {'network': network})['id'] + network_ids.append(network_id) + return network_ids + + def _get_collection(self, collection=None, limit=None, marker=None, + fields=None, page_reverse=False, sort_key=None, + sort_dir=None): + collection = collection or 'networks' + fields = fields or [] + query_params = [] + if limit: + query_params.append('limit=%d' % limit) + if marker: + query_params.append('marker=%s' % marker) + if page_reverse: + query_params.append('page_reverse=True') + if sort_key: + query_params.append('sort_key=%s' % sort_key) + if sort_dir: + query_params.append('sort_dir=%s' % sort_dir) + query_params.extend(['%s%s' % ('fields=', field) for field in fields]) + url = '/v2.0/%s.json' % collection + if query_params: + url = '%s?%s' % (url, '&'.join(query_params)) + list_resp = self.app.get(url, headers={'X-Project-Id': 'tenid'}) + self.assertEqual(200, list_resp.status_int) + return 
list_resp.json + + def _test_get_collection_with_pagination(self, expected_list, + collection=None, + limit=None, marker=None, + fields=None, page_reverse=False, + sort_key=None, sort_dir=None): + expected_list = expected_list or [] + collection = collection or 'networks' + list_resp = self._get_collection(collection=collection, limit=limit, + marker=marker, fields=fields, + page_reverse=page_reverse, + sort_key=sort_key, sort_dir=sort_dir) + if limit and marker: + links_key = '%s_links' % collection + self.assertIn(links_key, list_resp) + list_resp_ids = [item['id'] for item in list_resp[collection]] + self.assertEqual(expected_list, list_resp_ids) + if fields: + for item in list_resp[collection]: + for field in fields: + self.assertIn(field, item) + + def test_get_collection_with_pagination_limit(self): + self._test_get_collection_with_pagination([self.networks[0]['id']], + limit=1) + + def test_get_collection_with_pagination_limit_over_count(self): + expected_ids = [network['id'] for network in self.networks] + self._test_get_collection_with_pagination( + expected_ids, limit=self.RESOURCE_COUNT + 1) + + def test_get_collection_with_pagination_marker(self): + marker = self.networks[2]['id'] + expected_ids = [network['id'] for network in self.networks[3:]] + self._test_get_collection_with_pagination(expected_ids, limit=3, + marker=marker) + + def test_get_collection_with_pagination_marker_without_limit(self): + marker = self.networks[2]['id'] + expected_ids = [network['id'] for network in self.networks] + self._test_get_collection_with_pagination(expected_ids, marker=marker) + + def test_get_collection_with_pagination_and_fields(self): + expected_ids = [network['id'] for network in self.networks[:2]] + self._test_get_collection_with_pagination( + expected_ids, limit=2, fields=['id', 'name']) + + def test_get_collection_with_pagination_page_reverse(self): + marker = self.networks[2]['id'] + expected_ids = [network['id'] for network in self.networks[:2]] + self._test_get_collection_with_pagination(expected_ids, limit=3, + marker=marker, + page_reverse=True) + + def test_get_collection_with_sorting_desc(self): + nets = sorted(self.networks, key=lambda net: net['name'], reverse=True) + expected_ids = [network['id'] for network in nets] + self._test_get_collection_with_pagination(expected_ids, + sort_key='name', + sort_dir='desc') + + def test_get_collection_with_sorting_asc(self): + nets = sorted(self.networks, key=lambda net: net['name']) + expected_ids = [network['id'] for network in nets] + self._test_get_collection_with_pagination(expected_ids, + sort_key='name', + sort_dir='asc') + + class TestRequestProcessing(TestRootController): def setUp(self): @@ -485,7 +603,8 @@ def setUp(self): cfg.CONF.set_override( 'service_plugins', - ['neutron.services.l3_router.l3_router_plugin.L3RouterPlugin']) + ['neutron.services.l3_router.l3_router_plugin.L3RouterPlugin', + 'neutron.services.flavors.flavors_plugin.FlavorsPlugin']) super(TestRouterController, self).setUp() plugin = manager.NeutronManager.get_plugin() ctx = context.get_admin_context() @@ -581,7 +700,8 @@ def setUp(self): cfg.CONF.set_override( 'service_plugins', - ['neutron.services.l3_router.l3_router_plugin.L3RouterPlugin']) + ['neutron.services.l3_router.l3_router_plugin.L3RouterPlugin', + 'neutron.services.flavors.flavors_plugin.FlavorsPlugin']) super(TestL3AgentShimControllers, self).setUp() policy.init() policy._ENFORCER.set_rules( @@ -645,7 +765,8 @@ policy._ENFORCER.set_rules( oslo_policy.Rules.from_dict( {'get_meh_meh': '', - 
'get_meh_mehs': ''}), + 'get_meh_mehs': '', + 'get_fake_subresources': ''}), overwrite=False) self.addCleanup(policy.reset) @@ -665,3 +786,19 @@ resp = self.app.get(url) self.assertEqual(200, resp.status_int) self.assertEqual({body_collection: [{'fake': 'fake'}]}, resp.json) + + def test_hyphenated_collection_subresource_controller_not_shimmed(self): + body_collection = pecan_utils.FakeExtension.HYPHENATED_COLLECTION + uri_collection = body_collection.replace('_', '-') + # there is only one subresource so far + sub_resource_collection = ( + pecan_utils.FakeExtension.FAKE_SUB_RESOURCE_COLLECTION) + temp_id = str(uuid.uuid1()) + url = '/v2.0/{0}/{1}/{2}'.format( + uri_collection, + temp_id, + sub_resource_collection.replace('_', '-')) + resp = self.app.get(url) + self.assertEqual(200, resp.status_int) + self.assertEqual({sub_resource_collection: {'foo': temp_id}}, + resp.json) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/pecan_wsgi/test_functional.py neutron-9.0.0~b3~dev557/neutron/tests/functional/pecan_wsgi/test_functional.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/pecan_wsgi/test_functional.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/pecan_wsgi/test_functional.py 2016-08-03 20:10:34.000000000 +0000 @@ -30,7 +30,7 @@ class PecanFunctionalTest(testlib_api.SqlTestCase): def setUp(self, service_plugins=None, extensions=None): - self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin') + self.setup_coreplugin('ml2') super(PecanFunctionalTest, self).setUp() self.addCleanup(exts.PluginAwareExtensionManager.clear_instance) self.addCleanup(set_config, {}, overwrite=True) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/pecan_wsgi/utils.py neutron-9.0.0~b3~dev557/neutron/tests/functional/pecan_wsgi/utils.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/pecan_wsgi/utils.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/pecan_wsgi/utils.py 2016-08-03 20:10:34.000000000 +0000 @@ -110,6 +110,19 @@ HYPHENATED_RESOURCE = 'meh_meh' HYPHENATED_COLLECTION = HYPHENATED_RESOURCE + 's' + SUB_RESOURCE_ATTRIBUTE_MAP = { + 'fake_subresources': { + 'parent': { + 'collection_name': ( + HYPHENATED_COLLECTION), + 'member_name': HYPHENATED_RESOURCE}, + 'parameters': {'foo': {'is_visible': True}, + 'bar': {'is_visible': True} + } + } + } + FAKE_SUB_RESOURCE_COLLECTION = 'fake_subresources' + RAM = { HYPHENATED_COLLECTION: { 'fake': {'is_visible': True} @@ -137,12 +150,34 @@ params = self.RAM.get(self.HYPHENATED_COLLECTION, {}) attributes.PLURALS.update({self.HYPHENATED_COLLECTION: self.HYPHENATED_RESOURCE}) + fake_plugin = FakePlugin() controller = base.create_resource( collection, self.HYPHENATED_RESOURCE, FakePlugin(), params, allow_bulk=True, allow_pagination=True, allow_sorting=True) - return [extensions.ResourceExtension(collection, controller, - attr_map=params)] + resources = [extensions.ResourceExtension(collection, + controller, + attr_map=params)] + for collection_name in self.SUB_RESOURCE_ATTRIBUTE_MAP: + resource_name = collection_name + parent = self.SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( + 'parent') + params = self.SUB_RESOURCE_ATTRIBUTE_MAP[collection_name].get( + 'parameters') + + controller = base.create_resource(collection_name, resource_name, + fake_plugin, params, + allow_bulk=True, + parent=parent) + + resource = extensions.ResourceExtension( + collection_name, + controller, parent, + path_prefix="", + attr_map=params) + resources.append(resource) + + 
return resources def get_extended_resources(self, version): if version == "2.0": @@ -165,3 +200,7 @@ def get_meh_mehs(self, context, filters=None, fields=None): return [{'fake': 'fake'}] + + def get_meh_meh_fake_subresources(self, context, id_, fields=None, + filters=None): + return {'foo': id_} diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py neutron-9.0.0~b3~dev557/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/scheduler/test_dhcp_agent_scheduler.py 2016-08-29 20:05:49.000000000 +0000 @@ -364,7 +364,8 @@ enable_dhcp = (not self._strip_host_index(net_id) in self.networks_with_dhcp_disabled) subnets.append({'network_id': net_id, - 'enable_dhcp': enable_dhcp}) + 'enable_dhcp': enable_dhcp, + 'segment_id': None}) return subnets def get_network(self, context, net_id): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/scheduler/test_l3_agent_scheduler.py neutron-9.0.0~b3~dev557/neutron/tests/functional/scheduler/test_l3_agent_scheduler.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/scheduler/test_l3_agent_scheduler.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/scheduler/test_l3_agent_scheduler.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,14 +16,21 @@ import collections import random +from neutron_lib import constants +from oslo_utils import uuidutils import testscenarios from neutron import context +from neutron.db import external_net_db from neutron.scheduler import l3_agent_scheduler from neutron.services.l3_router import l3_router_plugin from neutron.tests.common import helpers from neutron.tests.unit.db import test_db_base_plugin_v2 +_uuid = uuidutils.generate_uuid + +PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' + # Required to generate tests from scenarios. Not compatible with nose. load_tests = testscenarios.load_tests_apply_scenarios @@ -35,15 +42,16 @@ """ def setUp(self): - super(L3SchedulerBaseTest, self).setUp() + super(L3SchedulerBaseTest, self).setUp(PLUGIN_NAME) self.l3_plugin = l3_router_plugin.L3RouterPlugin() self.adminContext = context.get_admin_context() - self.adminContext.tenant_id = '_func_test_tenant_' + self.adminContext.tenant_id = _uuid() - def _create_l3_agent(self, host, context, agent_mode='legacy', plugin=None, - state=True): - agent = helpers.register_l3_agent(host, agent_mode) + def _create_l3_agent(self, host, context, agent_mode='legacy', + state=True, ext_net_id=''): + agent = helpers.register_l3_agent(host, agent_mode, + ext_net_id=ext_net_id) helpers.set_agent_admin_state(agent.id, state) return agent @@ -58,7 +66,7 @@ # down agent count. 
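# (Each agent is registered via helpers.register_l3_agent() and its
# admin_state_up is then toggled with helpers.set_agent_admin_state().)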
self.hosts = ['host-%s' % i for i in range(agent_count)] self.l3_agents = [self._create_l3_agent(self.hosts[i], - self.adminContext, 'legacy', self.l3_plugin, + self.adminContext, 'legacy', (i >= down_agent_count)) for i in range(agent_count)] def _create_routers(self, scheduled_router_count, @@ -280,8 +288,7 @@ class L3AZSchedulerBaseTest(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): - core_plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' - super(L3AZSchedulerBaseTest, self).setUp(plugin=core_plugin) + super(L3AZSchedulerBaseTest, self).setUp(plugin='ml2') self.l3_plugin = l3_router_plugin.L3RouterPlugin() self.l3_plugin.router_scheduler = None @@ -550,3 +557,274 @@ for i in range(self.az_count): self.assertEqual(self.expected_scheduled_agent_count[i], scheduled_azs.get('az%s' % i, 0)) + + +class L3DVRSchedulerBaseTest(L3SchedulerBaseTest): + + """Base class for functional tests of DVR L3 schedulers. Provides basic setup and utility functions. """ + + def setUp(self): + super(L3DVRSchedulerBaseTest, self).setUp() + + self.default_ext_net_id = _uuid() + self.default_ext_subnet_id = _uuid() + + self.router_ext_net_id = _uuid() + self.router_ext_subnet_id = _uuid() + + def _create_router(self, name, distributed, ext_net_id=None): + router = {'name': name, 'admin_state_up': True, + 'tenant_id': self.adminContext.tenant_id, + 'distributed': distributed} + + if ext_net_id: + router['external_gateway_info'] = {'network_id': ext_net_id} + + return self.l3_plugin.create_router(self.adminContext, + {'router': router}) + + def _create_network(self, net_id, name=None, external=False): + network_dict = {'tenant_id': self.adminContext.tenant_id, + 'id': net_id, + 'name': name, + 'admin_state_up': True, + 'shared': False, + 'status': constants.NET_STATUS_ACTIVE} + network = self.plugin.create_network(self.adminContext, + {'network': network_dict}) + if external: + with self.adminContext.session.begin(): + network = external_net_db.ExternalNetwork(network_id=net_id) + self.adminContext.session.add(network) + + return network + + def _create_subnet(self, sub_id, network_id, cidr, gw_ip, name='test_sub'): + subnet = {'tenant_id': self.adminContext.tenant_id, + 'id': sub_id, + 'name': name, + 'network_id': network_id, + 'ip_version': 4, + 'cidr': cidr, + 'enable_dhcp': False, + 'gateway_ip': gw_ip, + 'shared': False, + 'allocation_pools': constants.ATTR_NOT_SPECIFIED, + 'dns_nameservers': constants.ATTR_NOT_SPECIFIED, + 'host_routes': constants.ATTR_NOT_SPECIFIED} + + return self.plugin.create_subnet(self.adminContext, {'subnet': subnet}) + + +class L3DVRSchedulerTestCase(L3DVRSchedulerBaseTest): + + """Test various scenarios for L3 DVR schedulers: + + agent_mode + L3 agent mode. + + second_agent_mode + Second L3 agent mode for scenarios with two agents. + + agent_has_ext_network + Whether there is an external network on the host. + + router_is_distributed + Whether the router is distributed. + + router_already_hosted + Whether the router is already hosted. + + router_has_ext_gw + Whether the router has an external gateway. + + router_agent_have_same_ext_net + Whether the router and the agent share the same external network. + + expected_router_scheduled + Whether we expect the router to be scheduled. 
+ """ + + def get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR_SNAT, + second_agent_mode=None, + agent_has_ext_network=False, + router_is_distributed=False, + router_already_hosted=False, + router_has_ext_gw=False, + router_agent_have_same_ext_net=False, + expected_router_scheduled=False): + return dict(agent_mode=agent_mode, + second_agent_mode=second_agent_mode, + agent_has_ext_network=agent_has_ext_network, + router_is_distributed=router_is_distributed, + router_already_hosted=router_already_hosted, + router_has_ext_gw=router_has_ext_gw, + router_agent_have_same_ext_net=router_agent_have_same_ext_net, + expected_router_scheduled=expected_router_scheduled) + + scenarios = [ + ('Legacy router not scheduled on dvr agent', + get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR)), + + ('Legacy router scheduled on dvr_snat agent', + get_scenario(expected_router_scheduled=True)), + + ('Distributed router not scheduled on legacy agent', + get_scenario(agent_mode=constants.L3_AGENT_MODE_LEGACY, + router_is_distributed=True)), + + ('Distributed router not scheduled on dvr agent', + get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR, + router_is_distributed=True)), + + ('Distributed router scheduled on dvr_snat agent', + get_scenario(router_is_distributed=True, + expected_router_scheduled=True)), + + ('Already hosted legacy router not scheduled on dvr agent', + get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR, + router_already_hosted=True)), + + ('Already hosted legacy router not scheduled on dvr_snat agent', + get_scenario(router_already_hosted=True)), + + ('Already hosted distributed router not scheduled on legacy agent', + get_scenario(agent_mode=constants.L3_AGENT_MODE_LEGACY, + router_already_hosted=True, + router_is_distributed=True)), + + ('Already hosted distributed router not scheduled on dvr agent', + get_scenario(agent_mode=constants.L3_AGENT_MODE_DVR, + router_is_distributed=True, + router_already_hosted=True)), + + ('Already hosted distributed router not scheduled on dvr_snat agent', + get_scenario(router_is_distributed=True, + router_already_hosted=True)), + + ('Already hosted legacy router not scheduled on additional dvr agent', + get_scenario(agent_mode=constants.L3_AGENT_MODE_LEGACY, + second_agent_mode=constants.L3_AGENT_MODE_DVR_SNAT, + router_already_hosted=True)), + + ('Distributed router not scheduled if it is on a different ' + 'external network than the dvr_snat agent', + get_scenario(agent_has_ext_network=True, + router_is_distributed=True, + router_has_ext_gw=True, + router_agent_have_same_ext_net=False)), + ] + + def setUp(self): + super(L3DVRSchedulerTestCase, self).setUp() + + agent_cnt = 2 if self.second_agent_mode else 1 + + # create hosts for each agent + self.hosts = ['host-%s' % i for i in range(agent_cnt)] + + # create default external network + self._create_network(self.default_ext_net_id, + name='_test-ext-net', external=True) + self._create_subnet(self.default_ext_subnet_id, + self.default_ext_net_id, + '10.10.9.0/24', '10.10.9.1', + '_test-ext-net-subnet') + + if self.router_has_ext_gw and not self.router_agent_have_same_ext_net: + # for the test cases in which router and agent are not on same + # external network, we create an external network for router + self._create_network(self.router_ext_net_id, + name='_test-ext-net2', external=True) + self._create_subnet(self.router_ext_subnet_id, + self.router_ext_net_id, + '10.10.8.0/24', '10.10.8.1', + '_test-ext-net2-subnet') + # create agents: + self.l3_agents = [self._create_l3_agent(self.hosts[0], + 
self.adminContext, self.agent_mode, True, + self.default_ext_net_id if self.agent_has_ext_network else '')] + if self.second_agent_mode: + self.l3_agents.append(self._create_l3_agent(self.hosts[1], + self.adminContext, self.second_agent_mode, True, + self.default_ext_net_id if self.agent_has_ext_network else '')) + + # The router to schedule: + self.router_to_schedule = self._create_router_to_schedule() + + def _create_router_to_schedule(self): + router_to_schedule = None + + if self.router_has_ext_gw: + if self.router_agent_have_same_ext_net: + router_to_schedule = self._create_router('schd_rtr', + self.router_is_distributed, + self.default_ext_net_id) + else: + router_to_schedule = self._create_router('schd_rtr', + self.router_is_distributed, + self.router_ext_net_id) + else: + router_to_schedule = self._create_router('schd_rtr', + self.router_is_distributed) + + return router_to_schedule + + def _test_schedule_router(self): + if self.router_already_hosted: + self.scheduler.bind_router(self.adminContext, + self.router_to_schedule['id'], + self.l3_agents[0]) + + # schedule: + actual_scheduled_agent = self.scheduler.schedule( + self.l3_plugin, + self.adminContext, + self.router_to_schedule['id']) + + # check for router scheduling: + self.assertEqual(self.expected_router_scheduled, + bool(actual_scheduled_agent), + message='Failed to schedule agent') + + def _test_auto_schedule_routers(self): + if self.router_already_hosted: + self.scheduler.bind_router(self.adminContext, + self.router_to_schedule['id'], + self.l3_agents[0]) + did_it_schedule = False + + # schedule: + for host in self.hosts: + did_it_schedule = self.scheduler.auto_schedule_routers( + self.l3_plugin, self.adminContext, + host, [self.router_to_schedule['id']]) + if did_it_schedule: + break + + if self.router_already_hosted: + self.assertFalse(did_it_schedule, + 'Router already hosted, should not be scheduled again') + elif self.expected_router_scheduled: + self.assertTrue(did_it_schedule, + 'Router expected to be scheduled, but was not') + else: + self.assertFalse(did_it_schedule, 'Router scheduled, although not expected') + + def test_least_routers_schedule_router(self): + self.scheduler = l3_agent_scheduler.LeastRoutersScheduler() + self._test_schedule_router() + + def test_least_routers_auto_schedule_routers(self): + self.scheduler = l3_agent_scheduler.LeastRoutersScheduler() + self._test_auto_schedule_routers() + + def test_chance_schedule_router(self): + self.scheduler = l3_agent_scheduler.ChanceScheduler() + self._test_schedule_router() + + def test_chance_auto_schedule_routers(self): + self.scheduler = l3_agent_scheduler.ChanceScheduler() + self._test_auto_schedule_routers() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py neutron-9.0.0~b3~dev557/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,7 +15,6 @@ import mock from neutron_lib import constants -from neutron.common import constants as n_const from neutron.common import topics from neutron.extensions import external_net from neutron.extensions import l3_ext_ha_mode @@ -32,7 +31,7 @@ super(L3DvrHATestCase, self).setUp() self.l3_agent_2 = helpers.register_l3_agent( host="standby", - agent_mode=n_const.L3_AGENT_MODE_DVR_SNAT) + 
agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) def _create_router(self, distributed=True, ha=True): return (super(L3DvrHATestCase, self). @@ -136,7 +135,7 @@ HOST1, HOST2, HOST3 = 'host1', 'host2', 'host3' for host in [HOST1, HOST2, HOST3]: helpers.register_l3_agent( - host=host, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=host, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router(distributed=True, ha=True) arg_list = (portbindings.HOST_ID,) @@ -343,11 +342,13 @@ def _check_snat_internal_gateways_presence(self, router, subnet, int_cnt): snat_router_intfs = self.l3_plugin._get_snat_sync_interfaces( self.context, [router['id']]) - self.assertEqual(int_cnt, len(snat_router_intfs)) - if int_cnt > 1: + if int_cnt == 0: + self.assertEqual(0, len(snat_router_intfs)) + else: + snat_interfaces = snat_router_intfs[router['id']] + self.assertEqual(1, len(snat_interfaces)) self.assertEqual(subnet['subnet']['id'], - snat_router_intfs.values()[0][0]['fixed_ips'][0][ - 'subnet_id']) + snat_interfaces[0]['fixed_ips'][0]['subnet_id']) def _check_internal_subnet_interface_presence(self, router, subnet, int_cnt): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py neutron-9.0.0~b3~dev557/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,7 +16,6 @@ from neutron_lib import constants from neutron.api.rpc.handlers import l3_rpc -from neutron.common import constants as n_const from neutron.common import topics from neutron import context from neutron.extensions import external_net @@ -32,7 +31,7 @@ def setUp(self): super(L3DvrTestCase, self).setUp() self.l3_agent = helpers.register_l3_agent( - agent_mode=n_const.L3_AGENT_MODE_DVR_SNAT) + agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) def _create_router(self, distributed=True, ha=False): return (super(L3DvrTestCase, self). 
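The hunks in this and the neighboring files repeat one mechanical migration: the L3 agent mode constants are read from neutron-lib instead of the deprecated in-tree neutron.common.constants alias. A minimal sketch of the pattern, assuming neutron_lib exposes the same names the old module did (which is what the patch itself relies on):

    # Before: in-tree alias, now deprecated.
    # from neutron.common import constants as n_const
    # agent_mode = n_const.L3_AGENT_MODE_DVR_SNAT

    # After: the identical names are provided by the neutron-lib package.
    from neutron_lib import constants
    agent_mode = constants.L3_AGENT_MODE_DVR_SNAT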
@@ -283,9 +282,9 @@ {'port': {portbindings.HOST_ID: 'host2'}}) # and create l3 agents on corresponding hosts helpers.register_l3_agent(host='host1', - agent_mode=n_const.L3_AGENT_MODE_DVR) + agent_mode=constants.L3_AGENT_MODE_DVR) helpers.register_l3_agent(host='host2', - agent_mode=n_const.L3_AGENT_MODE_DVR) + agent_mode=constants.L3_AGENT_MODE_DVR) # make net external ext_net_id = ext_subnet['subnet']['network_id'] @@ -456,7 +455,7 @@ def test_allowed_addr_pairs_arp_update_for_port_with_original_owner(self): HOST1 = 'host1' helpers.register_l3_agent( - host=HOST1, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() private_net1 = self._make_network(self.fmt, 'net1', True) test_allocation_pools = [{'start': '10.1.0.2', @@ -551,10 +550,10 @@ def test_allowed_addr_pairs_delayed_fip_and_update_arp_entry(self): HOST1 = 'host1' helpers.register_l3_agent( - host=HOST1, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) HOST2 = 'host2' helpers.register_l3_agent( - host=HOST2, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=HOST2, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() private_net1 = self._make_network(self.fmt, 'net1', True) test_allocation_pools = [{'start': '10.1.0.2', @@ -692,7 +691,7 @@ def test_allowed_address_pairs_update_arp_entry(self): HOST1 = 'host1' helpers.register_l3_agent( - host=HOST1, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() private_net1 = self._make_network(self.fmt, 'net1', True) test_allocation_pools = [{'start': '10.1.0.2', @@ -798,7 +797,7 @@ def test_update_service_port_with_allowed_address_pairs(self): HOST1 = 'host1' helpers.register_l3_agent( - host=HOST1, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() private_net1 = self._make_network(self.fmt, 'net1', True) test_allocation_pools = [{'start': '10.1.0.2', @@ -875,7 +874,7 @@ constants.DEVICE_OWNER_LOADBALANCER) # Now change the compute port admin_state_up from True to # False, and see if the vrrp ports device_owner and binding - # inheritence reverts back to normal + # inheritance reverts back to normal mod_int_port = self.core_plugin.update_port( self.context, cur_int_port['id'], {'port': { @@ -937,10 +936,10 @@ # register l3 agents in dvr mode in addition to existing dvr_snat agent HOST1 = 'host1' helpers.register_l3_agent( - host=HOST1, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) HOST2 = 'host2' helpers.register_l3_agent( - host=HOST2, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=HOST2, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() with self.subnet() as subnet: self.l3_plugin.add_router_interface( @@ -1036,7 +1035,7 @@ HOST = 'host1' non_admin_tenant = 'tenant1' helpers.register_l3_agent( - host=HOST, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=HOST, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() with self.network(shared=True) as net,\ self.subnet(network=net) as subnet,\ @@ -1082,7 +1081,7 @@ HOST1, HOST2 = 'host1', 'host2' for host in [HOST1, HOST2]: helpers.register_l3_agent( - host=host, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=host, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() arg_list = (portbindings.HOST_ID,) @@ -1153,7 +1152,7 @@ HOST1, HOST2, HOST3 = 'host1', 'host2', 'host3' for host in 
[HOST1, HOST2, HOST3]: helpers.register_l3_agent( - host=host, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=host, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() arg_list = (portbindings.HOST_ID,) @@ -1494,7 +1493,7 @@ def test_remove_router_interface(self): HOST1 = 'host1' helpers.register_l3_agent( - host=HOST1, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR) router = self._create_router() arg_list = (portbindings.HOST_ID,) with self.subnet() as subnet,\ diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py neutron-9.0.0~b3~dev557/neutron/tests/functional/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,231 @@ +# All Rights Reserved. + +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from oslo_log import log as logging +from oslo_utils import uuidutils +import testtools + +from neutron.common import utils as common_utils +from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager +from neutron.services.trunk import utils +from neutron.tests.common import conn_testers +from neutron.tests.common import helpers +from neutron.tests.common import net_helpers +from neutron.tests.functional import base +from neutron.tests.functional import constants as test_constants + +LOG = logging.getLogger(__name__) + +VLAN_RANGE = set(range(test_constants.VLAN_COUNT)) + + +class FakeOVSDBException(Exception): + pass + + +class TrunkParentPortTestCase(base.BaseSudoTestCase): + def setUp(self): + super(TrunkParentPortTestCase, self).setUp() + trunk_id = uuidutils.generate_uuid() + port_id = uuidutils.generate_uuid() + port_mac = common_utils.get_random_mac('fa:16:3e:00:00:00'.split(':')) + self.trunk = trunk_manager.TrunkParentPort(trunk_id, port_id, port_mac) + self.trunk.bridge = self.useFixture( + net_helpers.OVSTrunkBridgeFixture( + self.trunk.bridge.br_name)).bridge + self.br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge + + def test_plug(self): + self.trunk.plug(self.br_int) + self.assertIn(self.trunk.patch_port_trunk_name, + self.trunk.bridge.get_port_name_list()) + self.assertIn(self.trunk.patch_port_int_name, + self.br_int.get_port_name_list()) + + def test_plug_failure_doesnt_create_ports(self): + with mock.patch.object( + self.trunk.bridge.ovsdb, 'db_set', + side_effect=FakeOVSDBException): + with testtools.ExpectedException(FakeOVSDBException): + self.trunk.plug(self.br_int) + self.assertNotIn(self.trunk.patch_port_trunk_name, + self.trunk.bridge.get_port_name_list()) + self.assertNotIn(self.trunk.patch_port_int_name, + self.br_int.get_port_name_list()) + + def test_unplug(self): + 
self.trunk.plug(self.br_int) + self.trunk.unplug(self.br_int) + self.assertFalse( + self.trunk.bridge.bridge_exists(self.trunk.bridge.br_name)) + self.assertNotIn(self.trunk.patch_port_int_name, + self.br_int.get_port_name_list()) + + def test_unplug_failure_doesnt_delete_bridge(self): + self.trunk.plug(self.br_int) + with mock.patch.object( + self.trunk.bridge.ovsdb, 'del_port', + side_effect=FakeOVSDBException): + with testtools.ExpectedException(FakeOVSDBException): + self.trunk.unplug(self.br_int) + self.assertTrue( + self.trunk.bridge.bridge_exists(self.trunk.bridge.br_name)) + self.assertIn(self.trunk.patch_port_trunk_name, + self.trunk.bridge.get_port_name_list()) + self.assertIn(self.trunk.patch_port_int_name, + self.br_int.get_port_name_list()) + + +class SubPortTestCase(base.BaseSudoTestCase): + def setUp(self): + super(SubPortTestCase, self).setUp() + trunk_id = uuidutils.generate_uuid() + port_id = uuidutils.generate_uuid() + port_mac = common_utils.get_random_mac('fa:16:3e:00:00:00'.split(':')) + trunk_bridge_name = utils.gen_trunk_br_name(trunk_id) + trunk_bridge = self.useFixture( + net_helpers.OVSTrunkBridgeFixture(trunk_bridge_name)).bridge + segmentation_id = helpers.get_not_used_vlan( + trunk_bridge, VLAN_RANGE) + self.subport = trunk_manager.SubPort( + trunk_id, port_id, port_mac, segmentation_id) + self.subport.bridge = trunk_bridge + self.br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge + + def test_plug(self): + self.subport.plug(self.br_int) + self.assertIn(self.subport.patch_port_trunk_name, + self.subport.bridge.get_port_name_list()) + self.assertIn(self.subport.patch_port_int_name, + self.br_int.get_port_name_list()) + self.assertEqual( + self.subport.segmentation_id, + self.subport.bridge.db_get_val( + 'Port', self.subport.patch_port_trunk_name, 'tag')) + + def test_plug_failure_doesnt_create_ports(self): + with mock.patch.object( + self.subport.bridge.ovsdb, 'db_set', + side_effect=FakeOVSDBException): + with testtools.ExpectedException(FakeOVSDBException): + self.subport.plug(self.br_int) + self.assertNotIn(self.subport.patch_port_trunk_name, + self.subport.bridge.get_port_name_list()) + self.assertNotIn(self.subport.patch_port_int_name, + self.br_int.get_port_name_list()) + + def test_unplug(self): + self.subport.plug(self.br_int) + self.subport.unplug(self.br_int) + self.assertNotIn(self.subport.patch_port_trunk_name, + self.subport.bridge.get_port_name_list()) + self.assertNotIn(self.subport.patch_port_int_name, + self.br_int.get_port_name_list()) + + def test_unplug_failure(self): + self.subport.plug(self.br_int) + with mock.patch.object( + self.subport.bridge.ovsdb, 'del_port', + side_effect=FakeOVSDBException): + with testtools.ExpectedException(FakeOVSDBException): + self.subport.unplug(self.br_int) + self.assertIn(self.subport.patch_port_trunk_name, + self.subport.bridge.get_port_name_list()) + self.assertIn(self.subport.patch_port_int_name, + self.br_int.get_port_name_list()) + + +class TrunkManagerTestCase(base.BaseSudoTestCase): + net1_cidr = '192.178.0.1/24' + net2_cidr = '192.168.0.1/24' + + def setUp(self): + super(TrunkManagerTestCase, self).setUp() + trunk_id = uuidutils.generate_uuid() + self.tester = self.useFixture( + conn_testers.OVSTrunkConnectionTester( + self.net1_cidr, utils.gen_trunk_br_name(trunk_id))) + self.trunk_manager = trunk_manager.TrunkManager( + self.tester.bridge) + self.trunk = trunk_manager.TrunkParentPort( + trunk_id, uuidutils.generate_uuid()) + + # TODO(jlibosva): Replace all tester methods with more 
robust tests + def test_connectivity(self): + """Test connectivity with trunk and sub ports. + + In this test we create a vm that has a trunk on net1 and a vm peer on + the same network. We check connectivity between the peer and the vm. + We create a sub port on net2 and a peer, check connectivity again. + + """ + vlan_net1 = helpers.get_not_used_vlan(self.tester.bridge, VLAN_RANGE) + vlan_net2 = helpers.get_not_used_vlan(self.tester.bridge, VLAN_RANGE) + trunk_mac = common_utils.get_random_mac('fa:16:3e:00:00:00'.split(':')) + sub_port_mac = common_utils.get_random_mac( + 'fa:16:3e:00:00:00'.split(':')) + sub_port_segmentation_id = helpers.get_not_used_vlan( + self.tester.bridge, VLAN_RANGE) + LOG.debug("Using %(n1)d vlan tag as local vlan ID for net1 and %(n2)d " + "for local vlan ID for net2", { + 'n1': vlan_net1, 'n2': vlan_net2}) + self.tester.set_peer_tag(vlan_net1) + self.trunk_manager.create_trunk(self.trunk.trunk_id, + self.trunk.port_id, + trunk_mac) + + # tag the patch port, this should be done by the ovs agent but we mock + # it for this test + conn_testers.OVSBaseConnectionTester.set_tag( + self.trunk.patch_port_int_name, self.tester.bridge, vlan_net1) + + self.tester.assert_connection(protocol=self.tester.ICMP, + direction=self.tester.INGRESS) + self.tester.assert_connection(protocol=self.tester.ICMP, + direction=self.tester.EGRESS) + + self.tester.add_vlan_interface_and_peer(sub_port_segmentation_id, + self.net2_cidr) + conn_testers.OVSBaseConnectionTester.set_tag( + self.tester._peer2.port.name, self.tester.bridge, vlan_net2) + + sub_port = trunk_manager.SubPort(self.trunk.trunk_id, + uuidutils.generate_uuid(), + sub_port_mac, + sub_port_segmentation_id) + + self.trunk_manager.add_sub_port(sub_port.trunk_id, + sub_port.port_id, + sub_port.port_mac, + sub_port.segmentation_id) + # tag the patch port, this should be done by the ovs agent but we mock + # it for this test + conn_testers.OVSBaseConnectionTester.set_tag( + sub_port.patch_port_int_name, self.tester.bridge, vlan_net2) + + self.tester.test_sub_port_icmp_connectivity(self.tester.INGRESS) + self.tester.test_sub_port_icmp_connectivity(self.tester.EGRESS) + + self.trunk_manager.remove_sub_port(sub_port.trunk_id, + sub_port.port_id) + self.tester.test_sub_port_icmp_no_connectivity(self.tester.INGRESS) + self.tester.test_sub_port_icmp_no_connectivity(self.tester.EGRESS) + + self.trunk_manager.remove_trunk(self.trunk.trunk_id, + self.trunk.port_id) + self.tester.assert_no_connection(protocol=self.tester.ICMP, + direction=self.tester.INGRESS) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/services/trunk/test_plugin.py neutron-9.0.0~b3~dev557/neutron/tests/functional/services/trunk/test_plugin.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/services/trunk/test_plugin.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/services/trunk/test_plugin.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,51 @@ +# (c) Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.extensions import portbindings as pb +from neutron.services.trunk import plugin as trunk_plugin +from neutron.services.trunk import utils as trunk_utils +from neutron.tests.common import helpers +from neutron.tests.unit.plugins.ml2 import base as ml2_test_base + + +class TestTrunkServicePlugin(ml2_test_base.ML2TestFramework): + + def setUp(self): + super(TestTrunkServicePlugin, self).setUp() + self.trunk_plugin = trunk_plugin.TrunkPlugin() + + def test_ovs_bridge_name_set_when_trunk_bound(self): + helpers.register_ovs_agent(host=helpers.HOST) + with self.port() as port: + trunk_port_id = port['port']['id'] + trunk_req = {'port_id': trunk_port_id, + 'tenant_id': 'test_tenant', + 'sub_ports': []} + trunk_res = self.trunk_plugin.create_trunk(self.context, + {'trunk': trunk_req}) + port['port'][pb.HOST_ID] = helpers.HOST + bound_port = self.core_plugin.update_port(self.context, + trunk_port_id, port) + self.assertEqual( + trunk_utils.gen_trunk_br_name(trunk_res['id']), + bound_port[pb.VIF_DETAILS][pb.VIF_DETAILS_BRIDGE_NAME]) + + def test_ovs_bridge_name_not_set_when_not_trunk(self): + helpers.register_ovs_agent(host=helpers.HOST) + with self.port() as port: + port['port'][pb.HOST_ID] = helpers.HOST + bound_port = self.core_plugin.update_port(self.context, + port['port']['id'], port) + self.assertIsNone( + bound_port[pb.VIF_DETAILS].get(pb.VIF_DETAILS_BRIDGE_NAME)) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/functional/test_server.py neutron-9.0.0~b3~dev557/neutron/tests/functional/test_server.py --- neutron-9.0.0~b2~dev280/neutron/tests/functional/test_server.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/functional/test_server.py 2016-08-03 20:10:34.000000000 +0000 @@ -24,7 +24,7 @@ from oslo_config import cfg import psutil -from neutron.agent.linux import utils +from neutron.common import utils from neutron import service from neutron.tests import base from neutron import worker as neutron_worker @@ -223,7 +223,7 @@ def setUp(self): super(TestRPCServer, self).setUp() - self.setup_coreplugin(TARGET_PLUGIN) + self.setup_coreplugin('ml2') self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True) self.plugin = self._plugin_patcher.start() self.plugin.return_value.rpc_workers_supported = True @@ -257,7 +257,7 @@ def setUp(self): super(TestPluginWorker, self).setUp() - self.setup_coreplugin(TARGET_PLUGIN) + self.setup_coreplugin('ml2') self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True) self.plugin = self._plugin_patcher.start() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/retargetable/client_fixtures.py neutron-9.0.0~b3~dev557/neutron/tests/retargetable/client_fixtures.py --- neutron-9.0.0~b2~dev280/neutron/tests/retargetable/client_fixtures.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/retargetable/client_fixtures.py 2016-08-03 20:10:34.000000000 +0000 @@ -73,7 +73,7 @@ def _setUp(self): super(PluginClientFixture, self)._setUp() - self.useFixture(testlib_api.SqlFixture()) + self.useFixture(testlib_api.StaticSqlFixture()) self.useFixture(self.plugin_conf) self.useFixture(base.PluginFixture(self.plugin_conf.plugin_name)) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/admin/test_dhcp_agent_scheduler.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/admin/test_dhcp_agent_scheduler.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/admin/test_dhcp_agent_scheduler.py 
2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/admin/test_dhcp_agent_scheduler.py 2016-08-03 20:10:34.000000000 +0000 @@ -12,8 +12,10 @@ # License for the specific language governing permissions and limitations # under the License. +from neutron_lib import constants from tempest import test +from neutron.common import utils from neutron.tests.tempest.api import base @@ -30,6 +32,18 @@ cls.cidr = cls.subnet['cidr'] cls.port = cls.create_port(cls.network) + @test.idempotent_id('f164801e-1dd8-4b8b-b5d3-cc3ac77cfaa5') + def test_dhcp_port_status_active(self): + + def dhcp_port_active(): + for p in self.client.list_ports( + network_id=self.network['id'])['ports']: + if (p['device_owner'] == constants.DEVICE_OWNER_DHCP and + p['status'] == constants.PORT_STATUS_ACTIVE): + return True + return False + utils.wait_until_true(dhcp_port_active) + @test.idempotent_id('5032b1fe-eb42-4a64-8f3b-6e189d8b5c7d') def test_list_dhcp_agent_hosting_network(self): self.admin_client.list_dhcp_agent_hosting_network( diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/admin/test_shared_network_extension.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/admin/test_shared_network_extension.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/admin/test_shared_network_extension.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/admin/test_shared_network_extension.py 2016-08-29 20:05:49.000000000 +0000 @@ -192,6 +192,22 @@ )['rbac_policy'] return {'network': net, 'subnet': subnet, 'policy': pol} + @test.attr(type='smoke') + @test.idempotent_id('86c3529b-1231-40de-803c-bfffffff1eee') + def test_create_rbac_policy_with_target_tenant_none(self): + with testtools.ExpectedException(lib_exc.BadRequest): + self._make_admin_net_and_subnet_shared_to_tenant_id( + tenant_id=None) + + @test.attr(type='smoke') + @test.idempotent_id('86c3529b-1231-40de-803c-bfffffff1fff') + def test_create_rbac_policy_with_target_tenant_too_long_id(self): + with testtools.ExpectedException(lib_exc.BadRequest): + target_tenant = '1234' * 100 + self._make_admin_net_and_subnet_shared_to_tenant_id( + tenant_id=target_tenant) + + @test.attr(type='smoke') @test.idempotent_id('86c3529b-1231-40de-803c-afffffff1fff') def test_network_only_visible_to_policy_target(self): net = self._make_admin_net_and_subnet_shared_to_tenant_id( diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/base.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/base.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/base.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/base.py 2016-08-29 20:05:49.000000000 +0000 @@ -61,10 +61,11 @@ @classmethod def get_client_manager(cls, credential_type=None, roles=None, force_new=None): - manager = test.BaseTestCase.get_client_manager( - credential_type=credential_type, - roles=roles, - force_new=force_new) + manager = super(BaseNetworkTest, cls).get_client_manager( + credential_type=credential_type, + roles=roles, + force_new=force_new + ) # Neutron uses a different clients manager than the one in the Tempest return clients.Manager(manager.credentials) @@ -471,7 +472,7 @@ def _require_sorting(f): @functools.wraps(f) def inner(self, *args, **kwargs): - if not CONF.neutron_plugin_options.validate_sorting: + if not test.is_extension_enabled("sorting", "network"): self.skipTest('Sorting feature is required') return f(self, *args, **kwargs) return inner @@ -480,7 +481,7 @@ def 
_require_pagination(f): @functools.wraps(f) def inner(self, *args, **kwargs): - if not CONF.neutron_plugin_options.validate_pagination: + if not test.is_extension_enabled("pagination", "network"): self.skipTest('Pagination feature is required') return f(self, *args, **kwargs) return inner @@ -491,6 +492,8 @@ # This should be defined by subclasses to reflect resource name to test resource = None + field = 'name' + # NOTE(ihrachys): some names, like those starting with an underscore (_) # are sorted differently depending on whether the plugin implements native # sorting support, or not. So we avoid any such cases here, sticking to @@ -510,7 +513,7 @@ actual = list(actual) self.assertEqual(len(original), len(actual)) for expected, res in zip(original, actual): - self.assertEqual(expected['name'], res['name']) + self.assertEqual(expected[self.field], res[self.field]) @utils.classproperty def plural_name(self): @@ -537,13 +540,13 @@ def _test_list_sorts(self, direction): sort_args = { 'sort_dir': direction, - 'sort_key': 'name' + 'sort_key': self.field } body = self.list_method(**sort_args) resources = self._extract_resources(body) self.assertNotEmpty( resources, "%s list returned is empty" % self.resource) - retrieved_names = [res['name'] for res in resources] + retrieved_names = [res[self.field] for res in resources] expected = sorted(retrieved_names) if direction == constants.SORT_DIRECTION_DESC: expected = list(reversed(expected)) @@ -580,7 +583,7 @@ # first, collect all resources for later comparison sort_args = { 'sort_dir': constants.SORT_DIRECTION_ASC, - 'sort_key': 'name' + 'sort_key': self.field } body = self.list_method(**sort_args) expected_resources = self._extract_resources(body) @@ -666,7 +669,7 @@ self, direction=constants.SORT_DIRECTION_ASC): pagination_args = { 'sort_dir': direction, - 'sort_key': 'name', + 'sort_key': self.field, } body = self.list_method(**pagination_args) expected_resources = self._extract_resources(body) @@ -709,7 +712,7 @@ def _test_list_pagination_page_reverse(self, direction): pagination_args = { 'sort_dir': direction, - 'sort_key': 'name', + 'sort_key': self.field, 'limit': 3, } body = self.list_method(**pagination_args) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/clients.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/clients.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/clients.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/clients.py 2016-08-03 20:10:34.000000000 +0000 @@ -15,8 +15,8 @@ from tempest.lib.services.compute import keypairs_client from tempest.lib.services.compute import servers_client +from tempest.lib.services.identity.v2 import tenants_client from tempest import manager -from tempest.services.identity.v2.json import tenants_client from neutron.tests.tempest import config from neutron.tests.tempest.services.network.json import network_client diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_allowed_address_pair.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_allowed_address_pair.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_allowed_address_pair.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_allowed_address_pair.py 2016-08-29 20:05:49.000000000 +0000 @@ -19,8 +19,6 @@ from neutron.tests.tempest.api import base from neutron.tests.tempest import config -CONF = config.CONF - class AllowedAddressPairTestJSON(base.BaseNetworkTest): diff -Nru 
neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_auto_allocated_topology.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_auto_allocated_topology.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_auto_allocated_topology.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_auto_allocated_topology.py 2016-08-29 20:05:49.000000000 +0000 @@ -22,11 +22,14 @@ class TestAutoAllocatedTopology(base.BaseAdminNetworkTest): """ - NOTE: This test may eventually migrate to Tempest. - - Tests the Get-Me-A-Network operation in the Neutron API + Tests the Get-Me-A-Network operations in the Neutron API using the REST client for Neutron. """ + # NOTE(armax): this is a precaution to avoid interference + # from other tests exercising this extension. So long as + # all tests are added under TestAutoAllocatedTopology, + # nothing bad should happen. + force_tenant_isolation = True @classmethod @test.requires_ext(extension="auto-allocated-topology", service="network") @@ -58,9 +61,10 @@ return len([resource['id'] for resource in resources if resource['name'].startswith('auto_allocated_')]) - networks = _count(self.client.list_networks()['networks']) - subnets = _count(self.client.list_subnets()['subnets']) - routers = _count(self.client.list_routers()['routers']) + up = {'admin_state_up': True} + networks = _count(self.client.list_networks(**up)['networks']) + subnets = _count(self.client.list_subnets(**up)['subnets']) + routers = _count(self.client.list_routers(**up)['routers']) return networks, subnets, routers def _add_topology_cleanup(self, client): @@ -100,3 +104,14 @@ # After the initial GET, the API should be idempotent self.assertEqual(network_id1, network_id2) self.assertEqual(resources_after1, resources_after2) + + @test.idempotent_id('aabc0b02-cee4-11e5-9f3c-091127605a2b') + def test_delete_allocated_net_topology_as_tenant(self): + resources_before = self._count_topology_resources() + self.assertEqual((0, 0, 0), resources_before) + body = self.client.get_auto_allocated_topology() + topology = body['auto_allocated_topology'] + self.assertIsNotNone(topology) + self.client.delete_auto_allocated_topology() + resources_after = self._count_topology_resources() + self.assertEqual((0, 0, 0), resources_after) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_extensions.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_extensions.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_extensions.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_extensions.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
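Elsewhere in this patch the tempest base class stops gating sorting and pagination tests on the validate_sorting/validate_pagination config flags and instead asks whether the deployment advertises the corresponding API extensions; the ExtensionsTest below cross-checks that advertisement against the extension list returned by the API. A minimal sketch of the new gating pattern, assuming tempest's stock test.is_extension_enabled helper used above:

    from tempest import test

    def require_network_extension(testcase, alias):
        # Skip unless the 'alias' extension is enabled for the network
        # service in the tempest configuration.
        if not test.is_extension_enabled(alias, "network"):
            testcase.skipTest('%s extension is required' % alias)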
+ +from tempest import test + +from neutron.tests.tempest.api import base + + +class ExtensionsTest(base.BaseNetworkTest): + + def _test_list_extensions_includes(self, ext): + body = self.client.list_extensions() + extensions = {ext_['alias'] for ext_ in body['extensions']} + self.assertNotEmpty(extensions, "Extension list returned is empty") + ext_enabled = test.is_extension_enabled(ext, "network") + if ext_enabled: + self.assertIn(ext, extensions) + else: + self.assertNotIn(ext, extensions) + + @test.idempotent_id('262420b7-a4bb-4a3e-b4b5-e73bad18df8c') + def test_list_extensions_sorting(self): + self._test_list_extensions_includes('sorting') + + @test.idempotent_id('19db409e-a23f-445d-8bc8-ca3d64c84706') + def test_list_extensions_pagination(self): + self._test_list_extensions_includes('pagination') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_floating_ips.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_floating_ips.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_floating_ips.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_floating_ips.py 2016-08-29 20:05:49.000000000 +0000 @@ -41,6 +41,20 @@ for i in range(2): cls.create_port(cls.network) + @test.idempotent_id('f6a0fb6c-cb64-4b81-b0d5-f41d8f69d22d') + def test_blank_update_clears_association(self): + # originally the floating IP had no attributes other than its + # association, so an update with an empty body was a signal to + # clear the association. This test ensures we maintain that behavior. + body = self.client.create_floatingip( + floating_network_id=self.ext_net_id, + port_id=self.ports[0]['id'], + description='d1' + )['floatingip'] + self.assertEqual(self.ports[0]['id'], body['port_id']) + body = self.client.update_floatingip(body['id'])['floatingip'] + self.assertFalse(body['port_id']) + @test.idempotent_id('c72c1c0c-2193-4aca-eeee-b1442641ffff') @test.requires_ext(extension="standard-attr-description", service="network") diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_network_ip_availability.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_network_ip_availability.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_network_ip_availability.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_network_ip_availability.py 2016-08-29 20:05:49.000000000 +0000 @@ -24,8 +24,6 @@ from neutron_lib import constants as lib_constants -CONF = config.CONF - # 3 IP addresses are taken from every total for IPv4 these are reserved DEFAULT_IP4_RESERVED = 3 # 2 IP addresses are taken from every total for IPv6 these are reserved @@ -86,7 +84,7 @@ mask_bits = config.safe_get_config_value( 'network', 'project_network_v6_mask_bits') - subnet_cidr = cidr.subnet(mask_bits).next() + subnet_cidr = next(cidr.subnet(mask_bits)) prefix_len = subnet_cidr.prefixlen subnet = self.create_subnet(network, cidr=subnet_cidr, diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_networks.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_networks.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_networks.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_networks.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,9 +16,6 @@ from tempest import test from neutron.tests.tempest.api import base -from neutron.tests.tempest import config - -CONF = config.CONF class NetworksTestJSON(base.BaseNetworkTest): @@ -95,7 +92,7 @@ resource 
= 'network' - list_kwargs = {'shared': False} + list_kwargs = {'shared': False, 'router:external': False} @classmethod def resource_setup(cls): @@ -103,47 +100,38 @@ for name in cls.resource_names: cls.create_network(network_name=name) - @test.attr(type='smoke') @test.idempotent_id('de27d34a-bd9d-4516-83d6-81ef723f7d0d') def test_list_sorts_asc(self): self._test_list_sorts_asc() - @test.attr(type='smoke') @test.idempotent_id('e767a160-59f9-4c4b-8dc1-72124a68640a') def test_list_sorts_desc(self): self._test_list_sorts_desc() - @test.attr(type='smoke') @test.idempotent_id('71389852-f57b-49f2-b109-77b705e9e8af') def test_list_pagination(self): self._test_list_pagination() - @test.attr(type='smoke') @test.idempotent_id('b7e153d2-37c3-48d4-8390-ec13498fee3d') def test_list_pagination_with_marker(self): self._test_list_pagination_with_marker() - @test.attr(type='smoke') @test.idempotent_id('8a9c89df-0ee7-4c0d-8f1d-ec8f27cf362f') def test_list_pagination_with_href_links(self): self._test_list_pagination_with_href_links() - @test.attr(type='smoke') @test.idempotent_id('79a52810-2156-4ab6-b577-9e46e58d4b58') def test_list_pagination_page_reverse_asc(self): self._test_list_pagination_page_reverse_asc() - @test.attr(type='smoke') @test.idempotent_id('36a4671f-a542-442f-bc44-a8873ee778d1') def test_list_pagination_page_reverse_desc(self): self._test_list_pagination_page_reverse_desc() - @test.attr(type='smoke') @test.idempotent_id('13eb066c-aa90-406d-b4c3-39595bf8f910') def test_list_pagination_page_reverse_with_href_links(self): self._test_list_pagination_page_reverse_with_href_links() - @test.attr(type='smoke') @test.idempotent_id('f1867fc5-e1d6-431f-bc9f-8b882e43a7f9') def test_list_no_pagination_limit_0(self): self._test_list_no_pagination_limit_0() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_ports.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_ports.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_ports.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_ports.py 2016-08-03 20:10:34.000000000 +0000 @@ -59,47 +59,38 @@ for name in cls.resource_names: cls.create_port(net, name=name) - @test.attr(type='smoke') @test.idempotent_id('9ab73df4-960a-4ae3-87d3-60992b8d3e2d') def test_list_sorts_asc(self): self._test_list_sorts_asc() - @test.attr(type='smoke') @test.idempotent_id('b426671d-7270-430f-82ff-8f33eec93010') def test_list_sorts_desc(self): self._test_list_sorts_desc() - @test.attr(type='smoke') @test.idempotent_id('a202fdc8-6616-45df-b6a0-463932de6f94') def test_list_pagination(self): self._test_list_pagination() - @test.attr(type='smoke') @test.idempotent_id('f4723b8e-8186-4b9a-bf9e-57519967e048') def test_list_pagination_with_marker(self): self._test_list_pagination_with_marker() - @test.attr(type='smoke') @test.idempotent_id('fcd02a7a-f07e-4d5e-b0ca-b58e48927a9b') def test_list_pagination_with_href_links(self): self._test_list_pagination_with_href_links() - @test.attr(type='smoke') @test.idempotent_id('3afe7024-77ab-4cfe-824b-0b2bf4217727') def test_list_no_pagination_limit_0(self): self._test_list_no_pagination_limit_0() - @test.attr(type='smoke') @test.idempotent_id('b8857391-dc44-40cc-89b7-2800402e03ce') def test_list_pagination_page_reverse_asc(self): self._test_list_pagination_page_reverse_asc() - @test.attr(type='smoke') @test.idempotent_id('4e51e9c9-ceae-4ec0-afd4-147569247699') def test_list_pagination_page_reverse_desc(self): self._test_list_pagination_page_reverse_desc() - 
@test.attr(type='smoke') @test.idempotent_id('74293e59-d794-4a93-be09-38667199ef68') def test_list_pagination_page_reverse_with_href_links(self): self._test_list_pagination_page_reverse_with_href_links() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_qos.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_qos.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_qos.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_qos.py 2016-08-29 20:05:49.000000000 +0000 @@ -75,6 +75,28 @@ self.assertTrue(retrieved_policy['shared']) self.assertEqual([], retrieved_policy['rules']) + @test.idempotent_id('6e880e0f-bbfc-4e54-87c6-680f90e1b618') + def test_policy_update_forbidden_for_regular_tenants_own_policy(self): + policy = self.create_qos_policy(name='test-policy', + description='', + shared=False, + tenant_id=self.client.tenant_id) + self.assertRaises( + exceptions.Forbidden, + self.client.update_qos_policy, + policy['id'], description='test policy') + + @test.idempotent_id('4ecfd7e7-47b6-4702-be38-be9235901a87') + def test_policy_update_forbidden_for_regular_tenants_foreign_policy(self): + policy = self.create_qos_policy(name='test-policy', + description='', + shared=False, + tenant_id=self.admin_client.tenant_id) + self.assertRaises( + exceptions.NotFound, + self.client.update_qos_policy, + policy['id'], description='test policy') + @test.idempotent_id('ee263db4-009a-4641-83e5-d0e83506ba4c') def test_shared_policy_update(self): policy = self.create_qos_policy(name='test-policy', @@ -124,7 +146,7 @@ # In theory, we could make the test conditional on which ml2 drivers # are enabled in gate (or more specifically, on which supported qos # rules are claimed by core plugin), but that option doesn't seem to be - # available thru tempest.lib framework + # available through tempest.lib framework expected_rule_types = [] expected_rule_details = ['type'] @@ -426,6 +448,34 @@ self.client.create_bandwidth_limit_rule, 'policy', 1, 2) + @test.idempotent_id('1bfc55d9-6fd8-4293-ab3a-b1d69bf7cd2e') + def test_rule_update_forbidden_for_regular_tenants_own_policy(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=False, + tenant_id=self.client.tenant_id) + rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'], + max_kbps=1, + max_burst_kbps=1) + self.assertRaises( + exceptions.NotFound, + self.client.update_bandwidth_limit_rule, + policy['id'], rule['id'], max_kbps=2, max_burst_kbps=4) + + @test.idempotent_id('9a607936-4b6f-4c2f-ad21-bd5b3d4fc91f') + def test_rule_update_forbidden_for_regular_tenants_foreign_policy(self): + policy = self.create_qos_policy(name='test-policy', + description='test policy', + shared=False, + tenant_id=self.admin_client.tenant_id) + rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'], + max_kbps=1, + max_burst_kbps=1) + self.assertRaises( + exceptions.NotFound, + self.client.update_bandwidth_limit_rule, + policy['id'], rule['id'], max_kbps=2, max_burst_kbps=4) + @test.idempotent_id('ce0bd0c2-54d9-4e29-85f1-cfb36ac3ebe2') def test_get_rules_by_policy(self): policy1 = self.create_qos_policy(name='test-policy1', @@ -737,7 +787,7 @@ policy_rules = retrieved_policy['policy']['rules'] self.assertEqual(1, len(policy_rules)) self.assertEqual(rule['id'], policy_rules[0]['id']) - self.assertEqual(qos_consts.RULE_TYPE_DSCP_MARK, + self.assertEqual(qos_consts.RULE_TYPE_DSCP_MARKING, policy_rules[0]['type']) 
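The negative tests added to this file pin down the QoS ownership rules: a regular tenant can see its own unshared policy but may not update it (the API returns Forbidden), while another tenant's unshared policy is invisible altogether, so an update attempt surfaces as NotFound; rule updates are admin-only and return NotFound in both cases. A minimal sketch of the policy-update half of that contract, assuming the tempest.lib exception classes the tests themselves use:

    from tempest.lib import exceptions

    def expected_policy_update_error(caller_is_owner):
        # Own unshared policy: visible but read-only for regular tenants.
        # Foreign unshared policy: hidden entirely by the policy engine.
        return exceptions.Forbidden if caller_is_owner else exceptions.NotFound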
@test.idempotent_id('08553ffe-030f-4037-b486-7e0b8fb9385a') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_revisions.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_revisions.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_revisions.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_revisions.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,136 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from tempest import test + +from neutron.tests.tempest.api import base +from neutron.tests.tempest.api import base_security_groups as bsg +from neutron.tests.tempest import config + + +class TestRevisions(base.BaseAdminNetworkTest, bsg.BaseSecGroupTest): + + @classmethod + @test.requires_ext(extension="revisions", service="network") + def skip_checks(cls): + super(TestRevisions, cls).skip_checks() + + @test.idempotent_id('4a26a4be-9c53-483c-bc50-b53f1db10ac6') + def test_update_network_bumps_revision(self): + net = self.create_network() + self.assertIn('revision', net) + updated = self.client.update_network(net['id'], name='newnet') + self.assertGreater(updated['network']['revision'], net['revision']) + + @test.idempotent_id('cac7ecde-12d5-4331-9a03-420899dea077') + def test_update_port_bumps_revision(self): + net = self.create_network() + port = self.create_port(net) + self.assertIn('revision', port) + updated = self.client.update_port(port['id'], name='newport') + self.assertGreater(updated['port']['revision'], port['revision']) + + @test.idempotent_id('c1c4fa41-8e89-44d0-9bfc-409f3b66dc57') + def test_update_subnet_bumps_revision(self): + net = self.create_network() + subnet = self.create_subnet(net) + self.assertIn('revision', subnet) + updated = self.client.update_subnet(subnet['id'], name='newsub') + self.assertGreater(updated['subnet']['revision'], subnet['revision']) + + @test.idempotent_id('e8c5d7db-2b8d-4615-a476-6e537437c4f2') + def test_update_subnetpool_bumps_revision(self): + sp = self.create_subnetpool('subnetpool', default_prefixlen=24, + prefixes=['10.0.0.0/8']) + self.assertIn('revision', sp) + updated = self.admin_client.update_subnetpool(sp['id'], name='sp2') + self.assertGreater(updated['subnetpool']['revision'], sp['revision']) + + @test.idempotent_id('6c256f71-c929-4200-b3dc-4e1843506be5') + @test.requires_ext(extension="security-group", service="network") + def test_update_sg_group_bumps_revision(self): + sg, name = self._create_security_group() + self.assertIn('revision', sg['security_group']) + update_body = self.client.update_security_group( + sg['security_group']['id'], name='new_sg_name') + self.assertGreater(update_body['security_group']['revision'], + sg['security_group']['revision']) + + @test.idempotent_id('6489632f-8550-4453-a674-c98849742967') + @test.requires_ext(extension="security-group", service="network") + def test_update_port_sg_binding_bumps_revision(self): + net = self.create_network() + port = self.create_port(net) + sg = self._create_security_group()[0] + 
self.client.update_port( + port['id'], security_groups=[sg['security_group']['id']]) + updated = self.client.show_port(port['id']) + self.client.update_port(port['id'], security_groups=[]) + # TODO(kevinbenton): these extra shows after the update are + # to work around the fact that ML2 creates the result dict before + # commit happens if the port is unbound. The update response should + # be usable directly once that is fixed. + updated2 = self.client.show_port(port['id']) + self.assertGreater(updated['port']['revision'], port['revision']) + self.assertGreater(updated2['port']['revision'], + updated['port']['revision']) + + @test.idempotent_id('29c7ab2b-d1d8-425d-8cec-fcf632960f22') + @test.requires_ext(extension="security-group", service="network") + def test_update_sg_rule_bumps_sg_revision(self): + sg, name = self._create_security_group() + rule = self.client.create_security_group_rule( + security_group_id=sg['security_group']['id'], + protocol='tcp', direction='ingress', ethertype=self.ethertype, + port_range_min=60, port_range_max=70) + updated = self.client.show_security_group(sg['security_group']['id']) + self.assertGreater(updated['security_group']['revision'], + sg['security_group']['revision']) + self.client.delete_security_group_rule( + rule['security_group_rule']['id']) + updated2 = self.client.show_security_group(sg['security_group']['id']) + self.assertGreater(updated2['security_group']['revision'], + updated['security_group']['revision']) + + @test.idempotent_id('4a37bde9-1975-47e0-9b8c-2c9ca36415b0') + @test.requires_ext(extension="router", service="network") + def test_update_router_bumps_revision(self): + subnet = self.create_subnet(self.create_network()) + router = self.create_router(router_name='test') + self.assertIn('revision', router) + rev1 = router['revision'] + router = self.client.update_router(router['id'], + name='test2')['router'] + self.assertGreater(router['revision'], rev1) + self.create_router_interface(router['id'], subnet['id']) + updated = self.client.show_router(router['id'])['router'] + self.assertGreater(updated['revision'], router['revision']) + + @test.idempotent_id('9de71ebc-f5df-4cd0-80bc-60299fce3ce9') + @test.requires_ext(extension="router", service="network") + @test.requires_ext(extension="standard-attr-description", + service="network") + def test_update_floatingip_bumps_revision(self): + ext_id = config.CONF.network.public_network_id + network = self.create_network() + subnet = self.create_subnet(network) + router = self.create_router('test', external_network_id=ext_id) + self.create_router_interface(router['id'], subnet['id']) + port = self.create_port(network) + body = self.client.create_floatingip( + floating_network_id=ext_id, + port_id=port['id'], + description='d1' + )['floatingip'] + self.assertIn('revision', body) + b2 = self.client.update_floatingip(body['id'], description='d2') + self.assertGreater(b2['floatingip']['revision'], body['revision']) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_routers_negative.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_routers_negative.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_routers_negative.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_routers_negative.py 2016-08-03 20:10:34.000000000 +0000 @@ -24,6 +24,11 @@ class RoutersNegativeTestBase(base.BaseRouterTest): @classmethod + @test.requires_ext(extension="router", service="network") + def skip_checks(cls): +
super(RoutersNegativeTestBase, cls).skip_checks() + + @classmethod def resource_setup(cls): super(RoutersNegativeTestBase, cls).resource_setup() cls.router = cls.create_router(data_utils.rand_name('router')) @@ -33,11 +38,6 @@ class RoutersNegativeTest(RoutersNegativeTestBase): - @classmethod - @test.requires_ext(extension="router", service="network") - def skip_checks(cls): - super(RoutersNegativeTest, cls).skip_checks() - @test.attr(type='negative') @test.idempotent_id('e3e751af-15a2-49cc-b214-a7154579e94f') def test_delete_router_in_use(self): @@ -49,6 +49,28 @@ self.client.delete_router(self.router['id']) +class RoutersNegativePolicyTest(RoutersNegativeTestBase): + + credentials = ['admin', 'primary', 'alt'] + + @test.attr(type='negative') + @test.idempotent_id('159f576d-a423-46b5-b501-622694c02f6b') + def test_add_interface_wrong_tenant(self): + client2 = self.alt_manager.network_client + network = client2.create_network()['network'] + self.addCleanup(client2.delete_network, network['id']) + subnet = self.create_subnet(network, client=client2) + # This port is deleted after a test by remove_router_interface. + port = client2.create_port(network_id=network['id'])['port'] + self.addCleanup(client2.delete_port, port['id']) + with testtools.ExpectedException(lib_exc.NotFound): + client2.add_router_interface_with_port_id( + self.router['id'], port['id']) + with testtools.ExpectedException(lib_exc.NotFound): + client2.add_router_interface_with_subnet_id( + self.router['id'], subnet['id']) + + class DvrRoutersNegativeTest(RoutersNegativeTestBase): @classmethod diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_routers.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_routers.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_routers.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_routers.py 2016-08-03 20:10:34.000000000 +0000 @@ -18,6 +18,7 @@ from tempest.lib.common.utils import data_utils from tempest import test +from neutron.common import utils from neutron.tests.tempest.api import base from neutron.tests.tempest.api import base_routers from neutron.tests.tempest import config @@ -153,6 +154,17 @@ 'enable_snat': False}) self._verify_gateway_port(router['id']) + @test.idempotent_id('db3093b1-93b6-4893-be83-c4716c251b3e') + def test_router_interface_status(self): + network = self.create_network() + subnet = self.create_subnet(network) + # Add router interface with subnet id + router = self._create_router(data_utils.rand_name('router-'), True) + intf = self.create_router_interface(router['id'], subnet['id']) + status_active = lambda: self.client.show_port( + intf['port_id'])['port']['status'] == 'ACTIVE' + utils.wait_until_true(status_active) + @test.idempotent_id('c86ac3a8-50bd-4b00-a6b8-62af84a0765c') @test.requires_ext(extension='extraroute', service='network') def test_update_extra_route(self): @@ -262,47 +274,38 @@ for name in cls.resource_names: cls.create_router(router_name=name) - @test.attr(type='smoke') @test.idempotent_id('03a69efb-90a7-435b-bb5c-3add3612085a') def test_list_sorts_asc(self): self._test_list_sorts_asc() - @test.attr(type='smoke') @test.idempotent_id('95913d30-ff41-4b17-9f44-5258c651e78c') def test_list_sorts_desc(self): self._test_list_sorts_desc() - @test.attr(type='smoke') @test.idempotent_id('7f7d40b1-e165-4817-8dc5-02f8e2f0dff3') def test_list_pagination(self): self._test_list_pagination() - @test.attr(type='smoke') @test.idempotent_id('a5b83e83-3d98-45bb-a2c7-0ee179ffd42c') def 
test_list_pagination_with_marker(self): self._test_list_pagination_with_marker() - @test.attr(type='smoke') @test.idempotent_id('40804af8-c25d-45f8-b8a8-b4c70345215d') def test_list_pagination_with_href_links(self): self._test_list_pagination_with_href_links() - @test.attr(type='smoke') @test.idempotent_id('77b9676c-d3cb-43af-a0e8-a5b8c6099e70') def test_list_pagination_page_reverse_asc(self): self._test_list_pagination_page_reverse_asc() - @test.attr(type='smoke') @test.idempotent_id('3133a2c5-1bb9-4fc7-833e-cf9a1d160255') def test_list_pagination_page_reverse_desc(self): self._test_list_pagination_page_reverse_desc() - @test.attr(type='smoke') @test.idempotent_id('8252e2f0-b3da-4738-8e25-f6f8d878a2da') def test_list_pagination_page_reverse_with_href_links(self): self._test_list_pagination_page_reverse_with_href_links() - @test.attr(type='smoke') @test.idempotent_id('fb102124-20f8-4cb3-8c81-f16f5e41d192') def test_list_no_pagination_limit_0(self): self._test_list_no_pagination_limit_0() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_security_groups_negative.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_security_groups_negative.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_security_groups_negative.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_security_groups_negative.py 2016-08-29 20:05:49.000000000 +0000 @@ -17,9 +17,6 @@ from tempest import test from neutron.tests.tempest.api import base_security_groups as base -from neutron.tests.tempest import config - -CONF = config.CONF class NegativeSecGroupTest(base.BaseSecGroupTest): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_subnetpools.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_subnetpools.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_subnetpools.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_subnetpools.py 2016-08-03 20:10:34.000000000 +0000 @@ -355,47 +355,38 @@ for name in cls.resource_names: cls._create_subnetpool(name=name) - @test.attr(type='smoke') @test.idempotent_id('6e3f842e-6bfb-49cb-82d3-0026be4e8e04') def test_list_sorts_asc(self): self._test_list_sorts_asc() - @test.attr(type='smoke') @test.idempotent_id('f336859b-b868-438c-a6fc-2c06374115f2') def test_list_sorts_desc(self): self._test_list_sorts_desc() - @test.attr(type='smoke') @test.idempotent_id('1291fae7-c196-4372-ad59-ce7988518f7b') def test_list_pagination(self): self._test_list_pagination() - @test.attr(type='smoke') @test.idempotent_id('ddb20d14-1952-49b4-a17e-231cc2239a52') def test_list_pagination_with_marker(self): self._test_list_pagination_with_marker() - @test.attr(type='smoke') @test.idempotent_id('b3bd9665-2769-4a43-b50c-31b1add12891') def test_list_pagination_with_href_links(self): self._test_list_pagination_with_href_links() - @test.attr(type='smoke') @test.idempotent_id('1ec1f325-43b0-406e-96ce-20539e38a61d') def test_list_pagination_page_reverse_asc(self): self._test_list_pagination_page_reverse_asc() - @test.attr(type='smoke') @test.idempotent_id('f43a293e-4aaa-48f4-aeaf-de63a676357c') def test_list_pagination_page_reverse_desc(self): self._test_list_pagination_page_reverse_desc() - @test.attr(type='smoke') @test.idempotent_id('73511385-839c-4829-8ac1-b5ad992126c4') def test_list_pagination_page_reverse_with_href_links(self): self._test_list_pagination_page_reverse_with_href_links() - @test.attr(type='smoke') 
@test.idempotent_id('82a13efc-c18f-4249-b8ec-cec7cf26fbd6') def test_list_no_pagination_limit_0(self): self._test_list_no_pagination_limit_0() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_subnets.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_subnets.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_subnets.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_subnets.py 2016-08-03 20:10:34.000000000 +0000 @@ -28,47 +28,38 @@ for name in cls.resource_names: cls.create_subnet(net, name=name) - @test.attr(type='smoke') @test.idempotent_id('d2d61995-5dd5-4b93-bce7-3edefdb79563') def test_list_sorts_asc(self): self._test_list_sorts_asc() - @test.attr(type='smoke') @test.idempotent_id('c3c6b0af-c4ac-4da0-b568-8d08ae550604') def test_list_sorts_desc(self): self._test_list_sorts_desc() - @test.attr(type='smoke') @test.idempotent_id('b93063b3-f713-406e-bf93-e5738e09153c') def test_list_pagination(self): self._test_list_pagination() - @test.attr(type='smoke') @test.idempotent_id('2ddd9aa6-de28-410f-9cbc-ce752893c407') def test_list_pagination_with_marker(self): self._test_list_pagination_with_marker() - @test.attr(type='smoke') @test.idempotent_id('351183ef-6ed9-4d71-a9f2-a5ac049bd7ea') def test_list_pagination_with_href_links(self): self._test_list_pagination_with_href_links() - @test.attr(type='smoke') @test.idempotent_id('dfaa20ca-6d84-4f26-962f-2fee4d247cd9') def test_list_pagination_page_reverse_asc(self): self._test_list_pagination_page_reverse_asc() - @test.attr(type='smoke') @test.idempotent_id('40552213-3e12-4d6a-86f3-dda92f3de88c') def test_list_pagination_page_reverse_desc(self): self._test_list_pagination_page_reverse_desc() - @test.attr(type='smoke') @test.idempotent_id('3cea9053-a731-4480-93ee-19b2c28a9ce4') def test_list_pagination_page_reverse_with_href_links(self): self._test_list_pagination_page_reverse_with_href_links() - @test.attr(type='smoke') @test.idempotent_id('d851937c-9821-4b46-9d18-43e9077ecac0') def test_list_no_pagination_limit_0(self): self._test_list_no_pagination_limit_0() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_trunk_details.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_trunk_details.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_trunk_details.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_trunk_details.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,57 @@ +# Copyright 2016 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
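All of the search-criteria suites in this patch share the same `_test_list_*` helpers, which page through a set of named resources using sort keys, a limit and a marker. A small self-contained sketch of the marker/limit paging they exercise, in plain Python over dicts (the helper name is hypothetical):

# Sketch of marker/limit paging: each page resumes after the 'marker' id
# returned with the previous page; limit=0 disables pagination.
def paginate(items, limit, marker=None, page_reverse=False):
    ordered = list(reversed(items)) if page_reverse else list(items)
    if not limit:
        return ordered
    start = 0
    if marker is not None:
        start = [item['id'] for item in ordered].index(marker) + 1
    return ordered[start:start + limit]


nets = [{'id': 'a'}, {'id': 'b'}, {'id': 'c'}]
assert paginate(nets, 2) == [{'id': 'a'}, {'id': 'b'}]
assert paginate(nets, 2, marker='b') == [{'id': 'c'}]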
+ +from tempest import test + +from neutron.tests.tempest.api import test_trunk + + +class TestTrunkDetailsJSON(test_trunk.TrunkTestJSONBase): + + extension = 'trunk-details' + + @test.idempotent_id('f0bed24f-d36a-498b-b4e7-0d66e3fb7308') + def test_port_resource_trunk_details_no_subports(self): + trunk = self._create_trunk_with_network_and_parent([]) + port = self.client.show_port(trunk['trunk']['port_id']) + expected_trunk_details = {'sub_ports': [], + 'trunk_id': trunk['trunk']['id']} + observed_trunk_details = port['port'].get('trunk_details') + self.assertIsNotNone(observed_trunk_details) + self.assertEqual(expected_trunk_details, + observed_trunk_details) + + @test.idempotent_id('544bcaf2-86fb-4930-93ab-ece1c3cc33df') + def test_port_resource_trunk_details_with_subport(self): + subport_network = self.create_network() + parent_port = self.create_port(subport_network) + subport_data = {'port_id': parent_port['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2} + trunk = self._create_trunk_with_network_and_parent([subport_data]) + port = self.client.show_port(trunk['trunk']['port_id']) + expected_trunk_details = {'sub_ports': [subport_data], + 'trunk_id': trunk['trunk']['id']} + observed_trunk_details = port['port'].get('trunk_details') + self.assertIsNotNone(observed_trunk_details) + self.assertEqual(expected_trunk_details, + observed_trunk_details) + + @test.idempotent_id('fe6d865f-1d5c-432e-b65d-904157172f24') + def test_port_resource_empty_trunk_details(self): + network = self.create_network() + port = self.create_port(network) + port = self.client.show_port(port['id']) + observed_trunk_details = port['port'].get('trunk_details') + self.assertIsNone(observed_trunk_details) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_trunk_negative.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_trunk_negative.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_trunk_negative.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_trunk_negative.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,237 @@ +# Copyright 2016 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
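The trunk-details tests above pin down a read-only port attribute: show_port() returns a `trunk_details` dict only when the port is the parent of a trunk. A hedged sketch of consuming that field (the `port` dict is assumed to be shaped like show_port()['port']):

# Reading the trunk_details field asserted by the tests above.
def describe_trunk(port):
    details = port.get('trunk_details')
    if details is None:
        return 'not a trunk parent port'
    return 'trunk %s with %d subport(s)' % (details['trunk_id'],
                                            len(details['sub_ports']))


assert describe_trunk({'id': 'p1'}) == 'not a trunk parent port'
assert describe_trunk(
    {'trunk_details': {'trunk_id': 't1',
                       'sub_ports': []}}) == 'trunk t1 with 0 subport(s)'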
+ +from oslo_utils import uuidutils +from tempest.lib import exceptions as lib_exc +from tempest import test + +from neutron.tests.tempest.api import test_trunk + + +class TrunkTestJSON(test_trunk.TrunkTestJSONBase): + + @test.attr(type='negative') + @test.idempotent_id('1b5cf87a-1d3a-4a94-ba64-647153d54f32') + def test_create_trunk_nonexistent_port_id(self): + self.assertRaises(lib_exc.NotFound, self.client.create_trunk, + uuidutils.generate_uuid(), []) + + @test.attr(type='negative') + @test.idempotent_id('980bca3b-b0be-45ac-8067-b401e445b796') + def test_create_trunk_nonexistent_subport_port_id(self): + network = self.create_network() + parent_port = self.create_port(network) + self.assertRaises(lib_exc.NotFound, self.client.create_trunk, + parent_port['id'], + [{'port_id': uuidutils.generate_uuid(), + 'segmentation_type': 'vlan', + 'segmentation_id': 2}]) + + @test.attr(type='negative') + @test.idempotent_id('a5c5200a-72a0-43c5-a11a-52f808490344') + def test_create_subport_nonexistent_port_id(self): + trunk = self._create_trunk_with_network_and_parent([]) + self.assertRaises(lib_exc.NotFound, self.client.add_subports, + trunk['trunk']['id'], + [{'port_id': uuidutils.generate_uuid(), + 'segmentation_type': 'vlan', + 'segmentation_id': 2}]) + + @test.attr(type='negative') + @test.idempotent_id('80deb6a9-da2a-48db-b7fd-bcef5b14edc1') + def test_create_subport_nonexistent_trunk(self): + network = self.create_network() + parent_port = self.create_port(network) + self.assertRaises(lib_exc.NotFound, self.client.add_subports, + uuidutils.generate_uuid(), + [{'port_id': parent_port['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}]) + + @test.attr(type='negative') + @test.idempotent_id('7e0f99ab-fe37-408b-a889-9e44ef300084') + def test_create_subport_missing_segmentation_id(self): + trunk = self._create_trunk_with_network_and_parent([]) + subport_network = self.create_network() + parent_port = self.create_port(subport_network) + self.assertRaises(lib_exc.BadRequest, self.client.add_subports, + trunk['trunk']['id'], + [{'port_id': parent_port['id'], + 'segmentation_type': 'vlan'}]) + + @test.attr(type='negative') + @test.idempotent_id('a315d78b-2f43-4efa-89ae-166044c568aa') + def test_create_trunk_with_subport_missing_segmentation_id(self): + subport_network = self.create_network() + parent_port = self.create_port(subport_network) + self.assertRaises(lib_exc.BadRequest, self.client.create_trunk, + parent_port['id'], + [{'port_id': uuidutils.generate_uuid(), + 'segmentation_type': 'vlan'}]) + + @test.attr(type='negative') + @test.idempotent_id('33498618-f75a-4796-8ae6-93d4fd203fa4') + def test_create_trunk_with_subport_missing_segmentation_type(self): + subport_network = self.create_network() + parent_port = self.create_port(subport_network) + self.assertRaises(lib_exc.BadRequest, self.client.create_trunk, + parent_port['id'], + [{'port_id': uuidutils.generate_uuid(), + 'segmentation_id': 3}]) + + @test.attr(type='negative') + @test.idempotent_id('a717691c-4e07-4d81-a98d-6f1c18c5d183') + def test_create_trunk_with_subport_missing_port_id(self): + subport_network = self.create_network() + parent_port = self.create_port(subport_network) + self.assertRaises(lib_exc.BadRequest, self.client.create_trunk, + parent_port['id'], + [{'segmentation_type': 'vlan', + 'segmentation_id': 3}]) + + @test.attr(type='negative') + @test.idempotent_id('40aed9be-e976-47d0-a555-bde2c7e74e57') + def test_create_trunk_duplicate_subport_segmentation_ids(self): + trunk = 
self._create_trunk_with_network_and_parent([]) + subport_network1 = self.create_network() + subport_network2 = self.create_network() + parent_port1 = self.create_port(subport_network1) + parent_port2 = self.create_port(subport_network2) + self.assertRaises(lib_exc.BadRequest, self.client.create_trunk, + trunk['trunk']['id'], + [{'port_id': parent_port1['id'], + 'segmentation_id': 2, + 'segmentation_type': 'vlan'}, + {'port_id': parent_port2['id'], + 'segmentation_id': 2, + 'segmentation_type': 'vlan'}]) + + @test.attr(type='negative') + @test.idempotent_id('6f132ccc-1380-42d8-9c44-50411612bd01') + def test_add_subport_port_id_uses_trunk_port_id(self): + trunk = self._create_trunk_with_network_and_parent(None) + self.assertRaises(lib_exc.Conflict, self.client.add_subports, + trunk['trunk']['id'], + [{'port_id': trunk['trunk']['port_id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}]) + + @test.attr(type='negative') + @test.idempotent_id('7f132ccc-1380-42d8-9c44-50411612bd01') + def test_add_subport_port_id_disabled_trunk(self): + trunk = self._create_trunk_with_network_and_parent( + None, admin_state_up=False) + self.assertRaises(lib_exc.Conflict, + self.client.add_subports, + trunk['trunk']['id'], + [{'port_id': trunk['trunk']['port_id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}]) + self.client.update_trunk( + trunk['trunk']['id'], admin_state_up=True) + + @test.attr(type='negative') + @test.idempotent_id('8f132ccc-1380-42d8-9c44-50411612bd01') + def test_remove_subport_port_id_disabled_trunk(self): + trunk = self._create_trunk_with_network_and_parent( + None, admin_state_up=False) + self.assertRaises(lib_exc.Conflict, + self.client.remove_subports, + trunk['trunk']['id'], + [{'port_id': trunk['trunk']['port_id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}]) + self.client.update_trunk( + trunk['trunk']['id'], admin_state_up=True) + + @test.attr(type='negative') + @test.idempotent_id('9f132ccc-1380-42d8-9c44-50411612bd01') + def test_delete_trunk_disabled_trunk(self): + trunk = self._create_trunk_with_network_and_parent( + None, admin_state_up=False) + self.assertRaises(lib_exc.Conflict, + self.client.delete_trunk, + trunk['trunk']['id']) + self.client.update_trunk( + trunk['trunk']['id'], admin_state_up=True) + + @test.attr(type='negative') + @test.idempotent_id('00cb40bb-1593-44c8-808c-72b47e64252f') + def test_add_subport_duplicate_segmentation_details(self): + trunk = self._create_trunk_with_network_and_parent(None) + network = self.create_network() + parent_port1 = self.create_port(network) + parent_port2 = self.create_port(network) + self.client.add_subports(trunk['trunk']['id'], + [{'port_id': parent_port1['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}]) + self.assertRaises(lib_exc.Conflict, self.client.add_subports, + trunk['trunk']['id'], + [{'port_id': parent_port2['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}]) + + @test.attr(type='negative') + @test.idempotent_id('4eac8c25-83ee-4051-9620-34774f565730') + def test_add_subport_passing_dict(self): + trunk = self._create_trunk_with_network_and_parent(None) + self.assertRaises(lib_exc.BadRequest, self.client.add_subports, + trunk['trunk']['id'], + {'port_id': trunk['trunk']['port_id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}) + + @test.attr(type='negative') + @test.idempotent_id('17ca7dd7-96a8-445a-941e-53c0c86c2fe2') + def test_remove_subport_passing_dict(self): + network = self.create_network() + parent_port = self.create_port(network) + subport_data 
= {'port_id': parent_port['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2} + trunk = self._create_trunk_with_network_and_parent([subport_data]) + self.assertRaises(lib_exc.BadRequest, self.client.remove_subports, + trunk['trunk']['id'], subport_data) + + @test.attr(type='negative') + @test.idempotent_id('aaca7dd7-96b8-445a-931e-63f0d86d2fe2') + def test_remove_subport_not_found(self): + network = self.create_network() + parent_port = self.create_port(network) + subport_data = {'port_id': parent_port['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2} + trunk = self._create_trunk_with_network_and_parent([]) + self.assertRaises(lib_exc.NotFound, self.client.remove_subports, + trunk['trunk']['id'], [subport_data]) + + @test.attr(type='negative') + @test.idempotent_id('6c9c5126-4f61-11e6-8248-40a8f063c891') + def test_delete_port_in_use_by_trunk(self): + trunk = self._create_trunk_with_network_and_parent(None) + self.assertRaises(lib_exc.Conflict, self.client.delete_port, + trunk['trunk']['port_id']) + + @test.attr(type='negative') + @test.idempotent_id('343a03d0-4f7c-11e6-97fa-40a8f063c891') + def test_delete_port_in_use_by_subport(self): + network = self.create_network() + port = self.create_port(network) + subports = [{'port_id': port['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}] + self._create_trunk_with_network_and_parent(subports) + self.assertRaises(lib_exc.Conflict, self.client.delete_port, + port['id']) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_trunk.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_trunk.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/api/test_trunk.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/api/test_trunk.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,249 @@ +# Copyright 2016 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from tempest.lib.common.utils import test_utils +from tempest.lib import exceptions as lib_exc +from tempest import test + +from neutron.tests.tempest.api import base + + +def trunks_cleanup(client, trunks): + for trunk in trunks: + # NOTE(armax): deleting a trunk with subports is permitted, however + # for testing purposes it is safer to be explicit and clean all the + # resources associated with the trunk beforehand. + subports = test_utils.call_and_ignore_notfound_exc( + client.get_subports, trunk['id']) + if subports: + client.remove_subports( + trunk['id'], subports['sub_ports']) + test_utils.call_and_ignore_notfound_exc( + client.delete_trunk, trunk['id']) + + +class TrunkTestJSONBase(base.BaseAdminNetworkTest): + + extension = 'trunk' + + def setUp(self): + self.addCleanup(self.resource_cleanup) + super(TrunkTestJSONBase, self).setUp() + + @classmethod + def skip_checks(cls): + super(TrunkTestJSONBase, cls).skip_checks() + if not test.is_extension_enabled(cls.extension, 'network'): + msg = "%s extension not enabled." 
% cls.extension + raise cls.skipException(msg) + + @classmethod + def resource_setup(cls): + super(TrunkTestJSONBase, cls).resource_setup() + cls.trunks = [] + + @classmethod + def resource_cleanup(cls): + trunks_cleanup(cls.client, cls.trunks) + super(TrunkTestJSONBase, cls).resource_cleanup() + + def _create_trunk_with_network_and_parent(self, subports, **kwargs): + network = self.create_network() + parent_port = self.create_port(network) + trunk = self.client.create_trunk(parent_port['id'], subports, **kwargs) + self.trunks.append(trunk['trunk']) + return trunk + + +class TrunkTestJSON(TrunkTestJSONBase): + + @test.idempotent_id('e1a6355c-4768-41f3-9bf8-0f1d192bd501') + def test_create_trunk_empty_subports_list(self): + trunk = self._create_trunk_with_network_and_parent([]) + observed_trunk = self.client.show_trunk(trunk['trunk']['id']) + self.assertEqual(trunk, observed_trunk) + + @test.idempotent_id('382dfa39-ca03-4bd3-9a1c-91e36d2e3796') + def test_create_trunk_subports_not_specified(self): + trunk = self._create_trunk_with_network_and_parent(None) + observed_trunk = self.client.show_trunk(trunk['trunk']['id']) + self.assertEqual(trunk, observed_trunk) + + @test.idempotent_id('7de46c22-e2b6-4959-ac5a-0e624632ab32') + def test_create_show_delete_trunk(self): + trunk = self._create_trunk_with_network_and_parent(None) + trunk_id = trunk['trunk']['id'] + parent_port_id = trunk['trunk']['port_id'] + res = self.client.show_trunk(trunk_id) + self.assertEqual(trunk_id, res['trunk']['id']) + self.assertEqual(parent_port_id, res['trunk']['port_id']) + self.client.delete_trunk(trunk_id) + self.assertRaises(lib_exc.NotFound, self.client.show_trunk, trunk_id) + + @test.idempotent_id('4ce46c22-a2b6-4659-bc5a-0ef2463cab32') + def test_create_update_trunk(self): + trunk = self._create_trunk_with_network_and_parent(None) + trunk_id = trunk['trunk']['id'] + res = self.client.show_trunk(trunk_id) + self.assertTrue(res['trunk']['admin_state_up']) + self.assertEqual("", res['trunk']['name']) + res = self.client.update_trunk( + trunk_id, name='foo', admin_state_up=False) + self.assertFalse(res['trunk']['admin_state_up']) + self.assertEqual("foo", res['trunk']['name']) + # enable the trunk so that it can be managed + self.client.update_trunk(trunk_id, admin_state_up=True) + + @test.idempotent_id('73365f73-bed6-42cd-960b-ec04e0c99d85') + def test_list_trunks(self): + trunk1 = self._create_trunk_with_network_and_parent(None) + trunk2 = self._create_trunk_with_network_and_parent(None) + expected_trunks = {trunk1['trunk']['id']: trunk1['trunk'], + trunk2['trunk']['id']: trunk2['trunk']} + trunk_list = self.client.list_trunks()['trunks'] + matched_trunks = [x for x in trunk_list if x['id'] in expected_trunks] + self.assertEqual(2, len(matched_trunks)) + for trunk in matched_trunks: + self.assertEqual(expected_trunks[trunk['id']], trunk) + + @test.idempotent_id('bb5fcead-09b5-484a-bbe6-46d1e06d6cc0') + def test_add_subport(self): + trunk = self._create_trunk_with_network_and_parent([]) + network = self.create_network() + port = self.create_port(network) + subports = [{'port_id': port['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}] + self.client.add_subports(trunk['trunk']['id'], subports) + trunk = self.client.show_trunk(trunk['trunk']['id']) + observed_subports = trunk['trunk']['sub_ports'] + self.assertEqual(1, len(observed_subports)) + created_subport = observed_subports[0] + self.assertEqual(subports[0], created_subport) + + @test.idempotent_id('ee5fcead-1abf-483a-bce6-43d1e06d6aa0') + def 
test_delete_trunk_with_subport_is_allowed(self): + network = self.create_network() + port = self.create_port(network) + subports = [{'port_id': port['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}] + trunk = self._create_trunk_with_network_and_parent(subports) + self.client.delete_trunk(trunk['trunk']['id']) + + @test.idempotent_id('96eea398-a03c-4c3e-a99e-864392c2ca53') + def test_remove_subport(self): + subport_parent1 = self.create_port(self.create_network()) + subport_parent2 = self.create_port(self.create_network()) + subports = [{'port_id': subport_parent1['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}, + {'port_id': subport_parent2['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 4}] + trunk = self._create_trunk_with_network_and_parent(subports) + removed_subport = trunk['trunk']['sub_ports'][0] + expected_subport = None + + for subport in subports: + if subport['port_id'] != removed_subport['port_id']: + expected_subport = subport + break + + # Remove the subport and validate PUT response + res = self.client.remove_subports(trunk['trunk']['id'], + [removed_subport]) + self.assertEqual(1, len(res['sub_ports'])) + self.assertEqual(expected_subport, res['sub_ports'][0]) + + # Validate the results of a subport list + trunk = self.client.show_trunk(trunk['trunk']['id']) + observed_subports = trunk['trunk']['sub_ports'] + self.assertEqual(1, len(observed_subports)) + self.assertEqual(expected_subport, observed_subports[0]) + + @test.idempotent_id('bb5fcaad-09b5-484a-dde6-4cd1ea6d6ff0') + def test_get_subports(self): + network = self.create_network() + port = self.create_port(network) + subports = [{'port_id': port['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}] + trunk = self._create_trunk_with_network_and_parent(subports) + trunk = self.client.get_subports(trunk['trunk']['id']) + observed_subports = trunk['sub_ports'] + self.assertEqual(1, len(observed_subports)) + + +class TrunksSearchCriteriaTest(base.BaseSearchCriteriaTest): + + resource = 'trunk' + + @classmethod + def skip_checks(cls): + super(TrunksSearchCriteriaTest, cls).skip_checks() + if not test.is_extension_enabled('trunk', 'network'): + msg = "trunk extension not enabled." 
+ raise cls.skipException(msg) + + @classmethod + def resource_setup(cls): + super(TrunksSearchCriteriaTest, cls).resource_setup() + cls.trunks = [] + net = cls.create_network(network_name='trunk-search-test-net') + for name in cls.resource_names: + parent_port = cls.create_port(net) + trunk = cls.client.create_trunk(parent_port['id'], [], name=name) + cls.trunks.append(trunk['trunk']) + + @classmethod + def resource_cleanup(cls): + trunks_cleanup(cls.client, cls.trunks) + super(TrunksSearchCriteriaTest, cls).resource_cleanup() + + @test.idempotent_id('fab73df4-960a-4ae3-87d3-60992b8d3e2d') + def test_list_sorts_asc(self): + self._test_list_sorts_asc() + + @test.idempotent_id('a426671d-7270-430f-82ff-8f33eec93010') + def test_list_sorts_desc(self): + self._test_list_sorts_desc() + + @test.idempotent_id('b202fdc8-6616-45df-b6a0-463932de6f94') + def test_list_pagination(self): + self._test_list_pagination() + + @test.idempotent_id('c4723b8e-8186-4b9a-bf9e-57519967e048') + def test_list_pagination_with_marker(self): + self._test_list_pagination_with_marker() + + @test.idempotent_id('dcd02a7a-f07e-4d5e-b0ca-b58e48927a9b') + def test_list_pagination_with_href_links(self): + self._test_list_pagination_with_href_links() + + @test.idempotent_id('eafe7024-77ab-4cfe-824b-0b2bf4217727') + def test_list_no_pagination_limit_0(self): + self._test_list_no_pagination_limit_0() + + @test.idempotent_id('f8857391-dc44-40cc-89b7-2800402e03ce') + def test_list_pagination_page_reverse_asc(self): + self._test_list_pagination_page_reverse_asc() + + @test.idempotent_id('ae51e9c9-ceae-4ec0-afd4-147569247699') + def test_list_pagination_page_reverse_desc(self): + self._test_list_pagination_page_reverse_desc() + + @test.idempotent_id('b4293e59-d794-4a93-be09-38667199ef68') + def test_list_pagination_page_reverse_with_href_links(self): + self._test_list_pagination_page_reverse_with_href_links() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/config.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/config.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/config.py 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/config.py 2016-08-29 20:05:49.000000000 +0000 @@ -12,7 +12,6 @@ from oslo_config import cfg -from neutron import api from tempest import config @@ -23,13 +22,7 @@ cfg.BoolOpt('specify_floating_ip_address_available', default=True, help='Allow passing an IP Address of the floating ip when ' - 'creating the floating ip'), - cfg.BoolOpt('validate_pagination', - default=api.DEFAULT_ALLOW_PAGINATION, - help='Validate pagination'), - cfg.BoolOpt('validate_sorting', - default=api.DEFAULT_ALLOW_SORTING, - help='Validate sorting')] + 'creating the floating ip')] # TODO(amuller): Redo configuration options registration as part of the planned # transition to the Tempest plugin architecture diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/scenario/base.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/scenario/base.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/scenario/base.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/scenario/base.py 2016-08-03 20:10:34.000000000 +0000 @@ -12,7 +12,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
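Taken together, the trunk negative tests earlier in this patch spell out the subport schema: the payload must be a list, every entry must carry port_id, segmentation_type and segmentation_id (else BadRequest), and segmentation details must be unique within a trunk (else Conflict). A validator distilled from just those assertions, as a sketch rather than the server-side implementation:

# Mirrors only the BadRequest/Conflict cases the negative tests assert.
REQUIRED_SUBPORT_KEYS = ('port_id', 'segmentation_type', 'segmentation_id')


def validate_subports(subports, existing=()):
    if not isinstance(subports, list):
        raise ValueError('BadRequest: sub_ports must be a list')
    seen = set((s['segmentation_type'], s['segmentation_id'])
               for s in existing)
    for subport in subports:
        missing = [k for k in REQUIRED_SUBPORT_KEYS if k not in subport]
        if missing:
            raise ValueError('BadRequest: missing %s' % ', '.join(missing))
        seg = (subport['segmentation_type'], subport['segmentation_id'])
        if seg in seen:
            raise ValueError('Conflict: duplicate segmentation details')
        seen.add(seg)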
-from oslo_log import log as logging from tempest.common import waiters from tempest.lib.common import ssh @@ -23,7 +22,6 @@ from neutron.tests.tempest.scenario import constants CONF = config.CONF -LOG = logging.getLogger(__name__) class BaseTempestTestCase(base_api.BaseNetworkTest): @@ -68,7 +66,7 @@ return body['keypair'] @classmethod - def create_loginable_secgroup_rule(cls, secgroup_id=None): + def create_secgroup_rules(cls, rule_list, secgroup_id=None): client = cls.manager.network_client if not secgroup_id: sgs = client.list_security_groups()['security_groups'] @@ -77,18 +75,29 @@ secgroup_id = sg['id'] break - # This rule is intended to permit inbound ssh - # traffic from all sources, so no group_id is provided. - # Setting a group_id would only permit traffic from ports - # belonging to the same security group. - ruleset = {'protocol': 'tcp', - 'port_range_min': 22, - 'port_range_max': 22, - 'remote_ip_prefix': '0.0.0.0/0'} - rules = [client.create_security_group_rule( - direction='ingress', security_group_id=secgroup_id, - **ruleset)['security_group_rule']] - return rules + for rule in rule_list: + direction = rule.pop('direction') + client.create_security_group_rule( + direction=direction, + security_group_id=secgroup_id, + **rule) + + @classmethod + def create_loginable_secgroup_rule(cls, secgroup_id=None): + """This rule is intended to permit inbound ssh + + Allowing ssh traffic from all sources, so no group_id is + provided. + Setting a group_id would only permit traffic from ports + belonging to the same security group. + """ + + rule_list = [{'protocol': 'tcp', + 'direction': 'ingress', + 'port_range_min': 22, + 'port_range_max': 22, + 'remote_ip_prefix': '0.0.0.0/0'}] + cls.create_secgroup_rules(rule_list, secgroup_id=secgroup_id) @classmethod def create_router_and_interface(cls, subnet_id): @@ -111,3 +120,24 @@ def check_connectivity(cls, host, ssh_user, ssh_key=None): ssh_client = ssh.Client(host, ssh_user, pkey=ssh_key) ssh_client.test_connection_auth() + + @classmethod + def setup_network_and_server(cls): + cls.network = cls.create_network() + cls.subnet = cls.create_subnet(cls.network) + + cls.create_router_and_interface(cls.subnet['id']) + cls.keypair = cls.create_keypair() + cls.create_loginable_secgroup_rule() + cls.server = cls.create_server( + flavor_ref=CONF.compute.flavor_ref, + image_ref=CONF.compute.image_ref, + key_name=cls.keypair['name'], + networks=[{'uuid': cls.network['id']}]) + waiters.wait_for_server_status(cls.manager.servers_client, + cls.server['server']['id'], + constants.SERVER_STATUS_ACTIVE) + port = cls.client.list_ports(network_id=cls.network['id'], + device_id=cls.server[ + 'server']['id'])['ports'][0] + cls.fip = cls.create_and_associate_floatingip(port['id']) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/scenario/constants.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/scenario/constants.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/scenario/constants.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/scenario/constants.py 2016-08-03 20:10:34.000000000 +0000 @@ -14,3 +14,5 @@ SERVER_STATUS_ACTIVE = 'ACTIVE' DEFAULT_SECURITY_GROUP = 'default' +LIMIT_KILO_BITS_PER_SECOND = 1000 +SOCKET_CONNECT_TIMEOUT = 60 diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/scenario/exceptions.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/scenario/exceptions.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/scenario/exceptions.py 1970-01-01 00:00:00.000000000 +0000 +++
neutron-9.0.0~b3~dev557/neutron/tests/tempest/scenario/exceptions.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,33 @@ +# Copyright 2016 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from tempest.lib import exceptions + +TempestException = exceptions.TempestException + + +class QoSLimitReached(TempestException): + message = "Limit reached, limit = %(limit)d" + + +class SocketConnectionRefused(TempestException): + message = "Unable to connect to %(host)s port %(port)d: Connection Refused" + + +class ConnectionTimeoutException(TempestException): + message = "Timeout connecting to %(host)s port %(port)d" + + +class FileCreationFailedException(TempestException): + message = "File %(file)s has not been created or has the wrong size" diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/scenario/test_basic.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/scenario/test_basic.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/scenario/test_basic.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/scenario/test_basic.py 2016-08-03 20:10:34.000000000 +0000 @@ -12,16 +12,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
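The new scenario/exceptions.py follows the tempest convention: each exception class carries a class-level `message` template that is interpolated with the keyword arguments passed to the constructor. A self-contained sketch of that pattern (simplified; the real base class lives in tempest.lib.exceptions):

# Simplified version of the template-message pattern used above.
class TemplatedException(Exception):
    message = 'An unknown exception occurred'

    def __init__(self, **kwargs):
        super(TemplatedException, self).__init__(self.message % kwargs)


class QoSLimitReached(TemplatedException):
    message = 'Limit reached, limit = %(limit)d'


assert str(QoSLimitReached(limit=1000)) == 'Limit reached, limit = 1000'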
-from oslo_log import log as logging -from tempest.common import waiters from tempest import test from neutron.tests.tempest import config from neutron.tests.tempest.scenario import base -from neutron.tests.tempest.scenario import constants CONF = config.CONF -LOG = logging.getLogger(__name__) class NetworkBasicTest(base.BaseTempestTestCase): @@ -33,24 +29,7 @@ @test.idempotent_id('de07fe0a-e955-449e-b48b-8641c14cd52e') def test_basic_instance(self): - network = self.create_network() - subnet = self.create_subnet(network) - - self.create_router_and_interface(subnet['id']) - keypair = self.create_keypair() - self.create_loginable_secgroup_rule() - server = self.create_server( - flavor_ref=CONF.compute.flavor_ref, - image_ref=CONF.compute.image_ref, - key_name=keypair['name'], - networks=[{'uuid': network['id']}]) - waiters.wait_for_server_status(self.manager.servers_client, - server['server']['id'], - constants.SERVER_STATUS_ACTIVE) - port = self.client.list_ports(network_id=network['id'], - device_id=server[ - 'server']['id'])['ports'][0] - fip = self.create_and_associate_floatingip(port['id']) - self.check_connectivity(fip['floating_ip_address'], + self.setup_network_and_server() + self.check_connectivity(self.fip['floating_ip_address'], CONF.validation.image_ssh_user, - keypair['private_key']) + self.keypair['private_key']) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/scenario/test_qos.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/scenario/test_qos.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/scenario/test_qos.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/scenario/test_qos.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,184 @@ +# Copyright 2016 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import errno +import socket +import time + +from oslo_log import log as logging +from tempest.lib.common import ssh +from tempest.lib import exceptions +from tempest import test + +from neutron.common import utils +from neutron.tests.tempest import config +from neutron.tests.tempest.scenario import base +from neutron.tests.tempest.scenario import constants +from neutron.tests.tempest.scenario import exceptions as sc_exceptions + +CONF = config.CONF +LOG = logging.getLogger(__name__) + + +def _try_connect(host_ip, port): + try: + client_socket = socket.socket(socket.AF_INET, + socket.SOCK_STREAM) + client_socket.connect((host_ip, port)) + client_socket.setblocking(0) + return client_socket + except socket.error as serr: + if serr.errno == errno.ECONNREFUSED: + raise sc_exceptions.SocketConnectionRefused(host=host_ip, + port=port) + else: + raise + + +def _connect_socket(host, port): + """Try to initiate a connection to a host using an ip address + and a port. + + Trying a couple of times until a timeout is reached in case the listening + host is not ready yet.
+ """ + + start = time.time() + while True: + try: + return _try_connect(host, port) + except sc_exceptions.SocketConnectionRefused: + if time.time() - start > constants.SOCKET_CONNECT_TIMEOUT: + raise sc_exceptions.ConnectionTimeoutException(host=host, + port=port) + + +class QoSTest(base.BaseTempestTestCase): + credentials = ['primary', 'admin'] + force_tenant_isolation = False + + BUFFER_SIZE = 1024 * 1024 + TOLERANCE_FACTOR = 1.5 + BS = 512 + COUNT = BUFFER_SIZE / BS + FILE_SIZE = BS * COUNT + LIMIT_BYTES_SEC = (constants.LIMIT_KILO_BITS_PER_SECOND * 1024 + * TOLERANCE_FACTOR / 8.0) + FILE_PATH = "/tmp/img" + + @classmethod + @test.requires_ext(extension="qos", service="network") + def resource_setup(cls): + super(QoSTest, cls).resource_setup() + + def _create_file_for_bw_tests(self, ssh_client): + cmd = ("(dd if=/dev/zero bs=%(bs)d count=%(count)d of=%(file_path)s) " + % {'bs': QoSTest.BS, 'count': QoSTest.COUNT, + 'file_path': QoSTest.FILE_PATH}) + ssh_client.exec_command(cmd) + cmd = "stat -c %%s %s" % QoSTest.FILE_PATH + filesize = ssh_client.exec_command(cmd) + if int(filesize.strip()) != QoSTest.FILE_SIZE: + raise sc_exceptions.FileCreationFailedException( + file=QoSTest.FILE_PATH) + + def _check_bw(self, ssh_client, host, port): + total_bytes_read = 0 + cycle_start_time = time.time() + cycle_data_read = 0 + + cmd = "killall -q nc" + try: + ssh_client.exec_command(cmd) + except exceptions.SSHExecCommandFailed: + pass + cmd = ("(nc -ll -p %(port)d < %(file_path)s > /dev/null &)" % { + 'port': port, 'file_path': QoSTest.FILE_PATH}) + ssh_client.exec_command(cmd) + client_socket = _connect_socket(host, port) + + while total_bytes_read < QoSTest.FILE_SIZE: + try: + data = client_socket.recv(QoSTest.BUFFER_SIZE) + except socket.error as e: + if e.args[0] in [errno.EAGAIN, errno.EWOULDBLOCK]: + continue + else: + raise + total_bytes_read += len(data) + cycle_data_read += len(data) + time_elapsed = time.time() - cycle_start_time + should_check = (time_elapsed >= 5 or + total_bytes_read == QoSTest.FILE_SIZE) + if should_check: + LOG.debug("time_elapsed = %(time_elapsed)d," + "total_bytes_read = %(bytes_read)d," + "cycle_data_read = %(cycle_data)d", + {"time_elapsed": time_elapsed, + "bytes_read": total_bytes_read, + "cycle_data": cycle_data_read}) + + if cycle_data_read / time_elapsed > QoSTest.LIMIT_BYTES_SEC: + # Limit reached + return False + else: + cycle_start_time = time.time() + cycle_data_read = 0 + return True + + @test.idempotent_id('1f7ed39b-428f-410a-bd2b-db9f465680df') + def test_qos(self): + """This is a basic test that check that a QoS policy with + + a bandwidth limit rule is applied correctly by sending + a file from the instance to the test node. + Then calculating the bandwidth every ~1 sec by the number of bits + received / elapsed time. 
+ """ + + NC_PORT = 1234 + + self.setup_network_and_server() + self.check_connectivity(self.fip['floating_ip_address'], + CONF.validation.image_ssh_user, + self.keypair['private_key']) + rulesets = [{'protocol': 'tcp', + 'direction': 'ingress', + 'port_range_min': NC_PORT, + 'port_range_max': NC_PORT, + 'remote_ip_prefix': '0.0.0.0/0'}] + self.create_secgroup_rules(rulesets) + ssh_client = ssh.Client(self.fip['floating_ip_address'], + CONF.validation.image_ssh_user, + pkey=self.keypair['private_key']) + policy = self.admin_manager.network_client.create_qos_policy( + name='test-policy', + description='test-qos-policy', + shared=True) + policy_id = policy['policy']['id'] + self.admin_manager.network_client.create_bandwidth_limit_rule( + policy_id, max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND, + max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND) + port = self.client.list_ports(network_id=self.network['id'], + device_id=self.server[ + 'server']['id'])['ports'][0] + self.admin_manager.network_client.update_port(port['id'], + qos_policy_id=policy_id) + self._create_file_for_bw_tests(ssh_client) + utils.wait_until_true(lambda: self._check_bw( + ssh_client, + self.fip['floating_ip_address'], + port=NC_PORT), + timeout=120, + sleep=1) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tempest/services/network/json/network_client.py neutron-9.0.0~b3~dev557/neutron/tests/tempest/services/network/json/network_client.py --- neutron-9.0.0~b2~dev280/neutron/tests/tempest/services/network/json/network_client.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tempest/services/network/json/network_client.py 2016-08-29 20:05:49.000000000 +0000 @@ -659,6 +659,78 @@ body = jsonutils.loads(body) return service_client.ResponseBody(resp, body) + def create_trunk(self, parent_port_id, subports, + tenant_id=None, name=None, admin_state_up=None): + uri = '%s/trunks' % self.uri_prefix + post_data = { + 'trunk': { + 'port_id': parent_port_id, + } + } + if subports is not None: + post_data['trunk']['sub_ports'] = subports + if tenant_id is not None: + post_data['trunk']['tenant_id'] = tenant_id + if name is not None: + post_data['trunk']['name'] = name + if admin_state_up is not None: + post_data['trunk']['admin_state_up'] = admin_state_up + resp, body = self.post(uri, self.serialize(post_data)) + body = self.deserialize_single(body) + self.expected_success(201, resp.status) + return service_client.ResponseBody(resp, body) + + def update_trunk(self, trunk_id, **kwargs): + put_body = {'trunk': kwargs} + body = jsonutils.dumps(put_body) + uri = '%s/trunks/%s' % (self.uri_prefix, trunk_id) + resp, body = self.put(uri, body) + self.expected_success(200, resp.status) + body = jsonutils.loads(body) + return service_client.ResponseBody(resp, body) + + def show_trunk(self, trunk_id): + uri = '%s/trunks/%s' % (self.uri_prefix, trunk_id) + resp, body = self.get(uri) + body = self.deserialize_single(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body) + + def list_trunks(self, **kwargs): + uri = '%s/trunks' % self.uri_prefix + if kwargs: + uri += '?' 
+ urlparse.urlencode(kwargs, doseq=1) + resp, body = self.get(uri) + self.expected_success(200, resp.status) + body = self.deserialize_single(body) + return service_client.ResponseBody(resp, body) + + def delete_trunk(self, trunk_id): + uri = '%s/trunks/%s' % (self.uri_prefix, trunk_id) + resp, body = self.delete(uri) + self.expected_success(204, resp.status) + return service_client.ResponseBody(resp, body) + + def _subports_action(self, action, trunk_id, subports): + uri = '%s/trunks/%s/%s' % (self.uri_prefix, trunk_id, action) + resp, body = self.put(uri, jsonutils.dumps({'sub_ports': subports})) + body = self.deserialize_single(body) + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body) + + def add_subports(self, trunk_id, subports): + return self._subports_action('add_subports', trunk_id, subports) + + def remove_subports(self, trunk_id, subports): + return self._subports_action('remove_subports', trunk_id, subports) + + def get_subports(self, trunk_id): + uri = '%s/trunks/%s/%s' % (self.uri_prefix, trunk_id, 'get_subports') + resp, body = self.get(uri) + self.expected_success(200, resp.status) + body = jsonutils.loads(body) + return service_client.ResponseBody(resp, body) + def get_auto_allocated_topology(self, tenant_id=None): uri = '%s/auto-allocated-topology/%s' % (self.uri_prefix, tenant_id) resp, body = self.get(uri) @@ -666,6 +738,12 @@ body = jsonutils.loads(body) return service_client.ResponseBody(resp, body) + def delete_auto_allocated_topology(self, tenant_id=None): + uri = '%s/auto-allocated-topology/%s' % (self.uri_prefix, tenant_id) + resp, body = self.delete(uri) + self.expected_success(204, resp.status) + return service_client.ResponseBody(resp, body) + def create_security_group_rule(self, direction, security_group_id, **kwargs): post_body = {'security_group_rule': kwargs} @@ -719,3 +797,12 @@ self.expected_success(201, resp.status) body = jsonutils.loads(body) return service_client.ResponseBody(resp, body) + + def list_extensions(self, **filters): + uri = self.get_uri("extensions") + if filters: + uri = '?'.join([uri, urlparse.urlencode(filters)]) + resp, body = self.get(uri) + body = {'extensions': self.deserialize_list(body)} + self.expected_success(200, resp.status) + return service_client.ResponseBody(resp, body) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/tools.py neutron-9.0.0~b3~dev557/neutron/tests/tools.py --- neutron-9.0.0~b2~dev280/neutron/tests/tools.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/tools.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,15 +14,14 @@ # under the License. import copy -import importlib import os import platform import random import string -import sys import time import warnings +from debtcollector import moves import fixtures import mock import netaddr @@ -30,10 +29,10 @@ import six import unittest2 -import neutron from neutron.api.v2 import attributes from neutron.common import constants as n_const from neutron.common import ipv6_utils +from neutron.common import utils from neutron.db import common_db_mixin @@ -132,9 +131,6 @@ common_db_mixin.CommonDbMixin._model_query_hooks = self.original_hooks -from neutron.common import utils - - def setup_mock_calls(mocked_call, expected_calls_and_values): """A convenient method to setup a sequence of mock calls. 
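The trunk methods added to network_client.py above all follow one shape: build a URI under uri_prefix, serialize the body, and assert the expected HTTP status. The URI construction is the only part that varies, sketched here on its own (an assumed simplification; six.moves is what the client itself uses for urlencode):

from six.moves.urllib import parse as urlparse


# How the new trunk client methods assemble their URIs; doseq=1 expands
# list-valued filters into repeated query keys.
def trunks_uri(uri_prefix, trunk_id=None, action=None, **filters):
    uri = '%s/trunks' % uri_prefix
    if trunk_id is not None:
        uri += '/%s' % trunk_id
    if action is not None:
        uri += '/%s' % action
    if filters:
        uri += '?' + urlparse.urlencode(filters, doseq=1)
    return uri


assert trunks_uri('/v2.0', 'abc', 'add_subports') == \
    '/v2.0/trunks/abc/add_subports'
assert trunks_uri('/v2.0', name=['t1', 't2']) == '/v2.0/trunks?name=t1&name=t2'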
@@ -204,36 +200,6 @@ return not self == other -def import_modules_recursively(topdir): - '''Import and return all modules below the topdir directory.''' - modules = [] - for root, dirs, files in os.walk(topdir): - for file_ in files: - if file_[-3:] != '.py': - continue - - module = file_[:-3] - if module == '__init__': - continue - - import_base = root.replace('/', '.') - - # NOTE(ihrachys): in Python3, or when we are not located in the - # directory containing neutron code, __file__ is absolute, so we - # should truncate it to exclude PYTHONPATH prefix - prefixlen = len(os.path.dirname(neutron.__file__)) - import_base = 'neutron' + import_base[prefixlen:] - - module = '.'.join([import_base, module]) - if module not in sys.modules: - importlib.import_module(module) - modules.append(module) - - for dir_ in dirs: - modules.extend(import_modules_recursively(dir_)) - return modules - - def get_random_string(n=10): return ''.join(random.choice(string.ascii_lowercase) for _ in range(n)) @@ -253,6 +219,10 @@ return random.randint(0, maxlen) +def get_random_port(): + return random.randint(n_const.PORT_RANGE_MIN, n_const.PORT_RANGE_MAX) + + def get_random_ip_version(): return random.choice(n_const.IP_ALLOWED_VERSIONS) @@ -294,6 +264,18 @@ return ip +def get_random_flow_direction(): + return random.choice(n_const.VALID_DIRECTIONS) + + +def get_random_ether_type(): + return random.choice(n_const.VALID_ETHERTYPES) + + +def get_random_ip_protocol(): + return random.choice(list(constants.IP_PROTOCOL_MAP.keys())) + + def is_bsd(): """Return True on BSD-based systems.""" @@ -316,4 +298,9 @@ def get_random_ipv6_mode(): - return random.choice(n_const.IPV6_MODES) + return random.choice(constants.IPV6_MODES) + + +import_modules_recursively = moves.moved_function( + utils.import_modules_recursively, 'import_modules_recursively', __name__, + version='Newton', removal_version='Ocata') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/common/test_ovs_lib.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/common/test_ovs_lib.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/common/test_ovs_lib.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/common/test_ovs_lib.py 2016-08-03 20:10:34.000000000 +0000 @@ -20,6 +20,7 @@ from oslo_utils import uuidutils import testtools +from neutron.agent.common import config from neutron.agent.common import ovs_lib from neutron.agent.common import utils from neutron.plugins.common import constants @@ -59,6 +60,16 @@ __repr__ = __str__ +def vsctl_only(f): + # NOTE(ivasilevskaya) as long as some tests rely heavily on mocking + # direct vsctl commands, need to ensure that ovsdb_interface = 'vsctl' + # TODO(ivasilevskaya) introduce alternative tests for native interface? + def wrapper(*args, **kwargs): + config.cfg.CONF.set_override("ovsdb_interface", "vsctl", group="OVS") + return f(*args, **kwargs) + return wrapper + + class OVS_Lib_Test(base.BaseTestCase): """A test suite to exercise the OVS libraries shared by Neutron agents. @@ -66,6 +77,7 @@ can run on any system. That does, however, limit their scope. 
""" + @vsctl_only def setUp(self): super(OVS_Lib_Test, self).setUp() self.BR_NAME = "br-int" @@ -906,11 +918,13 @@ with ovs_lib.DeferredOVSBridge(self.br) as deferred_br: self.assertRaises(AttributeError, getattr, deferred_br, 'failure') + @vsctl_only def test_default_cookie(self): self.br = ovs_lib.OVSBridge("br-tun") uuid_stamp1 = self.br.default_cookie self.assertEqual(uuid_stamp1, self.br.default_cookie) + @vsctl_only def test_cookie_passed_to_addmod(self): self.br = ovs_lib.OVSBridge("br-tun") stamp = str(self.br.default_cookie) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/dhcp/test_agent.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/dhcp/test_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/dhcp/test_agent.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/dhcp/test_agent.py 2016-08-03 20:10:34.000000000 +0000 @@ -28,13 +28,13 @@ from neutron.agent.common import config from neutron.agent.dhcp import agent as dhcp_agent -from neutron.agent.dhcp import config as dhcp_config from neutron.agent import dhcp_agent as entry from neutron.agent.linux import dhcp from neutron.agent.linux import interface from neutron.common import config as common_config from neutron.common import constants as n_const from neutron.common import utils +from neutron.conf.agent import dhcp as dhcp_config from neutron import context from neutron.tests import base @@ -499,7 +499,7 @@ dhcp._periodic_resync_helper() sync_state.assert_called_once_with(resync_reasons.keys()) sleep.assert_called_once_with(dhcp.conf.resync_interval) - self.assertEqual(len(dhcp.needs_resync_reasons), 0) + self.assertEqual(0, len(dhcp.needs_resync_reasons)) def test_populate_cache_on_start_without_active_networks_support(self): # emul dhcp driver that doesn't support retrieving of active networks @@ -1162,9 +1162,9 @@ nc.port_lookup = {fake_port1.id: fake_network.id} nc.remove(fake_network) - self.assertEqual(len(nc.cache), 0) - self.assertEqual(len(nc.subnet_lookup), 0) - self.assertEqual(len(nc.port_lookup), 0) + self.assertEqual(0, len(nc.cache)) + self.assertEqual(0, len(nc.subnet_lookup)) + self.assertEqual(0, len(nc.port_lookup)) def test_get_network_by_id(self): nc = dhcp_agent.NetworkCache() @@ -1213,7 +1213,7 @@ nc = dhcp_agent.NetworkCache() nc.put(fake_net) nc.put_port(fake_port2) - self.assertEqual(len(nc.port_lookup), 2) + self.assertEqual(2, len(nc.port_lookup)) self.assertIn(fake_port2, fake_net.ports) def test_put_port_existing(self): @@ -1226,7 +1226,7 @@ nc.put(fake_net) nc.put_port(fake_port2) - self.assertEqual(len(nc.port_lookup), 2) + self.assertEqual(2, len(nc.port_lookup)) self.assertIn(fake_port2, fake_net.ports) def test_remove_port_existing(self): @@ -1239,7 +1239,7 @@ nc.put(fake_net) nc.remove_port(fake_port2) - self.assertEqual(len(nc.port_lookup), 1) + self.assertEqual(1, len(nc.port_lookup)) self.assertNotIn(fake_port2, fake_net.ports) def test_get_port_by_id(self): @@ -1356,7 +1356,7 @@ dh._cleanup_stale_devices = mock.Mock() interface_name = dh.setup(net) - self.assertEqual(interface_name, 'tap12345678-12') + self.assertEqual('tap12345678-12', interface_name) plugin.assert_has_calls([ mock.call.create_dhcp_port( @@ -1579,7 +1579,7 @@ mock_driver.assert_has_calls( [mock.call.get_device_name(fake_port)]) - self.assertEqual(len(plugin.mock_calls), 0) + self.assertEqual(0, len(plugin.mock_calls)) def test_get_device_id(self): fake_net = dhcp.NetModel( @@ -1594,7 +1594,7 @@ uuid5.return_value = '1ae5f96c-c527-5079-82ea-371a01645457' dh = 
dhcp.DeviceManager(cfg.CONF, None) - self.assertEqual(dh.get_device_id(fake_net), expected) + self.assertEqual(expected, dh.get_device_id(fake_net)) uuid5.assert_called_once_with(uuid.NAMESPACE_DNS, local_hostname) def test_update(self): @@ -1628,7 +1628,7 @@ network = FakeV4Network() dh._set_default_route(network, 'tap-name') - self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertEqual(1, device.route.get_gateway.call_count) self.assertFalse(device.route.delete_gateway.called) device.route.add_gateway.assert_called_once_with('192.168.0.1') @@ -1642,7 +1642,7 @@ network = FakeV4NetworkOutsideGateway() dh._set_default_route(network, 'tap-name') - self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertEqual(1, device.route.get_gateway.call_count) self.assertFalse(device.route.delete_gateway.called) device.route.add_route.assert_called_once_with('192.168.1.1', scope='link') @@ -1658,7 +1658,7 @@ network.namespace = 'qdhcp-1234' dh._set_default_route(network, 'tap-name') - self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertEqual(1, device.route.get_gateway.call_count) self.assertFalse(device.route.delete_gateway.called) self.assertFalse(device.route.add_gateway.called) @@ -1672,7 +1672,7 @@ network.namespace = 'qdhcp-1234' dh._set_default_route(network, 'tap-name') - self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertEqual(1, device.route.get_gateway.call_count) device.route.delete_gateway.assert_called_once_with('192.168.0.1') self.assertFalse(device.route.add_gateway.called) @@ -1686,7 +1686,7 @@ network.namespace = 'qdhcp-1234' dh._set_default_route(network, 'tap-name') - self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertEqual(1, device.route.get_gateway.call_count) device.route.delete_gateway.assert_called_once_with('192.168.0.1') self.assertFalse(device.route.add_gateway.called) @@ -1699,7 +1699,7 @@ network = FakeV4Network() dh._set_default_route(network, 'tap-name') - self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertEqual(1, device.route.get_gateway.call_count) self.assertFalse(device.route.delete_gateway.called) self.assertFalse(device.route.add_gateway.called) @@ -1712,7 +1712,7 @@ network = FakeV4Network() dh._set_default_route(network, 'tap-name') - self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertEqual(1, device.route.get_gateway.call_count) self.assertFalse(device.route.delete_gateway.called) device.route.add_gateway.assert_called_once_with('192.168.0.1') @@ -1727,8 +1727,8 @@ network = FakeV4NetworkOutsideGateway() dh._set_default_route(network, 'tap-name') - self.assertEqual(device.route.get_gateway.call_count, 1) - self.assertEqual(device.route.list_onlink_routes.call_count, 2) + self.assertEqual(1, device.route.get_gateway.call_count) + self.assertEqual(2, device.route.list_onlink_routes.call_count) self.assertFalse(device.route.delete_gateway.called) device.route.delete_route.assert_called_once_with('192.168.2.1', scope='link') @@ -1749,7 +1749,7 @@ network.subnets = [subnet2, FakeV4Subnet()] dh._set_default_route(network, 'tap-name') - self.assertEqual(device.route.get_gateway.call_count, 1) + self.assertEqual(1, device.route.get_gateway.call_count) self.assertFalse(device.route.delete_gateway.called) device.route.add_gateway.assert_called_once_with('192.168.1.1') @@ -1759,23 +1759,23 @@ d = dict(a=1, b=2) m = dhcp.DictModel(d) - self.assertEqual(m.a, 1) - self.assertEqual(m.b, 2) + self.assertEqual(1, m.a) + self.assertEqual(2, m.b) def 
test_dict_has_sub_dict(self): d = dict(a=dict(b=2)) m = dhcp.DictModel(d) - self.assertEqual(m.a.b, 2) + self.assertEqual(2, m.a.b) def test_dict_contains_list(self): d = dict(a=[1, 2]) m = dhcp.DictModel(d) - self.assertEqual(m.a, [1, 2]) + self.assertEqual([1, 2], m.a) def test_dict_contains_list_of_dicts(self): d = dict(a=[dict(b=2), dict(c=3)]) m = dhcp.DictModel(d) - self.assertEqual(m.a[0].b, 2) - self.assertEqual(m.a[1].c, 3) + self.assertEqual(2, m.a[0].b) + self.assertEqual(3, m.a[1].c) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l2/extensions/test_fdb_population.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l2/extensions/test_fdb_population.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l2/extensions/test_fdb_population.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l2/extensions/test_fdb_population.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,192 @@ +# Copyright (c) 2016 Mellanox Technologies, Ltd +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +import mock +from neutron_lib import constants +from oslo_config import cfg +import six + +from neutron.agent.l2.extensions.fdb_population import ( + FdbPopulationAgentExtension) +from neutron.common import utils as n_utils +from neutron.plugins.ml2.drivers.linuxbridge.agent.common import ( + constants as linux_bridge_constants) +from neutron.plugins.ml2.drivers.openvswitch.agent.common import ( + constants as ovs_constants) +from neutron.tests import base + + +class FdbPopulationExtensionTestCase(base.BaseTestCase): + + UPDATE_MSG = {u'device_owner': constants.DEVICE_OWNER_ROUTER_INTF, + u'physical_network': u'physnet1', + u'mac_address': u'fa:16:3e:ba:bc:21', + u'port_id': u'17ceda02-43e1-48d8-beb6-35885b20cae6'} + DELETE_MSG = {u'port_id': u'17ceda02-43e1-48d8-beb6-35885b20cae6'} + FDB_TABLE = ("aa:aa:aa:aa:aa:aa self permanent\n" + "bb:bb:bb:bb:bb:bb self permanent") + + def setUp(self): + super(FdbPopulationExtensionTestCase, self).setUp() + cfg.CONF.set_override('shared_physical_device_mappings', + ['physnet1:p1p1'], 'FDB') + self.DEVICE = self._get_existing_device() + + def _get_existing_device(self): + device_mappings = n_utils.parse_mappings( + cfg.CONF.FDB.shared_physical_device_mappings, unique_keys=False) + DEVICES = six.next(six.itervalues(device_mappings)) + return DEVICES[0] + + def _get_fdb_extension(self, mock_execute, fdb_table): + mock_execute.return_value = fdb_table + fdb_pop = FdbPopulationAgentExtension() + fdb_pop.initialize(None, ovs_constants.EXTENSION_DRIVER_TYPE) + return fdb_pop + + @mock.patch('neutron.agent.linux.utils.execute') + def test_initialize(self, mock_execute): + fdb_extension = FdbPopulationAgentExtension() + fdb_extension.initialize(None, ovs_constants.EXTENSION_DRIVER_TYPE) + fdb_extension.initialize(None, + linux_bridge_constants.EXTENSION_DRIVER_TYPE) + + @mock.patch('neutron.agent.linux.utils.execute') + def test_initialize_invalid_agent(self, mock_execute): + fdb_extension = 
FdbPopulationAgentExtension() + self.assertRaises(SystemExit, fdb_extension.initialize, None, 'sriov') + + @mock.patch('neutron.agent.linux.utils.execute') + def test_construct_empty_fdb_table(self, mock_execute): + self._get_fdb_extension(mock_execute, fdb_table='') + cmd = ['bridge', 'fdb', 'show', 'dev', self.DEVICE] + mock_execute.assert_called_once_with(cmd, run_as_root=True) + + @mock.patch('neutron.agent.linux.utils.execute') + def test_construct_existing_fdb_table(self, mock_execute): + fdb_extension = self._get_fdb_extension(mock_execute, + fdb_table=self.FDB_TABLE) + cmd = ['bridge', 'fdb', 'show', 'dev', self.DEVICE] + mock_execute.assert_called_once_with(cmd, run_as_root=True) + updated_macs_for_device = ( + fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE)) + macs = [line.split()[0] for line in self.FDB_TABLE.split('\n')] + for mac in macs: + self.assertIn(mac, updated_macs_for_device) + + @mock.patch('neutron.agent.linux.utils.execute') + def test_update_port_add_rule(self, mock_execute): + fdb_extension = self._get_fdb_extension(mock_execute, self.FDB_TABLE) + mock_execute.reset_mock() + fdb_extension.handle_port(context=None, details=self.UPDATE_MSG) + cmd = ['bridge', 'fdb', 'add', self.UPDATE_MSG['mac_address'], + 'dev', self.DEVICE] + mock_execute.assert_called_once_with(cmd, run_as_root=True) + updated_macs_for_device = ( + fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE)) + mac = self.UPDATE_MSG['mac_address'] + self.assertIn(mac, updated_macs_for_device) + + @mock.patch('neutron.agent.linux.utils.execute') + def test_update_port_changed_mac(self, mock_execute): + fdb_extension = self._get_fdb_extension(mock_execute, self.FDB_TABLE) + mock_execute.reset_mock() + mac = self.UPDATE_MSG['mac_address'] + updated_mac = 'fa:16:3e:ba:bc:33' + commands = [] + fdb_extension.handle_port(context=None, details=self.UPDATE_MSG) + commands.append(['bridge', 'fdb', 'add', mac, 'dev', self.DEVICE]) + self.UPDATE_MSG['mac_address'] = updated_mac + fdb_extension.handle_port(context=None, details=self.UPDATE_MSG) + commands.append(['bridge', 'fdb', 'delete', mac, 'dev', self.DEVICE]) + commands.append(['bridge', 'fdb', 'add', updated_mac, + 'dev', self.DEVICE]) + calls = [] + for cmd in commands: + calls.append(mock.call(cmd, run_as_root=True)) + mock_execute.assert_has_calls(calls) + updated_macs_for_device = ( + fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE)) + self.assertIn(updated_mac, updated_macs_for_device) + self.assertNotIn(mac, updated_macs_for_device) + + @mock.patch('neutron.agent.linux.utils.execute') + def test_unpermitted_device_owner(self, mock_execute): + fdb_extension = self._get_fdb_extension(mock_execute, '') + mock_execute.reset_mock() + details = copy.deepcopy(self.UPDATE_MSG) + details['device_owner'] = constants.DEVICE_OWNER_LOADBALANCER + fdb_extension.handle_port(context=None, details=details) + self.assertFalse(mock_execute.called) + updated_macs_for_device = ( + fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE)) + mac = self.UPDATE_MSG['mac_address'] + self.assertNotIn(mac, updated_macs_for_device) + + @mock.patch('neutron.agent.linux.utils.execute') + def test_catch_init_exception(self, mock_execute): + mock_execute.side_effect = RuntimeError + fdb_extension = self._get_fdb_extension(mock_execute, '') + updated_macs_for_device = ( + fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE)) + self.assertIsNone(updated_macs_for_device) + + @mock.patch('neutron.agent.linux.utils.execute') + def 
test_catch_update_port_exception(self, mock_execute): + fdb_extension = self._get_fdb_extension(mock_execute, '') + mock_execute.side_effect = RuntimeError + fdb_extension.handle_port(context=None, details=self.UPDATE_MSG) + updated_macs_for_device = ( + fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE)) + mac = self.UPDATE_MSG['mac_address'] + self.assertNotIn(mac, updated_macs_for_device) + + @mock.patch('neutron.agent.linux.utils.execute') + def test_catch_delete_port_exception(self, mock_execute): + fdb_extension = self._get_fdb_extension(mock_execute, '') + fdb_extension.handle_port(context=None, details=self.UPDATE_MSG) + mock_execute.side_effect = RuntimeError + fdb_extension.delete_port(context=None, details=self.DELETE_MSG) + updated_macs_for_device = ( + fdb_extension.fdb_tracker.device_to_macs.get(self.DEVICE)) + mac = self.UPDATE_MSG['mac_address'] + self.assertIn(mac, updated_macs_for_device) + + @mock.patch('neutron.agent.linux.utils.execute') + def test_delete_port(self, mock_execute): + fdb_extension = self._get_fdb_extension(mock_execute, '') + fdb_extension.handle_port(context=None, details=self.UPDATE_MSG) + mock_execute.reset_mock() + fdb_extension.delete_port(context=None, details=self.DELETE_MSG) + cmd = ['bridge', 'fdb', 'delete', self.UPDATE_MSG['mac_address'], + 'dev', self.DEVICE] + mock_execute.assert_called_once_with(cmd, run_as_root=True) + + @mock.patch('neutron.agent.linux.utils.execute') + def test_multiple_devices(self, mock_execute): + cfg.CONF.set_override('shared_physical_device_mappings', + ['physnet1:p1p1', 'physnet1:p2p2'], 'FDB') + + fdb_extension = self._get_fdb_extension(mock_execute, '') + fdb_extension.handle_port(context=None, details=self.UPDATE_MSG) + mac = self.UPDATE_MSG['mac_address'] + calls = [] + cmd = ['bridge', 'fdb', 'add', mac, 'dev', 'p1p1'] + calls.append(mock.call(cmd, run_as_root=True)) + cmd = ['bridge', 'fdb', 'add', mac, 'dev', 'p2p2'] + calls.append(mock.call(cmd, run_as_root=True)) + mock_execute.assert_has_calls(calls, any_order=True) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l2/extensions/test_manager.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l2/extensions/test_manager.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l2/extensions/test_manager.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l2/extensions/test_manager.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,52 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -from oslo_config import cfg - -from neutron.agent.l2.extensions import manager as ext_manager -from neutron.tests import base - - -class TestAgentExtensionsManager(base.BaseTestCase): - - def setUp(self): - super(TestAgentExtensionsManager, self).setUp() - mock.patch('neutron.agent.l2.extensions.qos.QosAgentExtension', - autospec=True).start() - conf = cfg.CONF - ext_manager.register_opts(conf) - cfg.CONF.set_override('extensions', ['qos'], 'agent') - self.manager = ext_manager.AgentExtensionsManager(conf) - - def _get_extension(self): - return self.manager.extensions[0].obj - - def test_initialize(self): - connection = object() - self.manager.initialize(connection, 'fake_driver_type') - ext = self._get_extension() - ext.initialize.assert_called_once_with(connection, 'fake_driver_type') - - def test_handle_port(self): - context = object() - data = object() - self.manager.handle_port(context, data) - ext = self._get_extension() - ext.handle_port.assert_called_once_with(context, data) - - def test_delete_port(self): - context = object() - data = object() - self.manager.delete_port(context, data) - ext = self._get_extension() - ext.delete_port.assert_called_once_with(context, data) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l2/extensions/test_qos.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l2/extensions/test_qos.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l2/extensions/test_qos.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l2/extensions/test_qos.py 2016-08-03 20:10:34.000000000 +0000 @@ -137,6 +137,10 @@ def setUp(self): super(QosExtensionBaseTestCase, self).setUp() + conn_patcher = mock.patch( + 'neutron.agent.ovsdb.native.connection.Connection.start') + conn_patcher.start() + self.addCleanup(conn_patcher.stop) self.qos_ext = qos.QosAgentExtension() self.context = context.get_admin_context() self.connection = mock.Mock() @@ -234,7 +238,7 @@ self.qos_ext, '_process_update_policy') as update_mock: policy_obj = mock.Mock() - self.qos_ext._handle_notification(policy_obj, events.UPDATED) + self.qos_ext._handle_notification([policy_obj], events.UPDATED) update_mock.assert_called_with(policy_obj) def test__process_update_policy(self): @@ -304,7 +308,7 @@ resources_rpc.resource_type_versioned_topic(resource_type), [rpc_mock()], fanout=True) - for resource_type in self.qos_ext.SUPPORTED_RESOURCES] + for resource_type in self.qos_ext.SUPPORTED_RESOURCE_TYPES] ) subscribe_mock.assert_called_with(mock.ANY, resources.QOS_POLICY) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l2/test_l2_agent_extensions_manager.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l2/test_l2_agent_extensions_manager.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l2/test_l2_agent_extensions_manager.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l2/test_l2_agent_extensions_manager.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import cfg + +from neutron.agent.l2 import l2_agent_extensions_manager as l2_ext_manager +from neutron.tests import base + + +class TestL2AgentExtensionsManager(base.BaseTestCase): + + def setUp(self): + super(TestL2AgentExtensionsManager, self).setUp() + mock.patch('neutron.agent.l2.extensions.qos.QosAgentExtension', + autospec=True).start() + conf = cfg.CONF + l2_ext_manager.register_opts(conf) + cfg.CONF.set_override('extensions', ['qos'], 'agent') + self.manager = l2_ext_manager.L2AgentExtensionsManager(conf) + + def _get_extension(self): + return self.manager.extensions[0].obj + + def test_initialize(self): + connection = object() + self.manager.initialize(connection, 'fake_driver_type') + ext = self._get_extension() + ext.initialize.assert_called_once_with(connection, 'fake_driver_type') + + def test_handle_port(self): + context = object() + data = object() + self.manager.handle_port(context, data) + ext = self._get_extension() + ext.handle_port.assert_called_once_with(context, data) + + def test_delete_port(self): + context = object() + data = object() + self.manager.delete_port(context, data) + ext = self._get_extension() + ext.delete_port.assert_called_once_with(context, data) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_agent.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_agent.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -20,8 +20,9 @@ import eventlet import mock import netaddr -from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants from neutron_lib import exceptions as exc +from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_utils import timeutils @@ -31,7 +32,6 @@ from neutron.agent.common import config as agent_config from neutron.agent.l3 import agent as l3_agent -from neutron.agent.l3 import config as l3_config from neutron.agent.l3 import dvr_edge_router as dvr_router from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import ha @@ -48,9 +48,10 @@ from neutron.agent.linux import ra from neutron.agent.metadata import driver as metadata_driver from neutron.agent import rpc as agent_rpc -from neutron.common import config as base_config from neutron.common import constants as n_const from neutron.common import exceptions as n_exc +from neutron.conf.agent.l3 import config as l3_config +from neutron.conf import common as base_config from neutron.extensions import portbindings from neutron.plugins.common import constants as p_const from neutron.tests import base @@ -71,7 +72,7 @@ self.conf.register_opts(base_config.core_opts) log.register_options(self.conf) self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT') - self.conf.register_opts(l3_config.OPTS) + l3_config.register_l3_agent_config_opts(l3_config.OPTS, self.conf) self.conf.register_opts(ha.OPTS) agent_config.register_interface_driver_opts_helper(self.conf) agent_config.register_process_monitor_opts(self.conf) @@ -83,8 +84,7 @@ self.conf.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') self.conf.set_override('send_arp_for_ha', 1) - self.conf.set_override('state_path', '/tmp') - self.conf.set_override('ra_confs', '/tmp') + self.conf.set_override('state_path', 
cfg.CONF.state_path) self.conf.set_override('pd_dhcp_driver', '') self.device_exists_p = mock.patch( @@ -151,7 +151,7 @@ 'id': subnet_id_1}], 'network_id': _uuid(), 'device_owner': - l3_constants.DEVICE_OWNER_ROUTER_SNAT, + lib_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:80', 'fixed_ips': [{'subnet_id': subnet_id_1, 'ip_address': '152.2.0.13', @@ -162,7 +162,7 @@ 'id': subnet_id_2}], 'network_id': _uuid(), 'device_owner': - l3_constants.DEVICE_OWNER_ROUTER_SNAT, + lib_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:80', 'fixed_ips': [{'subnet_id': subnet_id_2, 'ip_address': '152.10.0.13', @@ -433,7 +433,7 @@ 'floating_ip_address': '192.168.1.34', 'fixed_ip_address': '192.168.0.1', 'port_id': _uuid()}]} - router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips'] + router[lib_constants.FLOATINGIP_KEY] = fake_fip['floatingips'] ri.external_gateway_added(ex_gw_port, interface_name) if not router.get('distributed'): self.assertEqual(1, self.mock_driver.plug.call_count) @@ -581,7 +581,7 @@ 'floating_ip_address': '192.168.1.34', 'fixed_ip_address': '192.168.0.1', 'port_id': _uuid()}]} - router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips'] + router[lib_constants.FLOATINGIP_KEY] = fake_fip['floatingips'] ri.external_gateway_updated(ex_gw_port, interface_name) self.assertEqual(1, self.mock_driver.plug.call_count) self.assertEqual(1, self.mock_driver.init_router_port.call_count) @@ -611,6 +611,68 @@ def test_external_gateway_updated_dual_stack(self): self._test_external_gateway_updated(dual_stack=True) + def test_dvr_edge_router_init_for_snat_namespace_object(self): + router = {'id': _uuid()} + ri = dvr_router.DvrEdgeRouter(mock.Mock(), + HOSTNAME, + router['id'], + router, + **self.ri_kwargs) + # Make sure that the ri.snat_namespace object is created when the + # router is initialized + self.assertIsNotNone(ri.snat_namespace) + + def test_ext_gw_updated_calling_snat_ns_delete_if_gw_port_host_none( + self): + """Test to check the impact of the snat_namespace object. + + This function specifically checks the impact of the snat + namespace object value on external_gateway_removed for deleting + snat_namespace when the gw_port_host mismatches or is None. + """ + router = l3_test_common.prepare_router_data(num_internal_ports=2) + ri = dvr_router.DvrEdgeRouter(mock.Mock(), + HOSTNAME, + router['id'], + router, + **self.ri_kwargs) + with mock.patch.object(dvr_snat_ns.SnatNamespace, + 'delete') as snat_ns_delete: + interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test( + self, ri) + router['gw_port_host'] = '' + ri.external_gateway_updated(ex_gw_port, interface_name) + if router['gw_port_host'] != ri.host: + self.assertEqual(1, snat_ns_delete.call_count) + + @mock.patch.object(namespaces.Namespace, 'delete') + def test_snat_ns_delete_not_called_when_snat_namespace_does_not_exist( + self, mock_ns_del): + """Test to check the impact of the snat_namespace object. + + This function specifically checks the impact of the snat + namespace object initialization without the actual creation + of snat_namespace. When deletes are issued to the snat + namespace based on the snat namespace object's existence, it + should check that the namespace actually exists before + it tries to delete. + """ + router = l3_test_common.prepare_router_data(num_internal_ports=2) + ri = dvr_router.DvrEdgeRouter(mock.Mock(), + HOSTNAME, + router['id'], + router, + **self.ri_kwargs) + # Make sure we set a return value to emulate the non-existence + # of the namespace. 
+ self.mock_ip.netns.exists.return_value = False + self.assertIsNotNone(ri.snat_namespace) + interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self, + ri) + ri._external_gateway_removed = mock.Mock() + ri.external_gateway_removed(ex_gw_port, interface_name) + self.assertFalse(mock_ns_del.called) + def _test_ext_gw_updated_dvr_edge_router(self, host_match, snat_hosted_before=True): """ @@ -629,8 +691,6 @@ if snat_hosted_before: ri._create_snat_namespace() snat_ns_name = ri.snat_namespace.name - else: - self.assertIsNone(ri.snat_namespace) interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self, ri) @@ -650,7 +710,6 @@ bridge=self.conf.external_network_bridge, namespace=snat_ns_name, prefix=l3_agent.EXTERNAL_DEV_PREFIX) - self.assertIsNone(ri.snat_namespace) else: if not snat_hosted_before: self.assertIsNotNone(ri.snat_namespace) @@ -724,7 +783,7 @@ def _verify_snat_mangle_rules(self, nat_rules, mangle_rules, router, negate=False): - interfaces = router[l3_constants.INTERFACE_KEY] + interfaces = router[lib_constants.INTERFACE_KEY] source_cidrs = [] for iface in interfaces: for subnet in iface['subnets']: @@ -771,9 +830,9 @@ test_port = { 'mac_address': '00:12:23:34:45:56', 'fixed_ips': [{'subnet_id': l3_test_common.get_subnet_id( - router[l3_constants.INTERFACE_KEY][0]), + router[lib_constants.INTERFACE_KEY][0]), 'ip_address': '101.12.13.14'}]} - internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) + internal_ports = ri.router.get(lib_constants.INTERFACE_KEY, []) # test valid case with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces: get_interfaces.return_value = [test_port] @@ -800,7 +859,7 @@ router, **self.ri_kwargs) subnet_id = l3_test_common.get_subnet_id( - router[l3_constants.INTERFACE_KEY][0]) + router[lib_constants.INTERFACE_KEY][0]) ri.router['distributed'] = True ri.router['_snat_router_interfaces'] = [{ 'fixed_ips': [{'subnet_id': subnet_id, @@ -837,7 +896,7 @@ fake_floatingips2 = copy.deepcopy(fake_floatingips1) fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8' - router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips'] + router[lib_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips'] ri.process(agent) ri.process_floating_ip_addresses.assert_called_with(mock.ANY) ri.process_floating_ip_addresses.reset_mock() @@ -862,7 +921,7 @@ self.assertEqual(1, ri.external_gateway_updated.call_count) # remove just the floating ips - del router[l3_constants.FLOATINGIP_KEY] + del router[lib_constants.FLOATINGIP_KEY] ri.process(agent) ri.process_floating_ip_addresses.assert_called_with(mock.ANY) ri.process_floating_ip_addresses.reset_mock() @@ -870,7 +929,7 @@ ri.process_floating_ip_nat_rules.reset_mock() # now no ports so state is torn down - del router[l3_constants.INTERFACE_KEY] + del router[lib_constants.INTERFACE_KEY] del router['gw_port'] ri.process(agent) self.assertEqual(1, self.send_adv_notif.call_count) @@ -878,24 +937,23 @@ self.assertEqual(distributed, ri.process_floating_ip_addresses.called) self.assertEqual(distributed, ri.process_floating_ip_nat_rules.called) - @mock.patch('neutron.agent.linux.ip_lib.IPDevice') - def _test_process_floating_ip_addresses_add(self, ri, agent, IPDevice): + def _test_process_floating_ip_addresses_add(self, ri, agent): floating_ips = ri.get_floating_ips() fip_id = floating_ips[0]['id'] - IPDevice.return_value = device = mock.Mock() + device = self.mock_ip_dev device.addr.list.return_value = [] ri.iptables_manager.ipv4['nat'] = mock.MagicMock() ex_gw_port = {'id': 
_uuid(), 'network_id': mock.sentinel.ext_net_id} ri.add_floating_ip = mock.Mock( - return_value=l3_constants.FLOATINGIP_STATUS_ACTIVE) + return_value=lib_constants.FLOATINGIP_STATUS_ACTIVE) with mock.patch.object(lla.LinkLocalAllocator, '_write'): if ri.router['distributed']: ri.fip_ns = agent.get_fip_ns(ex_gw_port['network_id']) ri.create_dvr_fip_interfaces(ex_gw_port) fip_statuses = ri.process_floating_ip_addresses( mock.sentinel.interface_name) - self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE}, + self.assertEqual({fip_id: lib_constants.FLOATINGIP_STATUS_ACTIVE}, fip_statuses) ri.add_floating_ip.assert_called_once_with( floating_ips[0], mock.sentinel.interface_name, device) @@ -926,7 +984,7 @@ ) router = l3_test_common.prepare_router_data(enable_snat=True) - router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] + router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] router[n_const.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) @@ -992,7 +1050,7 @@ ) router = l3_test_common.prepare_router_data(enable_snat=True) - router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] + router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] router[n_const.FLOATINGIP_AGENT_INTF_KEY] = [] router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) @@ -1037,7 +1095,7 @@ ) router = l3_test_common.prepare_router_data(enable_snat=True) - router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] + router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] router[n_const.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) @@ -1088,7 +1146,7 @@ ) router = l3_test_common.prepare_router_data(enable_snat=True) - router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] + router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] router[n_const.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) @@ -1124,7 +1182,7 @@ 'host': HOSTNAME}]} router = l3_test_common.prepare_router_data(enable_snat=True) - router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] + router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.iptables_manager.ipv4['nat'] = mock.MagicMock() @@ -1351,7 +1409,7 @@ def test_process_router_ipv6_slaac_interface_added(self): router = l3_test_common.prepare_router_data() ri = self._process_router_ipv6_interface_added( - router, ra_mode=n_const.IPV6_SLAAC) + router, ra_mode=lib_constants.IPV6_SLAAC) self._assert_ri_process_enabled(ri) # Expect radvd configured with prefix radvd_config_str = self.utils_replace_file.call_args[0][1] @@ -1361,7 +1419,7 @@ def test_process_router_ipv6_dhcpv6_stateful_interface_added(self): router = l3_test_common.prepare_router_data() ri = self._process_router_ipv6_interface_added( - router, ra_mode=n_const.DHCPV6_STATEFUL) + router, ra_mode=lib_constants.DHCPV6_STATEFUL) self._assert_ri_process_enabled(ri) # Expect radvd configured with prefix radvd_config_str = self.utils_replace_file.call_args[0][1] @@ -1371,12 +1429,12 @@ def test_process_router_ipv6_subnets_added(self): router = l3_test_common.prepare_router_data() ri = self._process_router_ipv6_subnet_added(router, 
ipv6_subnet_modes=[ - {'ra_mode': n_const.IPV6_SLAAC, - 'address_mode': n_const.IPV6_SLAAC}, - {'ra_mode': n_const.DHCPV6_STATELESS, - 'address_mode': n_const.DHCPV6_STATELESS}, - {'ra_mode': n_const.DHCPV6_STATEFUL, - 'address_mode': n_const.DHCPV6_STATEFUL}]) + {'ra_mode': lib_constants.IPV6_SLAAC, + 'address_mode': lib_constants.IPV6_SLAAC}, + {'ra_mode': lib_constants.DHCPV6_STATELESS, + 'address_mode': lib_constants.DHCPV6_STATELESS}, + {'ra_mode': lib_constants.DHCPV6_STATEFUL, + 'address_mode': lib_constants.DHCPV6_STATEFUL}]) self._assert_ri_process_enabled(ri) radvd_config_str = self.utils_replace_file.call_args[0][1] # Assert we have a prefix from IPV6_SLAAC and a prefix from @@ -1396,8 +1454,8 @@ l3_test_common.router_append_subnet( router, count=1, ip_version=6, ipv6_subnet_modes=[ - {'ra_mode': n_const.IPV6_SLAAC, - 'address_mode': n_const.IPV6_SLAAC}]) + {'ra_mode': lib_constants.IPV6_SLAAC, + 'address_mode': lib_constants.IPV6_SLAAC}]) self._process_router_instance_for_agent(agent, ri, router) self._assert_ri_process_enabled(ri) radvd_config = self.utils_replace_file.call_args[0][1].split() @@ -1410,13 +1468,13 @@ self.external_process.reset_mock() self.utils_replace_file.reset_mock() # Add the second subnet on the same interface - interface_id = router[l3_constants.INTERFACE_KEY][1]['id'] + interface_id = router[lib_constants.INTERFACE_KEY][1]['id'] l3_test_common.router_append_subnet( router, count=1, ip_version=6, ipv6_subnet_modes=[ - {'ra_mode': n_const.IPV6_SLAAC, - 'address_mode': n_const.IPV6_SLAAC}], + {'ra_mode': lib_constants.IPV6_SLAAC, + 'address_mode': lib_constants.IPV6_SLAAC}], interface_id=interface_id) self._process_router_instance_for_agent(agent, ri, router) # radvd should have been enabled again and the interface @@ -1450,7 +1508,7 @@ # Process with NAT ri.process(agent) # Add an interface and reprocess - del router[l3_constants.INTERFACE_KEY][1] + del router[lib_constants.INTERFACE_KEY][1] # Reassign the router object to RouterInfo ri.router = router ri.process(agent) @@ -1471,7 +1529,7 @@ self.external_process.reset_mock() self.process_monitor.reset_mock() # Remove the IPv6 interface and reprocess - del router[l3_constants.INTERFACE_KEY][1] + del router[lib_constants.INTERFACE_KEY][1] self._process_router_instance_for_agent(agent, ri, router) self._assert_ri_process_disabled(ri) @@ -1484,8 +1542,8 @@ # Add an IPv6 interface with two subnets and reprocess l3_test_common.router_append_subnet( router, count=2, ip_version=6, - ipv6_subnet_modes=([{'ra_mode': n_const.IPV6_SLAAC, - 'address_mode': n_const.IPV6_SLAAC}] + ipv6_subnet_modes=([{'ra_mode': lib_constants.IPV6_SLAAC, + 'address_mode': lib_constants.IPV6_SLAAC}] * 2)) self._process_router_instance_for_agent(agent, ri, router) self._assert_ri_process_enabled(ri) @@ -1493,10 +1551,10 @@ self.utils_replace_file.reset_mock() self.external_process.reset_mock() # Remove one subnet from the interface and reprocess - interfaces = copy.deepcopy(router[l3_constants.INTERFACE_KEY]) + interfaces = copy.deepcopy(router[lib_constants.INTERFACE_KEY]) del interfaces[1]['subnets'][0] del interfaces[1]['fixed_ips'][0] - router[l3_constants.INTERFACE_KEY] = interfaces + router[lib_constants.INTERFACE_KEY] = interfaces self._process_router_instance_for_agent(agent, ri, router) # Assert radvd was enabled again and that we only have one # prefix on the interface @@ -1520,7 +1578,7 @@ internal_network_added.side_effect = RuntimeError self.assertRaises(RuntimeError, ri.process, agent) self.assertNotIn( - 
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) + router[lib_constants.INTERFACE_KEY][0], ri.internal_ports) # The unexpected exception has been fixed manually internal_network_added.side_effect = None @@ -1530,7 +1588,7 @@ ri.process(agent) # We were able to add the port to ri.internal_ports self.assertIn( - router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) + router[lib_constants.INTERFACE_KEY][0], ri.internal_ports) def test_process_router_internal_network_removed_unexpected_error(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) @@ -1550,7 +1608,7 @@ # The above port is set to down state, remove it. self.assertRaises(RuntimeError, ri.process, agent) self.assertIn( - router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) + router[lib_constants.INTERFACE_KEY][0], ri.internal_ports) # The unexpected exception has been fixed manually internal_net_removed.side_effect = None @@ -1560,18 +1618,18 @@ ri.process(agent) # We were able to remove the port from ri.internal_ports self.assertNotIn( - router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) + router[lib_constants.INTERFACE_KEY][0], ri.internal_ports) def test_process_router_floatingip_nochange(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(num_internal_ports=1) fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE', - 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']} + 'port_id': router[lib_constants.INTERFACE_KEY][0]['id']} fip2 = copy.copy(fip1) fip2.update({'id': _uuid(), 'status': 'DOWN', 'floating_ip_address': '9.9.9.9'}) - router[l3_constants.FLOATINGIP_KEY] = [fip1, fip2] + router[lib_constants.FLOATINGIP_KEY] = [fip1, fip2] ri = legacy_router.LegacyRouter(router['id'], router, **self.ri_kwargs) @@ -1592,10 +1650,10 @@ router = l3_test_common.prepare_router_data(num_internal_ports=1) fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE', - 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']} + 'port_id': router[lib_constants.INTERFACE_KEY][0]['id']} fip2 = copy.copy(fip1) fip2.update({'id': _uuid(), 'status': 'DOWN', }) - router[l3_constants.FLOATINGIP_KEY] = [fip1, fip2] + router[lib_constants.FLOATINGIP_KEY] = [fip1, fip2] ri = legacy_router.LegacyRouter(router['id'], router, **self.ri_kwargs) @@ -1618,12 +1676,12 @@ 'update_floatingip_statuses') as mock_update_fip_status: fip_id = _uuid() router = l3_test_common.prepare_router_data(num_internal_ports=1) - router[l3_constants.FLOATINGIP_KEY] = [ + router[lib_constants.FLOATINGIP_KEY] = [ {'id': fip_id, 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', 'status': 'DOWN', - 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}] + 'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}] ri = legacy_router.LegacyRouter(router['id'], router, @@ -1633,16 +1691,16 @@ # Assess the call for putting the floating IP up was performed mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, - {fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE}) + {fip_id: lib_constants.FLOATINGIP_STATUS_ACTIVE}) mock_update_fip_status.reset_mock() # Process the router again, this time without floating IPs - router[l3_constants.FLOATINGIP_KEY] = [] + router[lib_constants.FLOATINGIP_KEY] = [] ri.router = router ri.process(agent) # Assess the call for putting the floating IP up was performed mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, - {fip_id: 
l3_constants.FLOATINGIP_STATUS_DOWN}) + {fip_id: lib_constants.FLOATINGIP_STATUS_DOWN}) def test_process_router_floatingip_exception(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) @@ -1651,11 +1709,11 @@ 'update_floatingip_statuses') as mock_update_fip_status: fip_id = _uuid() router = l3_test_common.prepare_router_data(num_internal_ports=1) - router[l3_constants.FLOATINGIP_KEY] = [ + router[lib_constants.FLOATINGIP_KEY] = [ {'id': fip_id, 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', - 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}] + 'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}] ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.process_floating_ip_addresses = mock.Mock( @@ -1666,7 +1724,7 @@ # was performed mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, - {fip_id: l3_constants.FLOATINGIP_STATUS_ERROR}) + {fip_id: lib_constants.FLOATINGIP_STATUS_ERROR}) def test_process_external_iptables_exception(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) @@ -1675,11 +1733,11 @@ 'update_floatingip_statuses') as mock_update_fip_status: fip_id = _uuid() router = l3_test_common.prepare_router_data(num_internal_ports=1) - router[l3_constants.FLOATINGIP_KEY] = [ + router[lib_constants.FLOATINGIP_KEY] = [ {'id': fip_id, 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', - 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}] + 'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}] ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) ri.iptables_manager._apply = mock.Mock(side_effect=Exception) @@ -1688,7 +1746,7 @@ # was performed mock_update_fip_status.assert_called_once_with( mock.ANY, ri.router_id, - {fip_id: l3_constants.FLOATINGIP_STATUS_ERROR}) + {fip_id: lib_constants.FLOATINGIP_STATUS_ERROR}) self.assertEqual(1, ri.iptables_manager._apply.call_count) @@ -1771,7 +1829,7 @@ num_internal_ports=1) ri = l3router.RouterInfo(router['id'], router, **self.ri_kwargs) - internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, []) + internal_ports = ri.router.get(lib_constants.INTERFACE_KEY, []) self.assertEqual(1, len(internal_ports)) internal_port = internal_ports[0] @@ -1814,7 +1872,7 @@ self.mock_driver.unplug.assert_called_with( stale_devnames[0], - bridge="br-ex", + bridge="", namespace=ri.ns_name, prefix=l3_agent.EXTERNAL_DEV_PREFIX) @@ -1995,6 +2053,7 @@ self._test_process_routers_update_router_deleted(True) def test_process_router_if_compatible_with_no_ext_net_in_conf(self): + self.conf.set_override('external_network_bridge', 'br-ex') agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_external_network_id.return_value = 'aaa' @@ -2023,6 +2082,7 @@ self.assertFalse(self.plugin_api.get_external_network_id.called) def test_process_router_if_compatible_with_stale_cached_ext_net(self): + self.conf.set_override('external_network_bridge', 'br-ex') agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_external_network_id.return_value = 'aaa' agent.target_ex_net_id = 'bbb' @@ -2038,6 +2098,7 @@ agent.context) def test_process_router_if_compatible_w_no_ext_net_and_2_net_plugin(self): + self.conf.set_override('external_network_bridge', 'br-ex') agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = {'id': _uuid(), @@ -2079,7 +2140,6 @@ 'external_gateway_info': {'network_id': 'aaa'}} agent.router_info = {} - self.conf.set_override('external_network_bridge', '') agent._process_router_if_compatible(router) self.assertIn(router['id'], 
agent.router_info) @@ -2233,8 +2293,6 @@ self.assertEqual(tuple(), agent.neutron_service_plugins) def test_external_gateway_removed_ext_gw_port_no_fip_ns(self): - self.conf.set_override('state_path', '/tmp') - agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.conf.agent_mode = 'dvr_snat' router = l3_test_common.prepare_router_data(num_internal_ports=2) @@ -2302,8 +2360,8 @@ def test_generate_radvd_mtu_conf(self): router = l3_test_common.prepare_router_data() - ipv6_subnet_modes = [{'ra_mode': n_const.IPV6_SLAAC, - 'address_mode': n_const.IPV6_SLAAC}] + ipv6_subnet_modes = [{'ra_mode': lib_constants.IPV6_SLAAC, + 'address_mode': lib_constants.IPV6_SLAAC}] network_mtu = '1446' ri = self._process_router_ipv6_subnet_added(router, ipv6_subnet_modes, @@ -2311,22 +2369,22 @@ network_mtu) expected = "AdvLinkMTU 1446" ri.agent_conf.set_override('advertise_mtu', False) - ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY]) + ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY]) self.assertNotIn(expected, self.utils_replace_file.call_args[0][1]) # Verify that MTU is advertised when advertise_mtu is True ri.agent_conf.set_override('advertise_mtu', True) - ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY]) + ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY]) self.assertIn(expected, self.utils_replace_file.call_args[0][1]) def test_generate_radvd_conf_other_and_managed_flag(self): # expected = {ra_mode: (AdvOtherConfigFlag, AdvManagedFlag), ...} - expected = {n_const.IPV6_SLAAC: (False, False), - n_const.DHCPV6_STATELESS: (True, False), - n_const.DHCPV6_STATEFUL: (False, True)} + expected = {lib_constants.IPV6_SLAAC: (False, False), + lib_constants.DHCPV6_STATELESS: (True, False), + lib_constants.DHCPV6_STATEFUL: (False, True)} - modes = [n_const.IPV6_SLAAC, n_const.DHCPV6_STATELESS, - n_const.DHCPV6_STATEFUL] + modes = [lib_constants.IPV6_SLAAC, lib_constants.DHCPV6_STATELESS, + lib_constants.DHCPV6_STATEFUL] mode_combos = list(iter_chain(*[[list(combo) for combo in iter_combinations(modes, i)] for i in range(1, len(modes) + 1)])) @@ -2337,7 +2395,7 @@ ri = self._process_router_ipv6_subnet_added(router, ipv6_subnet_modes) - ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY]) + ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY]) def assertFlag(flag): return (self.assertIn if flag else self.assertNotIn) @@ -2355,11 +2413,11 @@ self.conf.set_override('min_rtr_adv_interval', 22) self.conf.set_override('max_rtr_adv_interval', 66) router = l3_test_common.prepare_router_data() - ipv6_subnet_modes = [{'ra_mode': n_const.IPV6_SLAAC, - 'address_mode': n_const.IPV6_SLAAC}] + ipv6_subnet_modes = [{'ra_mode': lib_constants.IPV6_SLAAC, + 'address_mode': lib_constants.IPV6_SLAAC}] ri = self._process_router_ipv6_subnet_added(router, ipv6_subnet_modes) - ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY]) + ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY]) self.assertIn("MinRtrAdvInterval 22", self.utils_replace_file.call_args[0][1]) self.assertIn("MaxRtrAdvInterval 66", @@ -2367,13 +2425,13 @@ def test_generate_radvd_rdnss_conf(self): router = l3_test_common.prepare_router_data() - ipv6_subnet_modes = [{'ra_mode': n_const.IPV6_SLAAC, - 'address_mode': n_const.IPV6_SLAAC}] + ipv6_subnet_modes = [{'ra_mode': lib_constants.IPV6_SLAAC, + 'address_mode': lib_constants.IPV6_SLAAC}] dns_list = ['fd01:1::100', 'fd01:1::200', 'fd01::300', 'fd01::400'] ri = self._process_router_ipv6_subnet_added(router, 
ipv6_subnet_modes, dns_nameservers=dns_list) - ri.radvd._generate_radvd_conf(router[l3_constants.INTERFACE_KEY]) + ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY]) # Verify that radvd configuration file includes RDNSS entries expected = "RDNSS " for dns in dns_list[0:ra.MAX_RDNSS_ENTRIES]: @@ -2454,7 +2512,7 @@ expected_calls = [] for intf in intfs: # Remove the router interface - router[l3_constants.INTERFACE_KEY].remove(intf) + router[lib_constants.INTERFACE_KEY].remove(intf) requestor_id = self._pd_get_requestor_id(intf, router, ri) expected_calls += (self._pd_expected_call_external_process( requestor_id, ri, False)) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,6 +14,7 @@ import copy import mock +from oslo_config import cfg from oslo_utils import uuidutils from neutron.agent.common import utils @@ -30,7 +31,7 @@ def setUp(self): super(TestDvrFipNs, self).setUp() self.conf = mock.Mock() - self.conf.state_path = '/tmp' + self.conf.state_path = cfg.CONF.state_path self.driver = mock.Mock() self.driver.DEV_NAME_LEN = 14 self.net_id = _uuid() @@ -164,10 +165,10 @@ @mock.patch.object(ip_lib.IpNetnsCommand, 'exists') def _test_create(self, old_kernel, exists, execute, IPTables): exists.return_value = True - # There are up to four sysctl calls - two for ip_nonlocal_bind, - # and two to enable forwarding - execute.side_effect = [RuntimeError if old_kernel else None, - None, None, None] + # There are up to four sysctl calls - two to enable forwarding, + # and two for ip_nonlocal_bind + execute.side_effect = [None, None, + RuntimeError if old_kernel else None, None] self.fip_ns._iptables_manager = IPTables() self.fip_ns.create() @@ -204,9 +205,12 @@ ip_wrapper.get_devices.return_value = [dev1, dev2] with mock.patch.object(self.fip_ns.ip_wrapper_root.netns, - 'delete') as delete: + 'delete') as delete,\ + mock.patch.object(self.fip_ns.ip_wrapper_root.netns, + 'exists', return_value=True) as exists: self.fip_ns.delete() - delete.assert_called_once_with(mock.ANY) + exists.assert_called_once_with(self.fip_ns.name) + delete.assert_called_once_with(self.fip_ns.name) ext_net_bridge = self.conf.external_network_bridge ns_name = self.fip_ns.get_name() @@ -216,6 +220,15 @@ namespace=ns_name) ip_wrapper.del_veth.assert_called_once_with('fpr-aaaa') + def test_destroy_no_namespace(self): + with mock.patch.object(self.fip_ns.ip_wrapper_root.netns, + 'delete') as delete,\ + mock.patch.object(self.fip_ns.ip_wrapper_root.netns, + 'exists', return_value=False) as exists: + self.fip_ns.delete() + exists.assert_called_once_with(self.fip_ns.name) + self.assertFalse(delete.called) + @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch.object(ip_lib, 'IPDevice') def _test_create_rtr_2_fip_link(self, dev_exists, addr_exists, @@ -224,6 +237,7 @@ ri.router_id = _uuid() ri.rtr_fip_subnet = None ri.ns_name = mock.sentinel.router_ns + ri.get_ex_gw_port.return_value = {'mtu': 2000} rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(ri.router_id) fip_2_rtr_name = self.fip_ns.get_int_device_name(ri.router_id) @@ -234,7 +248,6 @@ allocator.allocate.return_value = pair addr_pair = pair.get_pair() ip_wrapper = IPWrapper() - self.conf.network_device_mtu = 2000 
ip_wrapper.add_veth.return_value = (IPDevice(), IPDevice()) device = IPDevice() device.exists.return_value = dev_exists diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_dvr_local_router.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_dvr_local_router.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_dvr_local_router.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_dvr_local_router.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,13 +14,13 @@ import mock import netaddr -from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants +from oslo_config import cfg from oslo_log import log from oslo_utils import uuidutils from neutron.agent.common import config as agent_config from neutron.agent.l3 import agent as l3_agent -from neutron.agent.l3 import config as l3_config from neutron.agent.l3 import dvr_local_router as dvr_router from neutron.agent.l3 import ha from neutron.agent.l3 import link_local_allocator as lla @@ -28,9 +28,10 @@ from neutron.agent.linux import external_process from neutron.agent.linux import interface from neutron.agent.linux import ip_lib -from neutron.common import config as base_config from neutron.common import constants as n_const from neutron.common import utils as common_utils +from neutron.conf.agent.l3 import config as l3_config +from neutron.conf import common as base_config from neutron.extensions import portbindings from neutron.tests import base from neutron.tests.common import l3_test_common @@ -49,7 +50,7 @@ self.conf.register_opts(base_config.core_opts) log.register_options(self.conf) self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT') - self.conf.register_opts(l3_config.OPTS) + l3_config.register_l3_agent_config_opts(l3_config.OPTS, self.conf) self.conf.register_opts(ha.OPTS) agent_config.register_interface_driver_opts_helper(self.conf) agent_config.register_process_monitor_opts(self.conf) @@ -58,7 +59,7 @@ self.conf.set_override('interface_driver', 'neutron.agent.linux.interface.NullDriver') self.conf.set_override('send_arp_for_ha', 1) - self.conf.set_override('state_path', '') + self.conf.set_override('state_path', cfg.CONF.state_path) self.device_exists_p = mock.patch( 'neutron.agent.linux.ip_lib.device_exists') @@ -124,7 +125,7 @@ 'id': subnet_id_1}], 'network_id': _uuid(), 'device_owner': - l3_constants.DEVICE_OWNER_ROUTER_SNAT, + lib_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:80', 'fixed_ips': [{'subnet_id': subnet_id_1, 'ip_address': '152.2.0.13', @@ -135,7 +136,7 @@ 'id': subnet_id_2}], 'network_id': _uuid(), 'device_owner': - l3_constants.DEVICE_OWNER_ROUTER_SNAT, + lib_constants.DEVICE_OWNER_ROUTER_SNAT, 'mac_address': 'fa:16:3e:80:8d:80', 'fixed_ips': [{'subnet_id': subnet_id_2, 'ip_address': '152.10.0.13', @@ -281,6 +282,28 @@ self.assertFalse(ri.fip_ns.unsubscribe.called) ri.fip_ns.local_subnets.allocate.assert_called_once_with(ri.router_id) + @mock.patch.object(ip_lib, 'IPRule') + def test_floating_ip_moved_dist(self, mIPRule): + router = mock.MagicMock() + ri = self._create_router(router) + floating_ip_address = '15.1.2.3' + fip = {'floating_ip_address': floating_ip_address, + 'fixed_ip_address': '192.168.0.1'} + ri.floating_ips_dict['15.1.2.3'] = FIP_PRI + ri.fip_ns = mock.Mock() + ri.fip_ns.allocate_rule_priority.return_value = FIP_PRI + ri.floating_ip_moved_dist(fip) + + mIPRule().rule.delete.assert_called_once_with( + ip=floating_ip_address, table=16, priority=FIP_PRI) + 
ri.fip_ns.deallocate_rule_priority.assert_called_once_with( + floating_ip_address) + ri.fip_ns.allocate_rule_priority.assert_called_once_with( + floating_ip_address) + mIPRule().rule.add.assert_called_with(ip='192.168.0.1', + table=16, + priority=FIP_PRI) + def _test_add_floating_ip(self, ri, fip, is_failure): ri.floating_ip_added_dist = mock.Mock() @@ -297,7 +320,7 @@ fip = {'floating_ip_address': ip} result = self._test_add_floating_ip(ri, fip, True) ri.floating_ip_added_dist.assert_called_once_with(fip, ip + '/32') - self.assertEqual(l3_constants.FLOATINGIP_STATUS_ACTIVE, result) + self.assertEqual(lib_constants.FLOATINGIP_STATUS_ACTIVE, result) @mock.patch.object(router_info.RouterInfo, 'remove_floating_ip') def test_remove_floating_ip(self, super_remove_floating_ip): @@ -352,10 +375,10 @@ router['distributed'] = True ri = dvr_router.DvrLocalRouter( agent, HOSTNAME, router['id'], router, **self.ri_kwargs) - ports = ri.router.get(l3_constants.INTERFACE_KEY, []) + ports = ri.router.get(lib_constants.INTERFACE_KEY, []) subnet_id = l3_test_common.get_subnet_id(ports[0]) test_ports = [{'mac_address': '00:11:22:33:44:55', - 'device_owner': l3_constants.DEVICE_OWNER_DHCP, + 'device_owner': lib_constants.DEVICE_OWNER_DHCP, 'fixed_ips': [{'ip_address': '1.2.3.4', 'prefixlen': 24, 'subnet_id': subnet_id}]}] @@ -382,7 +405,7 @@ router = l3_test_common.prepare_router_data(num_internal_ports=2) router['distributed'] = True subnet_id = l3_test_common.get_subnet_id( - router[l3_constants.INTERFACE_KEY][0]) + router[lib_constants.INTERFACE_KEY][0]) arp_table = {'ip_address': '1.7.23.11', 'mac_address': '00:11:22:33:44:55', 'subnet_id': subnet_id} @@ -398,7 +421,7 @@ agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) router = l3_test_common.prepare_router_data(num_internal_ports=2) subnet_id = l3_test_common.get_subnet_id( - router[l3_constants.INTERFACE_KEY][0]) + router[lib_constants.INTERFACE_KEY][0]) arp_table = {'ip_address': '1.7.23.11', 'mac_address': '00:11:22:33:44:55', 'subnet_id': subnet_id} @@ -424,7 +447,7 @@ ri = dvr_router.DvrLocalRouter( agent, HOSTNAME, router['id'], router, **self.ri_kwargs) subnet_id = l3_test_common.get_subnet_id( - ri.router[l3_constants.INTERFACE_KEY][0]) + ri.router[lib_constants.INTERFACE_KEY][0]) return ri, subnet_id def test__update_arp_entry_calls_arp_cache_with_no_device(self): @@ -464,7 +487,7 @@ router = l3_test_common.prepare_router_data(num_internal_ports=2) router['distributed'] = True subnet_id = l3_test_common.get_subnet_id( - router[l3_constants.INTERFACE_KEY][0]) + router[lib_constants.INTERFACE_KEY][0]) arp_table = {'ip_address': '1.5.25.15', 'mac_address': '00:44:33:22:11:55', 'subnet_id': subnet_id} @@ -491,7 +514,7 @@ 'gateway_ip': '20.0.0.1'}], 'id': _uuid(), portbindings.HOST_ID: 'myhost', - 'device_owner': l3_constants.DEVICE_OWNER_AGENT_GW, + 'device_owner': lib_constants.DEVICE_OWNER_AGENT_GW, 'network_id': fake_network_id, 'mac_address': 'ca:fe:de:ad:be:ef'}] ) @@ -522,7 +545,7 @@ 'port_id': _uuid()}]} router = l3_test_common.prepare_router_data(enable_snat=True) - router[l3_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] + router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips'] router['distributed'] = True agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) ri = dvr_router.DvrLocalRouter(agent, @@ -578,8 +601,6 @@ 'dvr', 0) def test_external_gateway_removed_ext_gw_port_and_fip(self): - self.conf.set_override('state_path', '/tmp') - agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) agent.conf.agent_mode = 'dvr' router 
= l3_test_common.prepare_router_data(num_internal_ports=2) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_ha_router.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_ha_router.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_ha_router.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_ha_router.py 2016-08-03 20:10:34.000000000 +0000 @@ -68,3 +68,7 @@ 'gateway_ip': '30.0.0.1'}) ri._add_default_gw_virtual_route(ex_gw_port, 'qg-abc') self.assertEqual(1, len(mock_instance.virtual_routes.gateway_routes)) + + subnets[1]['gateway_ip'] = None + ri._add_default_gw_virtual_route(ex_gw_port, 'qg-abc') + self.assertEqual(0, len(mock_instance.virtual_routes.gateway_routes)) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_item_allocator.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_item_allocator.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_item_allocator.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_item_allocator.py 2016-08-03 20:10:34.000000000 +0000 @@ -51,6 +51,21 @@ self.assertIn('da873ca2', a.remembered) self.assertEqual({}, a.allocations) + def test__init__readfile_error(self): + test_pool = set(TestObject(s) for s in range(32768, 40000)) + with mock.patch.object(ia.ItemAllocator, '_read') as read,\ + mock.patch.object(ia.ItemAllocator, '_write') as write: + read.return_value = ["da873ca2,10\n", + "corrupt_entry_no_delimiter\n", + "42c9daf7,11\n"] + a = ia.ItemAllocator('/file', TestObject, test_pool) + + self.assertIn('da873ca2', a.remembered) + self.assertIn('42c9daf7', a.remembered) + self.assertNotIn('corrupt_entry_no_delimiter', a.remembered) + self.assertEqual({}, a.allocations) + self.assertTrue(write.called) + def test_allocate(self): test_pool = set([TestObject(33000), TestObject(33001)]) a = ia.ItemAllocator('/file', TestObject, test_pool) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_legacy_router.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_legacy_router.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_legacy_router.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_legacy_router.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,7 +13,7 @@ # under the License. 
import mock -from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants from oslo_utils import uuidutils from neutron.agent.l3 import legacy_router @@ -57,6 +57,35 @@ device.delete_addr_and_conntrack_state.assert_called_once_with(cidr) + @mock.patch.object(ip_lib, 'IPDevice') + def test_remove_multiple_external_gateway_ips(self, IPDevice): + ri = self._create_router(mock.MagicMock()) + IPDevice.return_value = device = mock.Mock() + gw_ip_pri = '172.16.5.110' + gw_ip_sec = '172.16.5.111' + gw_ip6_pri = '2001:db8::1' + gw_ip6_sec = '2001:db8::2' + v4_prefixlen = 24 + v6_prefixlen = 64 + ex_gw_port = {'fixed_ips': [ + {'ip_address': gw_ip_pri, + 'prefixlen': v4_prefixlen}, + {'ip_address': gw_ip_sec}, + {'ip_address': gw_ip6_pri, + 'prefixlen': v6_prefixlen}, + {'ip_address': gw_ip6_sec}]} + + ri.external_gateway_removed(ex_gw_port, "qg-fake-name") + + cidr_pri = '%s/%s' % (gw_ip_pri, v4_prefixlen) + cidr_sec = '%s/%s' % (gw_ip_sec, lib_constants.IPv4_BITS) + cidr_v6 = '%s/%s' % (gw_ip6_pri, v6_prefixlen) + cidr_v6_sec = '%s/%s' % (gw_ip6_sec, lib_constants.IPv6_BITS) + + device.delete_addr_and_conntrack_state.assert_has_calls( + [mock.call(cidr_pri), mock.call(cidr_sec), + mock.call(cidr_v6), mock.call(cidr_v6_sec)]) + @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif') class TestAddFloatingIpWithMockGarp(BasicRouterTestCaseFramework): @@ -72,7 +101,7 @@ mock.sentinel.interface_name, ip, self.agent_conf) - self.assertEqual(l3_constants.FLOATINGIP_STATUS_ACTIVE, result) + self.assertEqual(lib_constants.FLOATINGIP_STATUS_ACTIVE, result) def test_add_floating_ip_error(self, send_ip_addr_adv_notif): ri = self._create_router() @@ -81,4 +110,4 @@ mock.sentinel.interface_name, mock.sentinel.device) self.assertFalse(ip_lib.send_ip_addr_adv_notif.called) - self.assertEqual(l3_constants.FLOATINGIP_STATUS_ERROR, result) + self.assertEqual(lib_constants.FLOATINGIP_STATUS_ERROR, result) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_namespace_manager.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_namespace_manager.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_namespace_manager.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_namespace_manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -91,6 +91,13 @@ retrieved_ns_names = self.ns_manager.list_all() self.assertFalse(retrieved_ns_names) + def test_ensure_snat_cleanup(self): + router_id = _uuid() + with mock.patch.object(self.ns_manager, '_cleanup') as mock_cleanup: + self.ns_manager.ensure_snat_cleanup(router_id) + mock_cleanup.assert_called_once_with(dvr_snat_ns.SNAT_NS_PREFIX, + router_id) + def test_ensure_router_cleanup(self): router_id = _uuid() ns_names = [namespaces.NS_PREFIX + _uuid() for _ in range(5)] diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_router_info.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_router_info.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/l3/test_router_info.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/l3/test_router_info.py 2016-08-29 20:05:49.000000000 +0000 @@ -11,7 +11,7 @@ # under the License. 
import mock -from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants from oslo_utils import uuidutils from neutron.agent.common import config as agent_config @@ -127,7 +127,7 @@ port = { 'id': _uuid(), 'fixed_ips': [{'ip_address': '172.9.9.9'}], - 'address_scopes': {l3_constants.IP_VERSION_4: '1234'} + 'address_scopes': {lib_constants.IP_VERSION_4: '1234'} } ipv4_mangle = ri.iptables_manager.ipv4['mangle'] = mock.MagicMock() ri.get_address_scope_mark_mask = mock.Mock(return_value='fake_mark') @@ -137,7 +137,7 @@ ri.process_floating_ip_address_scope_rules = mock.Mock() ri.iptables_manager._apply = mock.Mock() - ri.router[l3_constants.INTERFACE_KEY] = [port] + ri.router[lib_constants.INTERFACE_KEY] = [port] ri.process_address_scope() ipv4_mangle.add_rule.assert_called_once_with( @@ -166,6 +166,23 @@ self.assertEqual(new_mark_ids, new_ri.available_mark_ids) self.assertTrue(ri.available_mark_ids != new_ri.available_mark_ids) + def test_process_delete(self): + ri = router_info.RouterInfo(_uuid(), {}, **self.ri_kwargs) + ri.router = {'id': _uuid()} + with mock.patch.object(ri, '_process_internal_ports') as p_i_p,\ + mock.patch.object(ri, '_process_external_on_delete') as p_e_o_d: + self.mock_ip.netns.exists.return_value = False + ri.process_delete(mock.Mock()) + self.assertFalse(p_i_p.called) + self.assertFalse(p_e_o_d.called) + + p_i_p.reset_mock() + p_e_o_d.reset_mock() + self.mock_ip.netns.exists.return_value = True + ri.process_delete(mock.Mock()) + p_i_p.assert_called_once_with(mock.ANY) + p_e_o_d.assert_called_once_with(mock.ANY) + class BasicRouterTestCaseFramework(base.BaseTestCase): def _create_router(self, router=None, **kwargs): @@ -321,8 +338,8 @@ statuses = ri.put_fips_in_error_state() - expected = [{mock.sentinel.id1: l3_constants.FLOATINGIP_STATUS_ERROR, - mock.sentinel.id2: l3_constants.FLOATINGIP_STATUS_ERROR}] + expected = [{mock.sentinel.id1: lib_constants.FLOATINGIP_STATUS_ERROR, + mock.sentinel.id2: lib_constants.FLOATINGIP_STATUS_ERROR}] self.assertNotEqual(expected, statuses) def test_configure_fip_addresses(self): @@ -355,7 +372,7 @@ 'id': fip_id, 'port_id': _uuid(), 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.2', - 'status': l3_constants.FLOATINGIP_STATUS_DOWN + 'status': lib_constants.FLOATINGIP_STATUS_DOWN } IPDevice.return_value = device = mock.Mock() @@ -365,7 +382,7 @@ fip_statuses = ri.process_floating_ip_addresses( mock.sentinel.interface_name) - self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ACTIVE}, + self.assertEqual({fip_id: lib_constants.FLOATINGIP_STATUS_ACTIVE}, fip_statuses) self.assertFalse(device.addr.add.called) @@ -400,13 +417,13 @@ } ri = self._create_router() ri.add_floating_ip = mock.Mock( - return_value=l3_constants.FLOATINGIP_STATUS_ERROR) + return_value=lib_constants.FLOATINGIP_STATUS_ERROR) ri.get_floating_ips = mock.Mock(return_value=[fip]) fip_statuses = ri.process_floating_ip_addresses( mock.sentinel.interface_name) - self.assertEqual({fip_id: l3_constants.FLOATINGIP_STATUS_ERROR}, + self.assertEqual({fip_id: lib_constants.FLOATINGIP_STATUS_ERROR}, fip_statuses) # TODO(mrsmith): refactor for DVR cases @@ -422,3 +439,44 @@ mock.sentinel.interface_name) self.assertEqual({}, fip_statuses) ri.remove_floating_ip.assert_called_once_with(device, '15.1.2.3/32') + + def test_process_floating_ip_reassignment(self, IPDevice): + IPDevice.return_value = device = mock.Mock() + device.addr.list.return_value = [{'cidr': '15.1.2.3/32'}] + + fip_id = _uuid() + fip = { + 'id': fip_id, 
'port_id': _uuid(), + 'floating_ip_address': '15.1.2.3', + 'fixed_ip_address': '192.168.0.3', + 'status': 'DOWN' + } + ri = self._create_router() + ri.get_floating_ips = mock.Mock(return_value=[fip]) + ri.move_floating_ip = mock.Mock() + ri.fip_map = {'15.1.2.3': '192.168.0.2'} + + ri.process_floating_ip_addresses(mock.sentinel.interface_name) + ri.move_floating_ip.assert_called_once_with(fip) + + def test_process_floating_ip_addresses_gw_secondary_ip_not_removed( + self, IPDevice): + IPDevice.return_value = device = mock.Mock() + device.addr.list.return_value = [{'cidr': '1.1.1.1/16'}, + {'cidr': '2.2.2.2/32'}, + {'cidr': '3.3.3.3/32'}, + {'cidr': '4.4.4.4/32'}] + ri = self._create_router() + + ri.get_floating_ips = mock.Mock(return_value=[ + {'id': _uuid(), + 'floating_ip_address': '3.3.3.3', + 'status': 'DOWN'}]) + ri.add_floating_ip = mock.Mock() + ri.get_ex_gw_port = mock.Mock(return_value={ + "fixed_ips": [{"ip_address": "1.1.1.1"}, + {"ip_address": "2.2.2.2"}]}) + ri.remove_floating_ip = mock.Mock() + + ri.process_floating_ip_addresses("qg-fake-device") + ri.remove_floating_ip.assert_called_once_with(device, '4.4.4.4/32') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_bridge_lib.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_bridge_lib.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_bridge_lib.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_bridge_lib.py 2016-08-29 20:05:49.000000000 +0000 @@ -88,6 +88,13 @@ def test_addbr_without_namespace(self): self._test_br() + def test_addbr_exists(self): + self.execute.side_effect = RuntimeError() + with mock.patch.object(bridge_lib.BridgeDevice, 'exists', + return_value=True): + bridge_lib.BridgeDevice.addbr(self._BR_NAME) + bridge_lib.BridgeDevice.addbr(self._BR_NAME) + def test_owns_interface(self): br = bridge_lib.BridgeDevice('br-int') exists = lambda path: path == "/sys/class/net/br-int/brif/abc" diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_dhcp.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_dhcp.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_dhcp.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_dhcp.py 2016-08-29 20:05:49.000000000 +0000 @@ -21,12 +21,12 @@ from oslo_config import cfg from neutron.agent.common import config -from neutron.agent.dhcp import config as dhcp_config from neutron.agent.linux import dhcp from neutron.agent.linux import external_process -from neutron.common import config as base_config from neutron.common import constants as n_const from neutron.common import utils +from neutron.conf.agent import dhcp as dhcp_config +from neutron.conf import common as base_config from neutron.extensions import extra_dhcp_opt as edo_ext from neutron.tests import base from neutron.tests import tools @@ -465,7 +465,7 @@ self.host_routes = [FakeV6HostRoute()] self.dns_nameservers = ['2001:0200:feed:7ac0::1'] self.ipv6_ra_mode = None - self.ipv6_address_mode = n_const.DHCPV6_STATEFUL + self.ipv6_address_mode = constants.DHCPV6_STATEFUL class FakeV6SubnetSlaac(object): @@ -476,7 +476,7 @@ self.gateway_ip = 'ffda:3ba5:a17a:4ba3::1' self.enable_dhcp = True self.host_routes = [FakeV6HostRoute()] - self.ipv6_address_mode = n_const.IPV6_SLAAC + self.ipv6_address_mode = constants.IPV6_SLAAC self.ipv6_ra_mode = None @@ -489,7 +489,7 @@ self.enable_dhcp = True self.dns_nameservers = [] self.host_routes = [] - 
self.ipv6_address_mode = n_const.DHCPV6_STATELESS + self.ipv6_address_mode = constants.DHCPV6_STATELESS self.ipv6_ra_mode = None @@ -1064,7 +1064,7 @@ possible_leases = 0 for i, s in enumerate(network.subnets): if (s.ip_version != 6 - or s.ipv6_address_mode == n_const.DHCPV6_STATEFUL): + or s.ipv6_address_mode == constants.DHCPV6_STATEFUL): if s.ip_version == 4: expected.extend([prefix % ( i, s.cidr.split('/')[0], lease_duration, seconds)]) @@ -2024,6 +2024,8 @@ # Create DeviceManager. self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata', default=False)) + self.conf.register_opt(cfg.BoolOpt('force_metadata', + default=False)) plugin = mock.Mock() device = mock.Mock() mock_IPDevice.return_value = device @@ -2093,15 +2095,16 @@ self._test_setup(self.mock_load_interface_driver, self.mock_ip_lib, use_gateway_ips=True) - def test_setup_reserved(self): - """Test reserved port case of DeviceManager's DHCP port setup - logic. - """ - + def _test_setup_reserved(self, enable_isolated_metadata=False, + force_metadata=False): with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: # Create DeviceManager. - self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata', - default=False)) + self.conf.register_opt( + cfg.BoolOpt('enable_isolated_metadata', + default=enable_isolated_metadata)) + self.conf.register_opt( + cfg.BoolOpt('force_metadata', + default=force_metadata)) plugin = mock.Mock() device = mock.Mock() mock_IPDevice.return_value = device @@ -2128,19 +2131,48 @@ plugin.update_dhcp_port.assert_called_with(reserved_port.id, mock.ANY) + expected_ips = ['192.168.0.6/24'] + if enable_isolated_metadata or force_metadata: + expected_ips.append(dhcp.METADATA_DEFAULT_CIDR) mgr.driver.init_l3.assert_called_with('ns-XXX', - ['192.168.0.6/24'], + expected_ips, namespace='qdhcp-ns') + + def test_setup_reserved_and_disable_metadata(self): + """Test reserved port case of DeviceManager's DHCP port setup + logic with metadata disabled. + """ + self._test_setup_reserved() + + def test_setup_reserved_with_isolated_metadata_enable(self): + """Test reserved port case of DeviceManager's DHCP port setup + logic with isolated_metadata enabled. + """ + self._test_setup_reserved(enable_isolated_metadata=True) + + def test_setup_reserved_with_force_metadata_enable(self): + """Test reserved port case of DeviceManager's DHCP port setup + logic with force_metadata enabled. + """ + self._test_setup_reserved(force_metadata=True) + + def test_setup_reserved_and_enable_metadata(self): + """Test reserved port case of DeviceManager's DHCP port setup + logic with both isolated_metadata and force_metadata enabled. + """ + self._test_setup_reserved(enable_isolated_metadata=True, + force_metadata=True) + + def test_setup_reserved_2(self): """Test scenario where a network has two reserved ports, and update_dhcp_port fails for the first of those. """ - with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice: # Create DeviceManager.
- self.conf.register_opt(cfg.BoolOpt('enable_isolated_metadata', - default=False)) + self.conf.register_opt( + cfg.BoolOpt('enable_isolated_metadata', default=False)) + self.conf.register_opt( + cfg.BoolOpt('force_metadata', default=False)) plugin = mock.Mock() device = mock.Mock() mock_IPDevice.return_value = device diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_external_process.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_external_process.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_external_process.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_external_process.py 2016-08-29 20:05:49.000000000 +0000 @@ -164,6 +164,35 @@ manager.enable(callback) self.assertFalse(callback.called) + def test_reload_cfg_without_custom_reload_callback(self): + with mock.patch.object(ep.ProcessManager, 'disable') as disable: + manager = ep.ProcessManager(self.conf, 'uuid', namespace='ns') + manager.reload_cfg() + disable.assert_called_once_with('HUP') + + def test_reload_cfg_with_custom_reload_callback(self): + reload_callback = mock.sentinel.callback + with mock.patch.object(ep.ProcessManager, 'disable') as disable: + manager = ep.ProcessManager( + self.conf, 'uuid', namespace='ns', + custom_reload_callback=reload_callback) + manager.reload_cfg() + disable.assert_called_once_with(get_stop_command=reload_callback) + + def test_disable_get_stop_command(self): + cmd = ['the', 'cmd'] + reload_callback = mock.Mock(return_value=cmd) + with mock.patch.object(ep.ProcessManager, 'pid', + mock.PropertyMock(return_value=4)): + with mock.patch.object(ep.ProcessManager, 'active', + mock.PropertyMock(return_value=True)): + manager = ep.ProcessManager( + self.conf, 'uuid', + custom_reload_callback=reload_callback) + manager.disable( + get_stop_command=manager.custom_reload_callback) + self.assertIn(cmd, self.execute.call_args[0]) + def test_disable_no_namespace(self): with mock.patch.object(ep.ProcessManager, 'pid') as pid: pid.__get__ = mock.Mock(return_value=4) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_interface.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_interface.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_interface.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_interface.py 2016-08-03 20:10:34.000000000 +0000 @@ -16,7 +16,6 @@ import mock from neutron_lib import constants from oslo_log import versionutils -import testtools from neutron.agent.common import config from neutron.agent.common import ovs_lib @@ -391,60 +390,49 @@ 'aa:bb:cc:dd:ee:ff', internal=True) - def _test_plug(self, additional_expectation=None, bridge=None, - namespace=None): - additional_expectation = additional_expectation or [] - if not bridge: - bridge = 'br-int' - - def device_exists(dev, namespace=None): - return dev == bridge - - with mock.patch.object(ovs_lib.OVSBridge, 'replace_port') as replace: - ovs = interface.OVSInterfaceDriver(self.conf) - self.device_exists.side_effect = device_exists - ovs.plug('01234567-1234-1234-99', - 'port-1234', - 'tap0', - 'aa:bb:cc:dd:ee:ff', - bridge=bridge, - namespace=namespace) - replace.assert_called_once_with( - 'tap0', - ('type', 'internal'), - ('external_ids', { - 'iface-id': 'port-1234', - 'iface-status': 'active', - 'attached-mac': 'aa:bb:cc:dd:ee:ff'})) - - expected = [mock.call(), - mock.call().device('tap0'), - 
mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')] - expected.extend(additional_expectation) - if namespace: - expected.extend( - [mock.call().ensure_namespace(namespace), - mock.call().ensure_namespace().add_device_to_namespace( - mock.ANY)]) - expected.extend([mock.call().device().link.set_up()]) - - self.ip.assert_has_calls(expected) + def _test_plug(self, bridge=None, namespace=None): + with mock.patch('neutron.agent.ovsdb.native.connection.' + 'Connection.start'): + if not bridge: + bridge = 'br-int' + + def device_exists(dev, namespace=None): + return dev == bridge + + with mock.patch.object(ovs_lib.OVSBridge, + 'replace_port') as replace: + ovs = interface.OVSInterfaceDriver(self.conf) + self.device_exists.side_effect = device_exists + ovs.plug('01234567-1234-1234-99', + 'port-1234', + 'tap0', + 'aa:bb:cc:dd:ee:ff', + bridge=bridge, + namespace=namespace, + mtu=9000) + replace.assert_called_once_with( + 'tap0', + ('type', 'internal'), + ('external_ids', { + 'iface-id': 'port-1234', + 'iface-status': 'active', + 'attached-mac': 'aa:bb:cc:dd:ee:ff'})) + + expected = [ + mock.call(), + mock.call().device('tap0'), + mock.call().device().link.set_address('aa:bb:cc:dd:ee:ff')] + if namespace: + expected.extend( + [mock.call().ensure_namespace(namespace), + mock.call().ensure_namespace().add_device_to_namespace( + mock.ANY)]) + expected.extend([ + mock.call().device().link.set_mtu(9000), + mock.call().device().link.set_up(), + ]) - def test_mtu_int(self): - self.assertIsNone(self.conf.network_device_mtu) - self.conf.set_override('network_device_mtu', 9000) - self.assertEqual(self.conf.network_device_mtu, 9000) - - def test_validate_min_ipv6_mtu(self): - self.conf.set_override('network_device_mtu', 1200) - with mock.patch('neutron.common.ipv6_utils.is_enabled') as ipv6_status: - with testtools.ExpectedException(SystemExit): - ipv6_status.return_value = True - BaseChild(self.conf) - - def test_plug_mtu(self): - self.conf.set_override('network_device_mtu', 9000) - self._test_plug([mock.call().device().link.set_mtu(9000)]) + self.ip.assert_has_calls(expected) def test_unplug(self, bridge=None): if not bridge: @@ -471,54 +459,53 @@ self._test_plug(devname='qr-0', prefix='qr-') def _test_plug(self, devname=None, bridge=None, namespace=None, - prefix=None, mtu=None): + prefix=None): + with mock.patch('neutron.agent.ovsdb.native.connection.' 
+ 'Connection.start'): + + if not devname: + devname = 'ns-0' + if not bridge: + bridge = 'br-int' - if not devname: - devname = 'ns-0' - if not bridge: - bridge = 'br-int' - - def device_exists(dev, namespace=None): - return dev == bridge + def device_exists(dev, namespace=None): + return dev == bridge - ovs = interface.OVSInterfaceDriver(self.conf) - self.device_exists.side_effect = device_exists - - root_dev = mock.Mock() - ns_dev = mock.Mock() - self.ip().add_veth = mock.Mock(return_value=(root_dev, ns_dev)) - expected = [mock.call(), - mock.call().add_veth('tap0', devname, - namespace2=namespace)] - - with mock.patch.object(ovs_lib.OVSBridge, 'replace_port') as replace: - ovs.plug('01234567-1234-1234-99', - 'port-1234', - devname, - 'aa:bb:cc:dd:ee:ff', - bridge=bridge, - namespace=namespace, - prefix=prefix) - replace.assert_called_once_with( - 'tap0', - ('external_ids', { - 'iface-id': 'port-1234', - 'iface-status': 'active', - 'attached-mac': 'aa:bb:cc:dd:ee:ff'})) - - ns_dev.assert_has_calls( - [mock.call.link.set_address('aa:bb:cc:dd:ee:ff')]) - if mtu: - ns_dev.assert_has_calls([mock.call.link.set_mtu(mtu)]) - root_dev.assert_has_calls([mock.call.link.set_mtu(mtu)]) - - self.ip.assert_has_calls(expected) - root_dev.assert_has_calls([mock.call.link.set_up()]) - ns_dev.assert_has_calls([mock.call.link.set_up()]) + ovs = interface.OVSInterfaceDriver(self.conf) + self.device_exists.side_effect = device_exists - def test_plug_mtu(self): - self.conf.set_override('network_device_mtu', 9000) - self._test_plug(mtu=9000) + root_dev = mock.Mock() + ns_dev = mock.Mock() + self.ip().add_veth = mock.Mock(return_value=(root_dev, ns_dev)) + expected = [mock.call(), + mock.call().add_veth('tap0', devname, + namespace2=namespace)] + + with mock.patch.object(ovs_lib.OVSBridge, + 'replace_port') as replace: + ovs.plug('01234567-1234-1234-99', + 'port-1234', + devname, + 'aa:bb:cc:dd:ee:ff', + bridge=bridge, + namespace=namespace, + prefix=prefix, + mtu=9000) + replace.assert_called_once_with( + 'tap0', + ('external_ids', { + 'iface-id': 'port-1234', + 'iface-status': 'active', + 'attached-mac': 'aa:bb:cc:dd:ee:ff'})) + + ns_dev.assert_has_calls( + [mock.call.link.set_address('aa:bb:cc:dd:ee:ff')]) + ns_dev.assert_has_calls([mock.call.link.set_mtu(9000)]) + root_dev.assert_has_calls([mock.call.link.set_mtu(9000)]) + + self.ip.assert_has_calls(expected) + root_dev.assert_has_calls([mock.call.link.set_up()]) + ns_dev.assert_has_calls([mock.call.link.set_up()]) def test_unplug(self, bridge=None): if not bridge: @@ -544,7 +531,7 @@ def test_plug_with_ns(self): self._test_plug(namespace='01234567-1234-1234-99') - def _test_plug(self, namespace=None, mtu=None): + def _test_plug(self, namespace=None): def device_exists(device, namespace=None): return device.startswith('brq') @@ -560,14 +547,14 @@ 'port-1234', 'ns-0', mac_address, - namespace=namespace) + namespace=namespace, + mtu=9000) ip_calls = [mock.call(), mock.call().add_veth('tap0', 'ns-0', namespace2=namespace)] ns_veth.assert_has_calls([mock.call.link.set_address(mac_address)]) - if mtu: - ns_veth.assert_has_calls([mock.call.link.set_mtu(mtu)]) - root_veth.assert_has_calls([mock.call.link.set_mtu(mtu)]) + ns_veth.assert_has_calls([mock.call.link.set_mtu(9000)]) + root_veth.assert_has_calls([mock.call.link.set_mtu(9000)]) self.ip.assert_has_calls(ip_calls) @@ -585,11 +572,6 @@ self.assertFalse(self.ip_dev.called) self.assertEqual(log.call_count, 1) - def test_plug_mtu(self): - self.device_exists.return_value = False - 
self.conf.set_override('network_device_mtu', 9000) - self._test_plug(mtu=9000) - def test_unplug_no_device(self): self.device_exists.return_value = False self.ip_dev().link.delete.side_effect = RuntimeError @@ -623,8 +605,7 @@ def test_plug_with_prefix(self): self._test_plug(devname='qr-0', prefix='qr-') - def _test_plug(self, devname=None, namespace=None, - prefix=None, mtu=None): + def _test_plug(self, devname=None, namespace=None, prefix=None): if not devname: devname = 'ns-0' @@ -651,14 +632,14 @@ devname, 'aa:bb:cc:dd:ee:ff', namespace=namespace, - prefix=prefix) + prefix=prefix, + mtu=9000) execute.assert_called_once_with(ivsctl_cmd, run_as_root=True) ns_dev.assert_has_calls( [mock.call.link.set_address('aa:bb:cc:dd:ee:ff')]) - if mtu: - ns_dev.assert_has_calls([mock.call.link.set_mtu(mtu)]) - root_dev.assert_has_calls([mock.call.link.set_mtu(mtu)]) + ns_dev.assert_has_calls([mock.call.link.set_mtu(9000)]) + root_dev.assert_has_calls([mock.call.link.set_mtu(9000)]) if namespace: expected.extend( [mock.call().ensure_namespace(namespace), @@ -669,10 +650,6 @@ root_dev.assert_has_calls([mock.call.link.set_up()]) ns_dev.assert_has_calls([mock.call.link.set_up()]) - def test_plug_mtu(self): - self.conf.set_override('network_device_mtu', 9000) - self._test_plug(mtu=9000) - def test_plug_namespace(self): self._test_plug(namespace='mynamespace') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_ip_lib.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_ip_lib.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_ip_lib.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_ip_lib.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,11 +15,12 @@ import mock import netaddr +from neutron_lib import exceptions import testtools from neutron.agent.common import utils # noqa from neutron.agent.linux import ip_lib -from neutron.common import exceptions +from neutron.common import exceptions as n_exc from neutron.tests import base NETNS_SAMPLE = [ @@ -281,7 +282,8 @@ self.assertTrue(fake_str.split.called) self.assertEqual(retval, [ip_lib.IPDevice('lo', namespace='foo')]) - def test_get_namespaces(self): + def test_get_namespaces_non_root(self): + self.config(group='AGENT', use_helper_for_ns_read=False) self.execute.return_value = '\n'.join(NETNS_SAMPLE) retval = ip_lib.IPWrapper.get_namespaces() self.assertEqual(retval, @@ -289,9 +291,11 @@ 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc']) - self.execute.assert_called_once_with([], 'netns', ('list',)) + self.execute.assert_called_once_with([], 'netns', ('list',), + run_as_root=False) - def test_get_namespaces_iproute2_4(self): + def test_get_namespaces_iproute2_4_root(self): + self.config(group='AGENT', use_helper_for_ns_read=True) self.execute.return_value = '\n'.join(NETNS_SAMPLE_IPROUTE2_4) retval = ip_lib.IPWrapper.get_namespaces() self.assertEqual(retval, @@ -299,7 +303,8 @@ 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', 'cccccccc-cccc-cccc-cccc-cccccccccccc']) - self.execute.assert_called_once_with([], 'netns', ('list',)) + self.execute.assert_called_once_with([], 'netns', ('list',), + run_as_root=True) def test_add_tuntap(self): ip_lib.IPWrapper().add_tuntap('tap0') @@ -476,7 +481,7 @@ def test_add_vxlan_invalid_port_length(self): wrapper = ip_lib.IPWrapper() - self.assertRaises(exceptions.NetworkVxlanPortRangeError, + self.assertRaises(n_exc.NetworkVxlanPortRangeError, wrapper.add_vxlan, 'vxlan0', 'vni0', group='group0', dev='dev0', 
ttl='ttl0', tos='tos0', local='local0', proxy=True, @@ -966,6 +971,10 @@ self.assertEqual(self.route_cmd.get_gateway(), test_case['expected']) + def test_flush_route_table(self): + self.route_cmd.flush(self.ip_version, self.table) + self._assert_sudo([self.ip_version], ('flush', 'table', self.table)) + def test_add_route(self): self.route_cmd.add_route(self.cidr, self.ip, self.table) self._assert_sudo([self.ip_version], diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_iptables_firewall.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_iptables_firewall.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_iptables_firewall.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_iptables_firewall.py 2016-08-29 20:05:49.000000000 +0000 @@ -26,9 +26,9 @@ from neutron.agent.linux import ipset_manager from neutron.agent.linux import iptables_comments as ic from neutron.agent.linux import iptables_firewall -from neutron.agent import securitygroups_rpc as sg_cfg from neutron.common import exceptions as n_exc from neutron.common import utils +from neutron.conf.agent import securitygroups_rpc as security_config from neutron.tests import base from neutron.tests.unit.api.v2 import test_base @@ -71,7 +71,7 @@ def setUp(self): super(BaseIptablesFirewallTestCase, self).setUp() cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT') - cfg.CONF.register_opts(sg_cfg.security_group_opts, 'SECURITYGROUP') + security_config.register_securitygroups_opts() cfg.CONF.set_override('comment_iptables_rules', False, 'AGENT') self.utils_exec_p = mock.patch( 'neutron.agent.linux.utils.execute') @@ -1158,6 +1158,56 @@ extra_ok_codes=[1])] self.utils_exec.assert_has_calls(calls) + def test_remove_conntrack_entries_for_sg_member_changed_ipv4(self): + for direction in ['ingress', 'egress']: + for protocol in [None, 'tcp', 'icmp', 'udp']: + self._test_remove_conntrack_entries_sg_member_changed( + 'IPv4', protocol, direction) + + def test_remove_conntrack_entries_for_sg_member_changed_ipv6(self): + for direction in ['ingress', 'egress']: + for protocol in [None, 'tcp', 'icmp', 'udp']: + self._test_remove_conntrack_entries_sg_member_changed( + 'IPv6', protocol, direction) + + def _test_remove_conntrack_entries_sg_member_changed(self, ethertype, + protocol, direction): + port = self._fake_port() + port['security_groups'] = ['fake_sg_id'] + self.firewall.sg_rules.setdefault('fake_sg_id', []) + self.firewall.sg_rules['fake_sg_id'].append( + {'direction': direction, 'remote_group_id': 'fake_sg_id2', + 'ethertype': ethertype}) + + self.firewall.filter_defer_apply_on() + self.firewall.devices_with_updated_sg_members['fake_sg_id2'] = [port] + if ethertype == "IPv4": + self.firewall.pre_sg_members = {'fake_sg_id2': { + 'IPv4': ['10.0.0.2', '10.0.0.3']}} + self.firewall.sg_members = {'fake_sg_id2': { + 'IPv4': ['10.0.0.3']}} + ethertype = "ipv4" + else: + self.firewall.pre_sg_members = {'fake_sg_id2': { + 'IPv6': ['fe80::2', 'fe80::3']}} + self.firewall.sg_members = {'fake_sg_id2': { + 'IPv6': ['fe80::3']}} + ethertype = "ipv6" + self.firewall.filter_defer_apply_off() + direction = '-d' if direction == 'ingress' else '-s' + remote_ip_direction = '-s' if direction == '-d' else '-d' + ips = {"ipv4": ['10.0.0.1', '10.0.0.2'], + "ipv6": ['fe80::1', 'fe80::2']} + calls = [ + # initial data has 1, 2, and 9 in use, CT zone will start + # at 10. 
+ mock.call(['conntrack', '-D', '-f', ethertype, direction, + ips[ethertype][0], '-w', 10, + remote_ip_direction, ips[ethertype][1]], + run_as_root=True, check_exit_code=True, + extra_ok_codes=[1])] + self.utils_exec.assert_has_calls(calls) + def test_user_sg_rules_deduped_before_call_to_iptables_manager(self): port = self._fake_port() port['security_group_rules'] = [{'ethertype': 'IPv4', @@ -1631,13 +1681,9 @@ def _fake_sg_members(self, sg_ids=None): return {sg_id: copy.copy(FAKE_IP) for sg_id in (sg_ids or [FAKE_SGID])} - def test_prepare_port_filter_with_new_members(self): - self.firewall.sg_rules = self._fake_sg_rules() - self.firewall.sg_members = {'fake_sgid': { - 'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']}} - self.firewall.pre_sg_members = {} - port = self._fake_port() - self.firewall.prepare_port_filter(port) + def test_update_security_group_members(self): + sg_members = {'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']} + self.firewall.update_security_group_members('fake_sgid', sg_members) calls = [ mock.call.set_members('fake_sgid', 'IPv4', ['10.0.0.1', '10.0.0.2']), @@ -1774,34 +1820,14 @@ self.assertEqual(1, len(sg_chain_v4_accept)) self.assertEqual(1, len(sg_chain_v6_accept)) - def test_prepare_port_filter_with_deleted_member(self): - self.firewall.sg_rules = self._fake_sg_rules() - self.firewall.pre_sg_rules = self._fake_sg_rules() - self.firewall.sg_members = {'fake_sgid': { - 'IPv4': [ - '10.0.0.1', '10.0.0.3', '10.0.0.4', '10.0.0.5'], - 'IPv6': ['fe80::1']}} - self.firewall.pre_sg_members = {'fake_sgid': { - 'IPv4': ['10.0.0.2'], - 'IPv6': ['fe80::1']}} - self.firewall.prepare_port_filter(self._fake_port()) - calls = [ - mock.call.set_members('fake_sgid', 'IPv4', - ['10.0.0.1', '10.0.0.3', '10.0.0.4', - '10.0.0.5']), - mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1'])] - - self.firewall.ipset.assert_has_calls(calls, True) - def test_remove_port_filter_with_destroy_ipset_chain(self): self.firewall.sg_rules = self._fake_sg_rules() port = self._fake_port() - self.firewall.sg_members = {'fake_sgid': { - 'IPv4': ['10.0.0.1'], - 'IPv6': ['fe80::1']}} self.firewall.pre_sg_members = {'fake_sgid': { 'IPv4': [], 'IPv6': []}} + sg_members = {'IPv4': ['10.0.0.1'], 'IPv6': ['fe80::1']} + self.firewall.update_security_group_members('fake_sgid', sg_members) self.firewall.prepare_port_filter(port) self.firewall.filter_defer_apply_on() self.firewall.sg_members = {'fake_sgid': { @@ -1824,24 +1850,6 @@ self.firewall.ipset.assert_has_calls(calls, any_order=True) - def test_prepare_port_filter_with_sg_no_member(self): - self.firewall.sg_rules = self._fake_sg_rules() - self.firewall.sg_rules[FAKE_SGID].append( - {'direction': 'ingress', 'remote_group_id': 'fake_sgid2', - 'ethertype': 'IPv4'}) - self.firewall.sg_rules.update() - self.firewall.sg_members['fake_sgid'] = { - 'IPv4': ['10.0.0.1', '10.0.0.2'], 'IPv6': ['fe80::1']} - self.firewall.pre_sg_members = {} - port = self._fake_port() - port['security_group_source_groups'].append('fake_sgid2') - self.firewall.prepare_port_filter(port) - calls = [mock.call.set_members('fake_sgid', 'IPv4', - ['10.0.0.1', '10.0.0.2']), - mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1'])] - - self.firewall.ipset.assert_has_calls(calls, any_order=True) - def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self): self.firewall.sg_rules = self._fake_sg_rules() self.firewall.pre_sg_rules = self._fake_sg_rules() @@ -1903,14 +1911,6 @@ mac_ipv4_pairs, mac_ipv6_pairs) self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs) - def 
test_update_ipset_members(self): - self.firewall.sg_members[FAKE_SGID][_IPv4] = [] - self.firewall.sg_members[FAKE_SGID][_IPv6] = [] - sg_info = {constants.IPv4: [FAKE_SGID]} - self.firewall._update_ipset_members(sg_info) - calls = [mock.call.set_members(FAKE_SGID, constants.IPv4, [])] - self.firewall.ipset.assert_has_calls(calls) - class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_polling.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_polling.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/linux/test_polling.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/linux/test_polling.py 2016-08-29 20:05:49.000000000 +0000 @@ -45,7 +45,7 @@ def test_start_calls_monitor_start(self): with mock.patch.object(self.pm._monitor, 'start') as mock_start: self.pm.start() - mock_start.assert_called_with() + mock_start.assert_called_with(block=True) def test_stop_calls_monitor_stop(self): with mock.patch.object(self.pm._monitor, 'stop') as mock_stop: diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/metadata/test_agent.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/metadata/test_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/metadata/test_agent.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/metadata/test_agent.py 2016-08-03 20:10:34.000000000 +0000 @@ -51,6 +51,16 @@ self.config(cache_url='memory://?default_ttl=5') +class NewCacheConfFixture(ConfFixture): + def setUp(self): + super(NewCacheConfFixture, self).setUp() + self.config( + group='cache', + enabled=True, + backend='oslo_cache.dict', + expiration_time=5) + + class TestMetadataProxyHandlerBase(base.BaseTestCase): fake_conf = cfg.CONF fake_conf_fixture = ConfFixture(fake_conf) @@ -96,9 +106,7 @@ self.assertEqual(expected, ports) -class TestMetadataProxyHandlerCache(TestMetadataProxyHandlerBase): - fake_conf = cfg.CONF - fake_conf_fixture = CacheConfFixture(fake_conf) +class _TestMetadataProxyHandlerCacheMixin(object): def test_call(self): req = mock.Mock() @@ -411,6 +419,18 @@ ) +class TestMetadataProxyHandlerCache(TestMetadataProxyHandlerBase, + _TestMetadataProxyHandlerCacheMixin): + fake_conf = cfg.CONF + fake_conf_fixture = CacheConfFixture(fake_conf) + + +class TestMetadataProxyHandlerNewCache(TestMetadataProxyHandlerBase, + _TestMetadataProxyHandlerCacheMixin): + fake_conf = cfg.CONF + fake_conf_fixture = NewCacheConfFixture(fake_conf) + + class TestMetadataProxyHandlerNoCache(TestMetadataProxyHandlerCache): fake_conf = cfg.CONF fake_conf_fixture = ConfFixture(fake_conf) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/metadata/test_driver.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/metadata/test_driver.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/metadata/test_driver.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/metadata/test_driver.py 2016-08-03 20:10:34.000000000 +0000 @@ -19,11 +19,11 @@ from neutron.agent.common import config as agent_config from neutron.agent.l3 import agent as l3_agent -from neutron.agent.l3 import config as l3_config from neutron.agent.l3 import ha as l3_ha_agent from neutron.agent.metadata import config from neutron.agent.metadata import driver as metadata_driver from neutron.common import constants +from neutron.conf.agent.l3 import config as l3_config from neutron.tests import base @@ -74,7 +74,7 @@ 
mock.patch('neutron.agent.l3.ha.AgentMixin' '._init_ha_conf_path').start() - cfg.CONF.register_opts(l3_config.OPTS) + l3_config.register_l3_agent_config_opts(l3_config.OPTS, cfg.CONF) cfg.CONF.register_opts(l3_ha_agent.OPTS) cfg.CONF.register_opts(config.SHARED_OPTS) cfg.CONF.register_opts(config.DRIVER_OPTS) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/ovsdb/native/test_connection.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/ovsdb/native/test_connection.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/ovsdb/native/test_connection.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/ovsdb/native/test_connection.py 2016-08-03 20:10:34.000000000 +0000 @@ -36,7 +36,8 @@ gsh.return_value = helper = mock.Mock() self.connection = connection.Connection( mock.Mock(), mock.Mock(), mock.Mock()) - with mock.patch.object(poller, 'Poller') as poller_mock: + with mock.patch.object(poller, 'Poller') as poller_mock,\ + mock.patch('threading.Thread'): poller_mock.return_value.block.side_effect = eventlet.sleep self.connection.start(table_name_list=table_name_list) reg_all_called = table_name_list is None diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/ovsdb/test_impl_idl.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/ovsdb/test_impl_idl.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/ovsdb/test_impl_idl.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/ovsdb/test_impl_idl.py 2016-08-03 20:10:34.000000000 +0000 @@ -24,7 +24,15 @@ class TransactionTestCase(base.BaseTestCase): def test_commit_raises_exception_on_timeout(self): with mock.patch.object(queue, 'Queue') as mock_queue: - transaction = impl_idl.Transaction(mock.sentinel, mock.Mock(), 0) + transaction = impl_idl.NeutronOVSDBTransaction(mock.sentinel, + mock.Mock(), 0) mock_queue.return_value.get.side_effect = queue.Empty with testtools.ExpectedException(api.TimeoutException): transaction.commit() + + def test_post_commit_does_not_raise_exception(self): + with mock.patch.object(impl_idl.NeutronOVSDBTransaction, + "do_post_commit", side_effect=Exception): + transaction = impl_idl.NeutronOVSDBTransaction(mock.sentinel, + mock.Mock(), 0) + transaction.post_commit(mock.Mock()) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/test_agent_extensions_manager.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/test_agent_extensions_manager.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/test_agent_extensions_manager.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/test_agent_extensions_manager.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock +from oslo_config import cfg + +from neutron.agent import agent_extensions_manager as ext_manager +from neutron.tests import base + + +class TestAgentExtensionsManager(base.BaseTestCase): + + def setUp(self): + super(TestAgentExtensionsManager, self).setUp() + mock.patch('neutron.agent.l2.extensions.qos.QosAgentExtension', + autospec=True).start() + conf = cfg.CONF + ext_manager.register_opts(conf) + cfg.CONF.set_override('extensions', ['qos'], 'agent') + namespace = 'neutron.agent.l2.extensions' + self.manager = ext_manager.AgentExtensionsManager(conf, namespace) + + def _get_extension(self): + return self.manager.extensions[0].obj + + def test_initialize(self): + connection = object() + self.manager.initialize(connection, 'fake_driver_type') + ext = self._get_extension() + ext.initialize.assert_called_once_with(connection, 'fake_driver_type') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/test_rpc.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/test_rpc.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/test_rpc.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/test_rpc.py 2016-08-29 20:05:49.000000000 +0000 @@ -25,7 +25,8 @@ class AgentRPCPluginApi(base.BaseTestCase): def _test_rpc_call(self, method): agent = rpc.PluginApi('fake_topic') - ctxt = oslo_context.RequestContext('fake_user', 'fake_project') + ctxt = oslo_context.RequestContext(user='fake_user', + tenant='fake_project') expect_val = 'foo' with mock.patch.object(agent.client, 'call') as mock_call,\ mock.patch.object(agent.client, 'prepare') as mock_prepare: @@ -46,7 +47,8 @@ def test_devices_details_list_unsupported(self): agent = rpc.PluginApi('fake_topic') - ctxt = oslo_context.RequestContext('fake_user', 'fake_project') + ctxt = oslo_context.RequestContext(user='fake_user', + tenant='fake_project') expect_val_get_device_details = 'foo' expect_val = [expect_val_get_device_details] with mock.patch.object(agent.client, 'call') as mock_call, \ @@ -75,7 +77,8 @@ mock.patch.object(reportStateAPI.client, 'prepare' ) as mock_prepare: mock_prepare.return_value = reportStateAPI.client - ctxt = oslo_context.RequestContext('fake_user', 'fake_project') + ctxt = oslo_context.RequestContext(user='fake_user', + tenant='fake_project') reportStateAPI.report_state(ctxt, expected_agent_state, use_call=True) self.assertEqual(mock_call.call_args[0][0], ctxt) @@ -94,7 +97,8 @@ mock.patch.object(reportStateAPI.client, 'prepare' ) as mock_prepare: mock_prepare.return_value = reportStateAPI.client - ctxt = oslo_context.RequestContext('fake_user', 'fake_project') + ctxt = oslo_context.RequestContext(user='fake_user', + tenant='fake_project') reportStateAPI.report_state(ctxt, expected_agent_state) self.assertEqual(mock_cast.call_args[0][0], ctxt) self.assertEqual(mock_cast.call_args[0][1], 'report_state') @@ -116,8 +120,8 @@ mock.patch.object(reportStateAPI.client, 'prepare' ) as mock_prepare: mock_prepare.return_value = reportStateAPI.client - ctxt = oslo_context.RequestContext('fake_user', - 'fake_project') + ctxt = oslo_context.RequestContext(user='fake_user', + tenant='fake_project') reportStateAPI.report_state(ctxt, expected_agent_state) self.assertEqual(expected_time_str, mock_cast.call_args[1]['time']) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/test_securitygroups_rpc.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/test_securitygroups_rpc.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/test_securitygroups_rpc.py 2016-06-01 18:00:21.000000000 
+0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/test_securitygroups_rpc.py 2016-08-29 20:05:49.000000000 +0000 @@ -27,7 +27,6 @@ from neutron.agent.linux import iptables_manager from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.handlers import securitygroups_rpc -from neutron.common import constants as n_const from neutron.common import ipv6_utils as ipv6 from neutron.common import rpc as n_rpc from neutron import context @@ -35,7 +34,6 @@ from neutron.extensions import allowedaddresspairs as addr_pair from neutron.extensions import securitygroup as ext_sg from neutron import manager -from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent from neutron.tests import base from neutron.tests import tools from neutron.tests.unit.extensions import test_securitygroup as test_sg @@ -683,7 +681,7 @@ with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=6, - ipv6_ra_mode=n_const.IPV6_SLAAC + ipv6_ra_mode=const.IPV6_SLAAC ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] @@ -751,7 +749,7 @@ with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=6, - ipv6_ra_mode=n_const.IPV6_SLAAC + ipv6_ra_mode=const.IPV6_SLAAC ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] @@ -826,7 +824,7 @@ with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=6, - ipv6_ra_mode=n_const.IPV6_SLAAC + ipv6_ra_mode=const.IPV6_SLAAC ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] @@ -894,7 +892,7 @@ with self.network() as n,\ self.subnet(n, gateway_ip=fake_gateway, cidr=fake_prefix, ip_version=6, - ipv6_ra_mode=n_const.IPV6_SLAAC + ipv6_ra_mode=const.IPV6_SLAAC ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] @@ -942,7 +940,7 @@ fake_prefix = FAKE_PREFIX[const.IPv6] with self.network() as n,\ self.subnet(n, gateway_ip=None, cidr=fake_prefix, - ip_version=6, ipv6_ra_mode=n_const.IPV6_SLAAC + ip_version=6, ipv6_ra_mode=const.IPV6_SLAAC ) as subnet_v6,\ self.security_group() as sg1: sg1_id = sg1['security_group']['id'] @@ -3121,12 +3119,8 @@ test_rpc_v1_1) def _init_agent(self, defer_refresh_firewall): - fake_map = ovs_neutron_agent.LocalVLANMapping(1, 'network_type', - 'physical_network', 1) - local_vlan_map = {'fakenet': fake_map} self.agent = sg_rpc.SecurityGroupAgentRpc( context=None, plugin_rpc=self.rpc, - local_vlan_map=local_vlan_map, defer_refresh_firewall=defer_refresh_firewall) self._enforce_order_in_firewall(self.agent.firewall) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/windows/test_ip_lib.py neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/windows/test_ip_lib.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/agent/windows/test_ip_lib.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/agent/windows/test_ip_lib.py 2016-08-03 20:10:34.000000000 +0000 @@ -37,13 +37,6 @@ self.assertEqual(mock_dev2, ret) - @mock.patch.object(ip_lib.IPWrapper, 'get_devices') - def test_get_device_by_ip_exception(self, mock_get_devices): - mock_get_devices.side_effects = OSError - ret = ip_lib.IPWrapper().get_device_by_ip(mock.sentinel.fake_ip) - - self.assertIsNone(ret) - @mock.patch('netifaces.interfaces') def test_get_devices(self, mock_interfaces): mock_interfaces.return_value = [mock.sentinel.dev1, diff -Nru 
neutron-9.0.0~b2~dev280/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py neutron-9.0.0~b3~dev557/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py 2016-08-03 20:10:34.000000000 +0000 @@ -151,10 +151,12 @@ self.assertEqual(expected_casts, self.mock_cast.call_count) def _test__notify_agents(self, method, - expected_scheduling=0, expected_casts=0): + expected_scheduling=0, expected_casts=0, + payload=None): + payload = payload or {'port': {}} self._test__notify_agents_with_function( lambda: self.notifier._notify_agents( - mock.Mock(), method, {'port': {}}, 'foo_network_id'), + mock.Mock(), method, payload, 'foo_network_id'), expected_scheduling, expected_casts) def test__notify_agents_cast_required_with_scheduling(self): @@ -167,7 +169,26 @@ def test__notify_agents_cast_required_with_scheduling_subnet_create(self): self._test__notify_agents('subnet_create_end', - expected_scheduling=1, expected_casts=1) + expected_scheduling=1, expected_casts=1, + payload={'subnet': {}}) + + def test__notify_agents_cast_required_with_scheduling_segment(self): + network_id = 'foo_network_id' + segment_id = 'foo_segment_id' + subnet = {'subnet': {'segment_id': segment_id}} + segment = {'id': segment_id, 'network_id': network_id, + 'hosts': ['host-a']} + self.notifier.plugin.get_network.return_value = {'id': network_id} + segment_sp = mock.Mock() + segment_sp.get_segment.return_value = segment + with mock.patch('neutron.manager.NeutronManager.get_service_plugins', + return_value={'segments': segment_sp}): + self._test__notify_agents('subnet_create_end', + expected_scheduling=1, expected_casts=1, + payload=subnet) + get_agents = self.notifier.plugin.get_dhcp_agents_hosting_networks + get_agents.assert_called_once_with( + mock.ANY, [network_id], hosts=segment['hosts']) def test__notify_agents_no_action(self): self._test__notify_agents('network_create_end', diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/api/rpc/agentnotifiers/test_l3_rpc_agent_api.py neutron-9.0.0~b3~dev557/neutron/tests/unit/api/rpc/agentnotifiers/test_l3_rpc_agent_api.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/api/rpc/agentnotifiers/test_l3_rpc_agent_api.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/api/rpc/agentnotifiers/test_l3_rpc_agent_api.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,47 @@ +# Copyright (c) 2016 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock + +from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api +from neutron.tests import base + + +class TestL3AgentNotifyAPI(base.BaseTestCase): + + def setUp(self): + super(TestL3AgentNotifyAPI, self).setUp() + self.rpc_client_mock = mock.patch( + 'neutron.common.rpc.get_client').start().return_value + self.l3_notifier = l3_rpc_agent_api.L3AgentNotifyAPI() + + def _test_arp_update(self, method): + arp_table = {'ip_address': '1.1.1.1', + 'mac_address': '22:f1:6c:9c:79:4a', + 'subnet_id': 'subnet_id'} + router_id = 'router_id' + getattr(self.l3_notifier, method)(mock.Mock(), router_id, arp_table) + self.rpc_client_mock.prepare.assert_called_once_with( + fanout=True, version='1.2') + cctxt = self.rpc_client_mock.prepare.return_value + cctxt.cast.assert_called_once_with( + mock.ANY, method, + payload={'router_id': router_id, 'arp_table': arp_table}) + + def test_add_arp_entry(self): + self._test_arp_update('add_arp_entry') + + def test_del_arp_entry(self): + self._test_arp_update('del_arp_entry') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py neutron-9.0.0~b3~dev557/neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/api/rpc/callbacks/consumer/test_registry.py 2016-08-03 20:10:34.000000000 +0000 @@ -51,6 +51,6 @@ callback2 = mock.Mock() callbacks = {callback1, callback2} manager_mock().get_callbacks.return_value = callbacks - registry.push(resource_type_, resource_, event_type_) + registry.push(resource_type_, [resource_], event_type_) for callback in callbacks: - callback.assert_called_with(resource_, event_type_) + callback.assert_called_with([resource_], event_type_) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py neutron-9.0.0~b3~dev557/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py 2016-08-03 20:10:34.000000000 +0000 @@ -44,6 +44,10 @@ self.mock_set_dirty = set_dirty_p.start() self.utils_p = mock.patch('neutron.plugins.common.utils.create_port') self.utils = self.utils_p.start() + self.segment_p = mock.patch( + 'neutron.manager.NeutronManager.get_service_plugins') + self.get_service_plugins = self.segment_p.start() + self.segment_plugin = mock.MagicMock() def test_group_by_network_id(self): port1 = {'network_id': 'a'} @@ -67,6 +71,26 @@ {'id': 'b', 'subnets': [subnet], 'ports': []}] self.assertEqual(expected, networks) + def test_get_active_networks_info_with_routed_networks(self): + self.get_service_plugins.return_value = { + 'segments': self.segment_plugin + } + plugin_retval = [{'id': 'a'}, {'id': 'b'}] + port = {'network_id': 'a'} + subnets = [{'network_id': 'b', 'id': 'c', 'segment_id': '1'}, + {'network_id': 'a', 'id': 'e'}, + {'network_id': 'b', 'id': 'd', 'segment_id': '3'}] + self.plugin.get_ports.return_value = [port] + self.plugin.get_networks.return_value = plugin_retval + hostseg_retval = ['1', '2'] + self.segment_plugin.get_segments_by_hosts.return_value = hostseg_retval + self.plugin.get_subnets.return_value = subnets + networks = self.callbacks.get_active_networks_info(mock.Mock(), + host='host') + expected = [{'id': 'a', 'subnets': [subnets[1]], 'ports': [port]}, + {'id': 'b', 
'subnets': [subnets[0]], 'ports': []}] + self.assertEqual(expected, networks) + def _test__port_action_with_failures(self, exc=None, action=None): port = { 'network_id': 'foo_network_id', @@ -139,22 +163,56 @@ retval = self.callbacks.get_network_info(mock.Mock(), network_id='a') self.assertIsNone(retval) - def test_get_network_info(self): + def _test_get_network_info(self, segmented_network=False, + routed_network=False): network_retval = dict(id='a') - - subnet_retval = [dict(id='a'), dict(id='c'), dict(id='b')] + if not routed_network: + subnet_retval = [dict(id='a'), dict(id='c'), dict(id='b')] + else: + subnet_retval = [dict(id='c', segment_id='1'), + dict(id='a', segment_id='1')] port_retval = mock.Mock() self.plugin.get_network.return_value = network_retval self.plugin.get_subnets.return_value = subnet_retval self.plugin.get_ports.return_value = port_retval + if segmented_network: + self.segment_plugin.get_segments.return_value = [dict(id='1'), + dict(id='2')] + self.segment_plugin.get_segments_by_hosts.return_value = ['1'] retval = self.callbacks.get_network_info(mock.Mock(), network_id='a') self.assertEqual(retval, network_retval) - sorted_subnet_retval = [dict(id='a'), dict(id='b'), dict(id='c')] + if not routed_network: + sorted_subnet_retval = [dict(id='a'), dict(id='b'), dict(id='c')] + else: + sorted_subnet_retval = [dict(id='a', segment_id='1'), + dict(id='c', segment_id='1')] self.assertEqual(retval['subnets'], sorted_subnet_retval) self.assertEqual(retval['ports'], port_retval) + def test_get_network_info(self): + self._test_get_network_info() + + def test_get_network_info_with_routed_network(self): + self.get_service_plugins.return_value = { + 'segments': self.segment_plugin + } + self._test_get_network_info(segmented_network=True, + routed_network=True) + + def test_get_network_info_with_segmented_network_but_not_routed(self): + self.get_service_plugins.return_value = { + 'segments': self.segment_plugin + } + self._test_get_network_info(segmented_network=True) + + def test_get_network_info_with_non_segmented_network(self): + self.get_service_plugins.return_value = { + 'segments': self.segment_plugin + } + self._test_get_network_info() + def test_update_dhcp_port_verify_port_action_port_dict(self): port = {'port': {'network_id': 'foo_network_id', 'device_owner': constants.DEVICE_OWNER_DHCP, diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py neutron-9.0.0~b3~dev557/neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/api/rpc/handlers/test_l3_rpc.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,10 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from neutron_lib import constants from oslo_config import cfg from neutron.api.rpc.handlers import l3_rpc -from neutron.common import constants from neutron import context from neutron import manager from neutron.tests.unit.db import test_db_base_plugin_v2 diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py neutron-9.0.0~b3~dev557/neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/api/rpc/handlers/test_resources_rpc.py 2016-08-03 20:10:34.000000000 +0000 @@ -28,30 +28,45 @@ from neutron.tests import base +TEST_EVENT = 'test_event' +TEST_VERSION = '1.0' + + def _create_test_dict(uuid=None): return {'id': uuid or uuidutils.generate_uuid(), 'field': 'foo'} -def _create_test_resource(context=None): +def _create_test_resource(context=None, resource_cls=None): + resource_cls = resource_cls or FakeResource resource_dict = _create_test_dict() - resource = FakeResource(context, **resource_dict) + resource = resource_cls(context, **resource_dict) resource.obj_reset_changes() return resource -class FakeResource(objects_base.NeutronObject): - # Version 1.0: Initial version - VERSION = '1.0' +class BaseFakeResource(objects_base.NeutronObject): + @classmethod + def get_objects(cls, context, **kwargs): + return list() + + +class FakeResource(BaseFakeResource): + VERSION = TEST_VERSION fields = { 'id': obj_fields.UUIDField(), 'field': obj_fields.StringField() } - @classmethod - def get_objects(cls, context, **kwargs): - return list() + +class FakeResource2(BaseFakeResource): + VERSION = TEST_VERSION + + fields = { + 'id': obj_fields.UUIDField(), + 'field': obj_fields.StringField() + } class ResourcesRpcBaseTestCase(base.BaseTestCase): @@ -63,6 +78,21 @@ fixture.VersionedObjectRegistryFixture()) self.context = context.get_admin_context() + mock.patch.object(resources_rpc.resources, + 'is_valid_resource_type').start() + mock.patch.object(resources_rpc.resources, 'get_resource_cls', + side_effect=self._get_resource_cls).start() + + self.resource_objs = [_create_test_resource(self.context) + for _ in range(2)] + self.resource_objs2 = [_create_test_resource(self.context, + FakeResource2) + for _ in range(2)] + + @staticmethod + def _get_resource_cls(resource_type): + return {FakeResource.obj_name(): FakeResource, + FakeResource2.obj_name(): FakeResource2}.get(resource_type) class _ValidateResourceTypeTestCase(base.BaseTestCase): @@ -99,9 +129,6 @@ def setUp(self): super(ResourcesPullRpcApiTestCase, self).setUp() - mock.patch.object(resources_rpc, '_validate_resource_type').start() - mock.patch('neutron.api.rpc.callbacks.resources.get_resource_cls', - return_value=FakeResource).start() self.rpc = resources_rpc.ResourcesPullRpcApi() mock.patch.object(self.rpc, 'client').start() self.cctxt_mock = self.rpc.client.prepare.return_value @@ -120,7 +147,7 @@ self.cctxt_mock.call.assert_called_once_with( self.context, 'pull', resource_type='FakeResource', - version=FakeResource.VERSION, resource_id=resource_id) + version=TEST_VERSION, resource_id=resource_id) self.assertEqual(expected_obj, result) def test_pull_resource_not_found(self): @@ -162,7 +189,7 @@ return_value=self.resource_obj) as registry_mock: primitive = self.callbacks.pull( self.context, resource_type=FakeResource.obj_name(), - version=FakeResource.VERSION, + version=TEST_VERSION, resource_id=self.resource_obj.id) 
registry_mock.assert_called_once_with( 'FakeResource', self.resource_obj.id, context=self.context) @@ -182,58 +209,96 @@ class ResourcesPushRpcApiTestCase(ResourcesRpcBaseTestCase): + """Tests the neutron server side of the RPC interface.""" def setUp(self): super(ResourcesPushRpcApiTestCase, self).setUp() mock.patch.object(resources_rpc.n_rpc, 'get_client').start() - mock.patch.object(resources_rpc, '_validate_resource_type').start() self.rpc = resources_rpc.ResourcesPushRpcApi() self.cctxt_mock = self.rpc.client.prepare.return_value - self.resource_obj = _create_test_resource(self.context) + mock.patch.object(version_manager, 'get_resource_versions', + return_value=set([TEST_VERSION])).start() def test__prepare_object_fanout_context(self): expected_topic = topics.RESOURCE_TOPIC_PATTERN % { - 'resource_type': resources.get_resource_type(self.resource_obj), - 'version': self.resource_obj.VERSION} + 'resource_type': resources.get_resource_type( + self.resource_objs[0]), + 'version': TEST_VERSION} - with mock.patch.object(resources_rpc.resources, 'get_resource_cls', - return_value=FakeResource): - observed = self.rpc._prepare_object_fanout_context( - self.resource_obj, self.resource_obj.VERSION) + observed = self.rpc._prepare_object_fanout_context( + self.resource_objs[0], self.resource_objs[0].VERSION, '1.0') self.rpc.client.prepare.assert_called_once_with( - fanout=True, topic=expected_topic) + fanout=True, topic=expected_topic, version='1.0') self.assertEqual(self.cctxt_mock, observed) - def test_pushy(self): - with mock.patch.object(resources_rpc.resources, 'get_resource_cls', - return_value=FakeResource): - with mock.patch.object(version_manager, 'get_resource_versions', - return_value=set([FakeResource.VERSION])): - self.rpc.push( - self.context, self.resource_obj, 'TYPE') + def test_push_single_type(self): + self.rpc.push( + self.context, self.resource_objs, TEST_EVENT) + + self.cctxt_mock.cast.assert_called_once_with( + self.context, 'push', + resource_list=[resource.obj_to_primitive() + for resource in self.resource_objs], + event_type=TEST_EVENT) + + def test_push_mixed(self): + self.rpc.push( + self.context, self.resource_objs + self.resource_objs2, + event_type=TEST_EVENT) + + self.cctxt_mock.cast.assert_any_call( + self.context, 'push', + resource_list=[resource.obj_to_primitive() + for resource in self.resource_objs], + event_type=TEST_EVENT) + + self.cctxt_mock.cast.assert_any_call( + self.context, 'push', + resource_list=[resource.obj_to_primitive() + for resource in self.resource_objs2], + event_type=TEST_EVENT) + + def test_push_mitaka_backwardscompat(self): + #TODO(mangelajo) remove in Ocata, since the 'resource' parameter + # is just for backwards compatibility with Mitaka + # agents. 
+ self.rpc.push( + self.context, [self.resource_objs[0]], TEST_EVENT) self.cctxt_mock.cast.assert_called_once_with( self.context, 'push', - resource=self.resource_obj.obj_to_primitive(), - event_type='TYPE') + resource=self.resource_objs[0].obj_to_primitive(), + event_type=TEST_EVENT) class ResourcesPushRpcCallbackTestCase(ResourcesRpcBaseTestCase): + """Tests the agent-side of the RPC interface.""" def setUp(self): super(ResourcesPushRpcCallbackTestCase, self).setUp() - mock.patch.object(resources_rpc, '_validate_resource_type').start() - mock.patch.object( - resources_rpc.resources, - 'get_resource_cls', return_value=FakeResource).start() - self.resource_obj = _create_test_resource(self.context) - self.resource_prim = self.resource_obj.obj_to_primitive() self.callbacks = resources_rpc.ResourcesPushRpcCallback() @mock.patch.object(resources_rpc.cons_registry, 'push') def test_push(self, reg_push_mock): self.obj_registry.register(FakeResource) - self.callbacks.push(self.context, self.resource_prim, 'TYPE') - reg_push_mock.assert_called_once_with(self.resource_obj.obj_name(), - self.resource_obj, 'TYPE') + self.callbacks.push(self.context, + resource_list=[resource.obj_to_primitive() + for resource in self.resource_objs], + event_type=TEST_EVENT) + reg_push_mock.assert_called_once_with(self.resource_objs[0].obj_name(), + self.resource_objs, + TEST_EVENT) + + @mock.patch.object(resources_rpc.cons_registry, 'push') + def test_push_mitaka_backwardscompat(self, reg_push_mock): + #TODO(mangelajo) remove in Ocata, since the 'resource' parameter + # is just for backwards compatibility with Mitaka + # agents. + self.obj_registry.register(FakeResource) + self.callbacks.push(self.context, + resource=self.resource_objs[0].obj_to_primitive(), + event_type=TEST_EVENT) + reg_push_mock.assert_called_once_with(self.resource_objs[0].obj_name(), + [self.resource_objs[0]], + TEST_EVENT) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/api/test_api_common.py neutron-9.0.0~b3~dev557/neutron/tests/unit/api/test_api_common.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/api/test_api_common.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/api/test_api_common.py 2016-08-29 20:05:49.000000000 +0000 @@ -20,7 +20,6 @@ from neutron.api import api_common as common from neutron.api.v2 import base as base_v2 -from neutron.common import exceptions as n_exc from neutron.tests import base @@ -98,8 +97,9 @@ params) def test_convert_exception_to_http_exc_multiple_different_codes(self): - e = n_exc.MultipleExceptions([exceptions.NetworkInUse(net_id='nid'), - exceptions.PortNotFound(port_id='pid')]) + e = exceptions.MultipleExceptions( + [exceptions.NetworkInUse(net_id='nid'), + exceptions.PortNotFound(port_id='pid')]) conv = common.convert_exception_to_http_exc(e, base_v2.FAULT_MAP, None) self.assertIsInstance(conv, exc.HTTPConflict) self.assertEqual( @@ -109,8 +109,9 @@ jsonutils.loads(conv.body)['NeutronError']['message']) def test_convert_exception_to_http_exc_multiple_same_codes(self): - e = n_exc.MultipleExceptions([exceptions.NetworkNotFound(net_id='nid'), - exceptions.PortNotFound(port_id='pid')]) + e = exceptions.MultipleExceptions( + [exceptions.NetworkNotFound(net_id='nid'), + exceptions.PortNotFound(port_id='pid')]) conv = common.convert_exception_to_http_exc(e, base_v2.FAULT_MAP, None) self.assertIsInstance(conv, exc.HTTPNotFound) self.assertEqual( @@ -118,7 +119,7 @@ jsonutils.loads(conv.body)['NeutronError']['message']) def 
test_convert_exception_to_http_exc_multiple_empty_inner(self): - e = n_exc.MultipleExceptions([]) + e = exceptions.MultipleExceptions([]) conv = common.convert_exception_to_http_exc(e, base_v2.FAULT_MAP, None) self.assertIsInstance(conv, exc.HTTPInternalServerError) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/api/test_extensions.py neutron-9.0.0~b3~dev557/neutron/tests/unit/api/test_extensions.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/api/test_extensions.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/api/test_extensions.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,7 +14,9 @@ # under the License. import abc +import copy +import fixtures import mock from oslo_config import cfg from oslo_log import log as logging @@ -22,6 +24,7 @@ from oslo_service import wsgi as base_wsgi import routes import six +import testtools import webob import webob.exc as webexc import webtest @@ -49,6 +52,27 @@ extensions_path = ':'.join(neutron.tests.unit.extensions.__path__) +class CustomExtensionCheckMapMemento(fixtures.Fixture): + """Create a copy of the custom extension support check map so it can be + restored during test cleanup. + """ + + def _setUp(self): + self._map_contents_backup = copy.deepcopy( + extensions.EXTENSION_SUPPORTED_CHECK_MAP + ) + self._plugin_agnostic_extensions_backup = set( + extensions._PLUGIN_AGNOSTIC_EXTENSIONS + ) + self.addCleanup(self._restore) + + def _restore(self): + extensions.EXTENSION_SUPPORTED_CHECK_MAP = self._map_contents_backup + extensions._PLUGIN_AGNOSTIC_EXTENSIONS = ( + self._plugin_agnostic_extensions_backup + ) + + class ExtensionsTestApp(base_wsgi.Router): def __init__(self, options=None): @@ -602,7 +626,14 @@ default_ext = list(constants.DEFAULT_SERVICE_PLUGINS.values())[0] ext_mgr.add_extension(ext_stubs.StubExtensionWithReqs(default_ext)) ext_mgr.extend_resources("2.0", attr_map) - self.assertIn(default_ext, ext_mgr.extensions) + # None of the default extensions should be loaded, since their + # requirements are not satisfied; extension loading must still succeed.
+ self.assertFalse(ext_mgr.extensions) + + def test__check_faulty_extensions_raise_not_default_ext(self): + ext_mgr = extensions.ExtensionManager('') + with testtools.ExpectedException(exceptions.ExtensionsNotFound): + ext_mgr._check_faulty_extensions(set(['foo'])) def test_invalid_extensions_are_not_registered(self): @@ -793,6 +824,47 @@ extensions.PluginAwareExtensionManager, '', plugin_info) + def test_custom_supported_implementation(self): + self.useFixture(CustomExtensionCheckMapMemento()) + + class FakePlugin(object): + pass + + class FakeExtension(ext_stubs.StubExtension): + extensions.register_custom_supported_check( + 'stub_extension', lambda: True, plugin_agnostic=True + ) + + ext = FakeExtension() + + plugin_info = {constants.CORE: FakePlugin()} + ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) + ext_mgr.add_extension(ext) + self.assertIn("stub_extension", ext_mgr.extensions) + + extensions.register_custom_supported_check( + 'stub_extension', lambda: False, plugin_agnostic=True + ) + ext_mgr = extensions.PluginAwareExtensionManager('', plugin_info) + ext_mgr.add_extension(ext) + self.assertNotIn("stub_extension", ext_mgr.extensions) + + def test_custom_supported_implementation_plugin_specific(self): + self.useFixture(CustomExtensionCheckMapMemento()) + + class FakePlugin(object): + pass + + class FakeExtension(ext_stubs.StubExtension): + extensions.register_custom_supported_check( + 'stub_plugin_extension', lambda: True, plugin_agnostic=False + ) + + plugin_info = {constants.CORE: FakePlugin()} + self.assertRaises( + exceptions.ExtensionsNotFound, + extensions.PluginAwareExtensionManager, '', plugin_info) + class ExtensionControllerTest(testlib_api.WebTestCase): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/api/v2/test_base.py neutron-9.0.0~b3~dev557/neutron/tests/unit/api/v2/test_base.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/api/v2/test_base.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/api/v2/test_base.py 2016-08-29 20:05:49.000000000 +0000 @@ -1099,6 +1099,11 @@ self._test_update(tenant_id + "bad", tenant_id, exc.HTTPNotFound.code, expect_errors=True) + def test_update_keystone_no_tenant(self): + tenant_id = _uuid() + self._test_update(tenant_id, None, + exc.HTTPNotFound.code, expect_errors=True) + def test_update_readonly_field(self): data = {'network': {'status': "NANANA"}} res = self.api.put(_get_path('networks', id=_uuid()), @@ -1329,6 +1334,11 @@ for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events): self.assertEqual('INFO', msg['priority']) self.assertEqual(event, msg['event_type']) + if opname == 'delete' and event == 'network.delete.end': + self.assertIn('payload', msg) + resource = msg['payload'] + self.assertIn('network_id', resource) + self.assertIn('network', resource) self.assertEqual(expected_code, res.status_int) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/api/v2/test_resource.py neutron-9.0.0~b3~dev557/neutron/tests/unit/api/v2/test_resource.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/api/v2/test_resource.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/api/v2/test_resource.py 2016-08-29 20:05:49.000000000 +0000 @@ -21,6 +21,7 @@ from neutron._i18n import _ from neutron.api.v2 import resource as wsgi_resource +from neutron.common import utils from neutron import context from neutron.tests import base from neutron import wsgi @@ -42,6 +43,12 @@ result = request.get_content_type() self.assertEqual("application/json", result) 
+ def test_content_type_with_partial_matched_string(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "application/j" + result = request.best_match_content_type() + self.assertEqual("application/json", result) + def test_content_type_from_accept(self): content_type = 'application/json' request = wsgi.Request.blank('/tests/123') @@ -92,7 +99,7 @@ def test_request_context_elevated(self): user_context = context.Context( - 'fake_user', 'fake_project', admin=False) + 'fake_user', 'fake_project', is_admin=False) self.assertFalse(user_context.is_admin) admin_context = user_context.elevated() self.assertFalse(user_context.is_admin) @@ -280,6 +287,21 @@ res = resource.get('', extra_environ=environ) self.assertEqual(200, res.status_int) + def _test_unhandled_error_logs_details(self, e, expected_details): + with mock.patch.object(wsgi_resource.LOG, 'exception') as log: + self._make_request_with_side_effect(side_effect=e) + log.assert_called_with( + mock.ANY, {'action': mock.ANY, 'details': expected_details}) + + def test_unhandled_error_logs_attached_details(self): + e = Exception() + utils.attach_exc_details(e, 'attached_details') + self._test_unhandled_error_logs_details(e, 'attached_details') + + def test_unhandled_error_logs_no_attached_details(self): + e = Exception() + self._test_unhandled_error_logs_details(e, 'No details.') + def test_status_204(self): controller = mock.MagicMock() controller.test = lambda request: {'foo': 'bar'} diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/callbacks/test_manager.py neutron-9.0.0~b3~dev557/neutron/tests/unit/callbacks/test_manager.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/callbacks/test_manager.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/callbacks/test_manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -22,6 +22,19 @@ from neutron.tests import base +class ObjectWithCallback(object): + + def __init__(self): + self.counter = 0 + + def callback(self, *args, **kwargs): + self.counter += 1 + + +class GloriousObjectWithCallback(ObjectWithCallback): + pass + + def callback_1(*args, **kwargs): callback_1.counter += 1 callback_id_1 = manager._get_id(callback_1) @@ -214,3 +227,19 @@ resources.ROUTER, events.BEFORE_DELETE, mock.ANY) self.assertEqual(2, callback_1.counter) self.assertEqual(1, callback_2.counter) + + def test_object_instances_as_subscribers(self): + """Ensures that the manager doesn't think these are equivalent.""" + a = GloriousObjectWithCallback() + b = ObjectWithCallback() + c = ObjectWithCallback() + for o in (a, b, c): + self.manager.subscribe( + o.callback, resources.PORT, events.BEFORE_CREATE) + # ensure idempotency remains for a single object + self.manager.subscribe( + o.callback, resources.PORT, events.BEFORE_CREATE) + self.manager.notify(resources.PORT, events.BEFORE_CREATE, mock.ANY) + self.assertEqual(1, a.counter) + self.assertEqual(1, b.counter) + self.assertEqual(1, c.counter) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/cmd/test_netns_cleanup.py neutron-9.0.0~b3~dev557/neutron/tests/unit/cmd/test_netns_cleanup.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/cmd/test_netns_cleanup.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/cmd/test_netns_cleanup.py 2016-08-29 20:05:49.000000000 +0000 @@ -20,6 +20,12 @@ class TestNetnsCleanup(base.BaseTestCase): + def setUp(self): + super(TestNetnsCleanup, self).setUp() + conn_patcher = mock.patch( + 'neutron.agent.ovsdb.native.connection.Connection.start') + 
conn_patcher.start() + self.addCleanup(conn_patcher.stop) def test_kill_dhcp(self, dhcp_active=True): conf = mock.Mock() @@ -61,8 +67,8 @@ with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.return_value.namespace_is_empty.return_value = is_empty - self.assertEqual(util.eligible_for_deletion(conf, ns, force), - expected) + self.assertEqual(expected, + util.eligible_for_deletion(conf, ns, force)) expected_calls = [mock.call(namespace=ns)] if not force: @@ -95,10 +101,8 @@ with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') as ip_wrap: ip_wrap.return_value.namespace_is_empty.return_value = True - self.assertEqual(True, - util.eligible_for_deletion(conf, ns_dhcp, False)) - self.assertEqual(False, - util.eligible_for_deletion(conf, ns_l3, False)) + self.assertTrue(util.eligible_for_deletion(conf, ns_dhcp, False)) + self.assertFalse(util.eligible_for_deletion(conf, ns_l3, False)) expected_calls = [mock.call(namespace=ns_dhcp), mock.call().namespace_is_empty()] diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/cmd/test_ovs_cleanup.py neutron-9.0.0~b3~dev557/neutron/tests/unit/cmd/test_ovs_cleanup.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/cmd/test_ovs_cleanup.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/cmd/test_ovs_cleanup.py 2016-08-03 20:10:34.000000000 +0000 @@ -26,6 +26,7 @@ class TestOVSCleanup(base.BaseTestCase): + @mock.patch('neutron.agent.ovsdb.native.connection.Connection.start') @mock.patch('neutron.common.config.setup_logging') @mock.patch('neutron.cmd.ovs_cleanup.setup_conf') @mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges') @@ -33,7 +34,7 @@ @mock.patch.object(util, 'collect_neutron_ports') @mock.patch.object(util, 'delete_neutron_ports') def test_main(self, mock_delete, mock_collect, mock_ovs, - mock_get_bridges, mock_conf, mock_logging): + mock_get_bridges, mock_conf, mock_logging, mock_conn): bridges = ['br-int', 'br-ex'] ports = ['p1', 'p2', 'p3'] conf = mock.Mock() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/common/test_cache_utils.py neutron-9.0.0~b3~dev557/neutron/tests/unit/common/test_cache_utils.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/common/test_cache_utils.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/common/test_cache_utils.py 2016-08-03 20:10:34.000000000 +0000 @@ -100,7 +100,7 @@ self.decor._cache.get.return_value = self.not_cached retval = self.decor.func(*args, **kwargs) self.decor._cache.set.assert_called_once_with( - expected_key, self.decor.func_retval) + str(expected_key), self.decor.func_retval) self.assertEqual(self.decor.func_retval, retval) def test_cache_hit(self): @@ -110,7 +110,7 @@ retval = self.decor.func(*args, **kwargs) self.assertFalse(self.decor._cache.set.called) self.assertEqual(self.decor._cache.get.return_value, retval) - self.decor._cache.get.assert_called_once_with(expected_key) + self.decor._cache.get.assert_called_once_with(str(expected_key)) def test_get_unhashable(self): expected_key = (self.func_name, [1], 2) @@ -118,7 +118,7 @@ retval = self.decor.func([1], 2) self.assertFalse(self.decor._cache.set.called) self.assertEqual(self.decor.func_retval, retval) - self.decor._cache.get.assert_called_once_with(expected_key) + self.decor._cache.get.assert_called_once_with(str(expected_key)) def test_missing_cache(self): delattr(self.decor, '_cache') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/common/test_ipv6_utils.py neutron-9.0.0~b3~dev557/neutron/tests/unit/common/test_ipv6_utils.py 
--- neutron-9.0.0~b2~dev280/neutron/tests/unit/common/test_ipv6_utils.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/common/test_ipv6_utils.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,8 +15,8 @@ import collections import mock +from neutron_lib import constants -from neutron.common import constants from neutron.common import ipv6_utils from neutron.tests import base from neutron.tests import tools diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/common/test_rpc.py neutron-9.0.0~b3~dev557/neutron/tests/unit/common/test_rpc.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/common/test_rpc.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/common/test_rpc.py 2016-08-29 20:05:49.000000000 +0000 @@ -23,12 +23,11 @@ import testtools from neutron.common import rpc -from neutron import context from neutron.tests import base CONF = cfg.CONF -CONF.import_opt('state_path', 'neutron.common.config') +CONF.import_opt('state_path', 'neutron.conf.common') class RPCFixture(fixtures.Fixture): @@ -234,43 +233,51 @@ context.to_dict.assert_called_once_with() - @mock.patch.object(context, 'Context') - def test_deserialize_context(self, mock_con): - context = mock.Mock() - context.copy.return_value = {'foo': 'bar', - 'user_id': 1, - 'tenant_id': 1} - - self.ser.deserialize_context(context) - mock_con.assert_called_once_with(1, 1, foo='bar') - - @mock.patch.object(context, 'Context') - def test_deserialize_context_no_user_id(self, mock_con): - context = mock.Mock() - context.copy.return_value = {'foo': 'bar', - 'user': 1, - 'tenant_id': 1} - - self.ser.deserialize_context(context) - mock_con.assert_called_once_with(1, 1, foo='bar') + @mock.patch('neutron.policy.check_is_advsvc', return_value=False) + @mock.patch('neutron.policy.check_is_admin', return_value=False) + def test_deserialize_context(self, m, n): + context_dict = {'foo': 'bar', + 'user_id': 1, + 'tenant_id': 1} + + c = self.ser.deserialize_context(context_dict) + + self.assertEqual(1, c.user_id) + self.assertEqual(1, c.project_id) + + @mock.patch('neutron.policy.check_is_advsvc', return_value=False) + @mock.patch('neutron.policy.check_is_admin', return_value=False) + def test_deserialize_context_no_user_id(self, m, n): + context_dict = {'foo': 'bar', + 'user': 1, + 'tenant_id': 1} + + c = self.ser.deserialize_context(context_dict) + + self.assertEqual(1, c.user_id) + self.assertEqual(1, c.project_id) + + @mock.patch('neutron.policy.check_is_advsvc', return_value=False) + @mock.patch('neutron.policy.check_is_admin', return_value=False) + def test_deserialize_context_no_tenant_id(self, m, n): + context_dict = {'foo': 'bar', + 'user_id': 1, + 'project_id': 1} + + c = self.ser.deserialize_context(context_dict) + + self.assertEqual(1, c.user_id) + self.assertEqual(1, c.project_id) + + @mock.patch('neutron.policy.check_is_advsvc', return_value=False) + @mock.patch('neutron.policy.check_is_admin', return_value=False) + def test_deserialize_context_no_ids(self, m, n): + context_dict = {'foo': 'bar'} - @mock.patch.object(context, 'Context') - def test_deserialize_context_no_tenant_id(self, mock_con): - context = mock.Mock() - context.copy.return_value = {'foo': 'bar', - 'user_id': 1, - 'project_id': 1} - - self.ser.deserialize_context(context) - mock_con.assert_called_once_with(1, 1, foo='bar') - - @mock.patch.object(context, 'Context') - def test_deserialize_context_no_ids(self, mock_con): - context = mock.Mock() - context.copy.return_value = {'foo': 'bar'} + c =
self.ser.deserialize_context(context_dict) - self.ser.deserialize_context(context) - mock_con.assert_called_once_with(None, None, foo='bar') + self.assertIsNone(c.user_id) + self.assertIsNone(c.project_id) class ServiceTestCase(base.DietTestCase): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/common/test_utils.py neutron-9.0.0~b3~dev557/neutron/tests/unit/common/test_utils.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/common/test_utils.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/common/test_utils.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,7 +13,9 @@ # under the License. import errno +import os.path import re +import sys import eventlet import mock @@ -30,6 +32,7 @@ from neutron.plugins.common import utils as plugin_utils from neutron.tests import base from neutron.tests.common import helpers +from neutron.tests.unit import tests class TestParseMappings(base.BaseTestCase): @@ -144,20 +147,16 @@ class UtilTestParseVlanRanges(base.BaseTestCase): _err_prefix = "Invalid network VLAN range: '" - _err_too_few = "' - 'need more than 2 values to unpack'." - _err_too_many_prefix = "' - 'too many values to unpack" - _err_not_int = "' - 'invalid literal for int() with base 10: '%s''." + _err_bad_count = "' - 'Need exactly two values for VLAN range'." _err_bad_vlan = "' - '%s is not a valid VLAN tag'." _err_range = "' - 'End of VLAN range is less than start of VLAN range'." - def _range_too_few_err(self, nv_range): - return self._err_prefix + nv_range + self._err_too_few + def _range_err_bad_count(self, nv_range): + return self._err_prefix + nv_range + self._err_bad_count - def _range_too_many_err_prefix(self, nv_range): - return self._err_prefix + nv_range + self._err_too_many_prefix - - def _vlan_not_int_err(self, nv_range, vlan): - return self._err_prefix + nv_range + (self._err_not_int % vlan) + def _range_invalid_vlan(self, nv_range, n): + vlan = nv_range.split(':')[n] + return self._err_prefix + nv_range + (self._err_bad_vlan % vlan) def _nrange_invalid_vlan(self, nv_range, n): vlan = nv_range.split(':')[n] @@ -268,34 +267,31 @@ def test_parse_one_net_incomplete_range(self): config_str = "net1:100" - expected_msg = self._range_too_few_err(config_str) + expected_msg = self._range_err_bad_count(config_str) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) - self.assertEqual(str(err), expected_msg) + self.assertEqual(expected_msg, str(err)) def test_parse_one_net_range_too_many(self): config_str = "net1:100:150:200" - expected_msg_prefix = self._range_too_many_err_prefix(config_str) + expected_msg = self._range_err_bad_count(config_str) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) - # The error message is not same in Python 2 and Python 3. In Python 3, - # it depends on the amount of values used when unpacking, so it cannot - # be predicted as a fixed string. 
- self.assertTrue(str(err).startswith(expected_msg_prefix)) + self.assertEqual(expected_msg, str(err)) def test_parse_one_net_vlan1_not_int(self): config_str = "net1:foo:199" - expected_msg = self._vlan_not_int_err(config_str, 'foo') + expected_msg = self._range_invalid_vlan(config_str, 1) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) - self.assertEqual(str(err), expected_msg) + self.assertEqual(expected_msg, str(err)) def test_parse_one_net_vlan2_not_int(self): config_str = "net1:100:bar" - expected_msg = self._vlan_not_int_err(config_str, 'bar') + expected_msg = self._range_invalid_vlan(config_str, 2) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) - self.assertEqual(str(err), expected_msg) + self.assertEqual(expected_msg, str(err)) def test_parse_one_net_and_max_range(self): config_str = "net1:1:4094" @@ -307,14 +303,14 @@ expected_msg = self._nrange_invalid_vlan(config_str, 1) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) - self.assertEqual(str(err), expected_msg) + self.assertEqual(expected_msg, str(err)) def test_parse_one_net_range_bad_vlan2(self): config_str = "net1:4000:4999" expected_msg = self._nrange_invalid_vlan(config_str, 2) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_one, config_str) - self.assertEqual(str(err), expected_msg) + self.assertEqual(expected_msg, str(err)) class TestParseVlanRangeList(UtilTestParseVlanRanges): @@ -355,18 +351,18 @@ def test_parse_two_nets_bad_vlan_range1(self): config_list = ["net1:100", "net2:200:299"] - expected_msg = self._range_too_few_err(config_list[0]) + expected_msg = self._range_err_bad_count(config_list[0]) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_list, config_list) - self.assertEqual(str(err), expected_msg) + self.assertEqual(expected_msg, str(err)) def test_parse_two_nets_vlan_not_int2(self): config_list = ["net1:100:199", "net2:200:0x200"] - expected_msg = self._vlan_not_int_err(config_list[1], '0x200') + expected_msg = self._range_invalid_vlan(config_list[1], 2) err = self.assertRaises(n_exc.NetworkVlanRangeError, self.parse_list, config_list) - self.assertEqual(str(err), expected_msg) + self.assertEqual(expected_msg, str(err)) def test_parse_two_nets_and_append_1_2(self): config_list = ["net1:100:199", @@ -410,8 +406,8 @@ {"key2": "value2"}, {"key4": "value4"}] added, removed = utils.diff_list_of_dict(old_list, new_list) - self.assertEqual(added, [dict(key4="value4")]) - self.assertEqual(removed, [dict(key3="value3")]) + self.assertEqual([dict(key4="value4")], added) + self.assertEqual([dict(key3="value3")], removed) class TestDict2Tuples(base.BaseTestCase): @@ -750,3 +746,49 @@ for addr in ('XXXX', 'ypp', 'g3:vvv'): with testtools.ExpectedException(netaddr.core.AddrFormatError): utils.AuthenticIPNetwork(addr) + + +class TestExcDetails(base.BaseTestCase): + + def test_attach_exc_details(self): + e = Exception() + utils.attach_exc_details(e, 'details') + self.assertEqual('details', utils.extract_exc_details(e)) + + def test_attach_exc_details_with_interpolation(self): + e = Exception() + utils.attach_exc_details(e, 'details: %s', 'foo') + self.assertEqual('details: foo', utils.extract_exc_details(e)) + + def test_attach_exc_details_with_None_interpolation(self): + e = Exception() + utils.attach_exc_details(e, 'details: %s', None) + self.assertEqual( + 'details: %s' % str(None), utils.extract_exc_details(e)) + + def test_attach_exc_details_with_multiple_interpolation(self): + e = 
Exception() + utils.attach_exc_details( + e, 'details: %s, %s', ('foo', 'bar')) + self.assertEqual('details: foo, bar', utils.extract_exc_details(e)) + + def test_attach_exc_details_with_dict_interpolation(self): + e = Exception() + utils.attach_exc_details( + e, 'details: %(foo)s, %(bar)s', {'foo': 'foo', 'bar': 'bar'}) + self.assertEqual('details: foo, bar', utils.extract_exc_details(e)) + + def test_extract_exc_details_no_details_attached(self): + self.assertIsInstance( + utils.extract_exc_details(Exception()), six.text_type) + + +class ImportModulesRecursivelyTestCase(base.BaseTestCase): + + def test_object_modules(self): + example_module = 'neutron.tests.unit.tests.example.dir.example_module' + sys.modules.pop(example_module, None) + modules = utils.import_modules_recursively( + os.path.dirname(tests.__file__)) + self.assertIn(example_module, modules) + self.assertIn(example_module, sys.modules) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/metering/test_metering_db.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/metering/test_metering_db.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/metering/test_metering_db.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/metering/test_metering_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -155,6 +155,15 @@ for k, v, in keys: self.assertEqual(metering_label['metering_label'][k], v) + def test_update_metering_label(self): + name = 'my label' + description = 'my metering label' + data = {'metering_label': {}} + with self.metering_label(name, description) as metering_label: + metering_label_id = metering_label['metering_label']['id'] + self._update('metering-labels', metering_label_id, data, + webob.exc.HTTPNotImplemented.code) + def test_delete_metering_label(self): name = 'my label' description = 'my metering label' @@ -195,6 +204,20 @@ for k, v, in keys: self.assertEqual(label_rule['metering_label_rule'][k], v) + def test_update_metering_label_rule(self): + name = 'my label' + description = 'my metering label' + direction = 'egress' + remote_ip_prefix = '192.168.0.0/24' + data = {'metering_label_rule': {}} + with self.metering_label(name, description) as metering_label, \ + self.metering_label_rule( + metering_label['metering_label']['id'], + direction, remote_ip_prefix) as label_rule: + rule_id = label_rule['metering_label_rule']['id'] + self._update('metering-label-rules', rule_id, data, + webob.exc.HTTPNotImplemented.code) + def test_delete_metering_label_rule(self): name = 'my label' description = 'my metering label' diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_agentschedulers_db.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_agentschedulers_db.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_agentschedulers_db.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_agentschedulers_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -24,7 +24,6 @@ from webob import exc from neutron.api import extensions -from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import l3_rpc @@ -225,14 +224,17 @@ AgentSchedulerTestMixIn, test_plugin.NeutronDbPluginV2TestCase): fmt = 'json' - plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin' l3_plugin = ('neutron.tests.unit.extensions.test_l3.' 
'TestL3NatAgentSchedulingServicePlugin') def setUp(self): self.useFixture(tools.AttributeMapMemento()) if self.l3_plugin: - service_plugins = {'l3_plugin_name': self.l3_plugin} + service_plugins = { + 'l3_plugin_name': self.l3_plugin, + 'flavors_plugin_name': 'neutron.services.flavors.' + 'flavors_plugin.FlavorsPlugin' + } else: service_plugins = None # NOTE(ivasilevskaya) mocking this way allows some control over mocked @@ -241,7 +243,7 @@ mock.patch('neutron.common.rpc.get_client' ).start().return_value = self.client_mock super(OvsAgentSchedulerTestCaseBase, self).setUp( - self.plugin_str, service_plugins=service_plugins) + 'ml2', service_plugins=service_plugins) mock.patch.object( self.plugin, 'filter_hosts_with_network_access', side_effect=lambda context, network_id, hosts: hosts).start() @@ -966,39 +968,41 @@ self.assertIn(router_ids[0], [r['id'] for r in ret_a]) self.assertIn(router_ids[2], [r['id'] for r in ret_a]) - def test_router_auto_schedule_for_specified_routers(self): + def test_sync_router(self): + l3_rpc_cb = l3_rpc.L3RpcCallback() + self._register_agent_states() + hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) - def _sync_router_with_ids(router_ids, exp_synced, exp_hosted, host_id): + with self.router() as r1: ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA, - router_ids=router_ids) - self.assertEqual(exp_synced, len(ret_a)) - for r in router_ids: - self.assertIn(r, [r['id'] for r in ret_a]) - host_routers = self._list_routers_hosted_by_l3_agent(host_id) - num_host_routers = len(host_routers['routers']) - self.assertEqual(exp_hosted, num_host_routers) + router_ids=[r1['router']['id']]) + # Do not return the router to the agent if it is not bound to it. + self.assertEqual([], ret_a) + host_routers = self._list_routers_hosted_by_l3_agent(hosta_id) + # No router will be auto-scheduled. + self.assertEqual(0, len(host_routers['routers'])) + def test_sync_dvr_router(self): l3_rpc_cb = l3_rpc.L3RpcCallback() - self._register_agent_states() - hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) + dvr_agents = self._register_dvr_agents() - with self.router() as v1,\ - self.router() as v2,\ - self.router() as v3,\ - self.router() as v4: - routers = (v1, v2, v3, v4) - router_ids = [r['router']['id'] for r in routers] - # Sync router1 (router1 is scheduled) - _sync_router_with_ids([router_ids[0]], 1, 1, hosta_id) - # Sync router1 only (no router is scheduled) - _sync_router_with_ids([router_ids[0]], 1, 1, hosta_id) - # Schedule router2 - _sync_router_with_ids([router_ids[1]], 1, 2, hosta_id) - # Sync router2 and router4 (router4 is scheduled) - _sync_router_with_ids([router_ids[1], router_ids[3]], - 2, 3, hosta_id) - # Sync all routers (router3 is scheduled) - _sync_router_with_ids(router_ids, 4, 4, hosta_id) + with self.router() as r1, \ + mock.patch.object(self.l3plugin, 'get_subnet_ids_on_router', + return_value=['fake_subnet_id']), \ + mock.patch.object(self.l3plugin, + '_check_dvr_serviceable_ports_on_host', + return_value=True): + for l3_agent in dvr_agents: + host = l3_agent['host'] + ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=host, + router_ids=[r1['router']['id']]) + router_ids = [r['id'] for r in ret_a] + # Return the router to the agent if the agent hosts a DVR service port. + self.assertIn(r1['router']['id'], router_ids) + host_routers = self._list_routers_hosted_by_l3_agent( + l3_agent['id']) + # No router will be auto-scheduled.
+ self.assertEqual(0, len(host_routers['routers'])) def test_router_without_l3_agents(self): with self.subnet() as s: @@ -1045,9 +1049,9 @@ def test_dvr_router_csnat_rescheduling(self): helpers.register_l3_agent( - host=L3_HOSTA, agent_mode=n_const.L3_AGENT_MODE_DVR_SNAT) + host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) helpers.register_l3_agent( - host=L3_HOSTB, agent_mode=n_const.L3_AGENT_MODE_DVR_SNAT) + host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) with self.subnet() as s: net_id = s['subnet']['network_id'] self._set_net_external(net_id) @@ -1072,9 +1076,9 @@ def test_dvr_router_manual_rescheduling(self): helpers.register_l3_agent( - host=L3_HOSTA, agent_mode=n_const.L3_AGENT_MODE_DVR_SNAT) + host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) helpers.register_l3_agent( - host=L3_HOSTB, agent_mode=n_const.L3_AGENT_MODE_DVR_SNAT) + host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) with self.subnet() as s: net_id = s['subnet']['network_id'] self._set_net_external(net_id) @@ -1290,15 +1294,14 @@ class OvsDhcpAgentNotifierTestCase(test_agent.AgentDBTestMixIn, AgentSchedulerTestMixIn, test_plugin.NeutronDbPluginV2TestCase): - plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin' - def setUp(self): self.useFixture(tools.AttributeMapMemento()) - super(OvsDhcpAgentNotifierTestCase, self).setUp(self.plugin_str) + super(OvsDhcpAgentNotifierTestCase, self).setUp('ml2') mock.patch.object( self.plugin, 'filter_hosts_with_network_access', side_effect=lambda context, network_id, hosts: hosts).start() - self.dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() + plugin = manager.NeutronManager.get_plugin() + self.dhcp_notifier = plugin.agent_notifiers[constants.AGENT_TYPE_DHCP] self.dhcp_notifier_cast = mock.patch( 'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.' 'DhcpAgentNotifyAPI._cast_message').start() @@ -1435,7 +1438,6 @@ test_agent.AgentDBTestMixIn, AgentSchedulerTestMixIn, test_plugin.NeutronDbPluginV2TestCase): - plugin_str = 'neutron.plugins.ml2.plugin.Ml2Plugin' l3_plugin = ('neutron.tests.unit.extensions.test_l3.' 'TestL3NatAgentSchedulingServicePlugin') @@ -1450,11 +1452,15 @@ self.useFixture(tools.AttributeMapMemento()) if self.l3_plugin: - service_plugins = {'l3_plugin_name': self.l3_plugin} + service_plugins = { + 'l3_plugin_name': self.l3_plugin, + 'flavors_plugin_name': 'neutron.services.flavors.' + 'flavors_plugin.FlavorsPlugin' + } else: service_plugins = None super(OvsL3AgentNotifierTestCase, self).setUp( - self.plugin_str, service_plugins=service_plugins) + 'ml2', service_plugins=service_plugins) ext_mgr = extensions.PluginAwareExtensionManager.get_instance() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) self.adminContext = context.get_admin_context() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_api.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_api.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_api.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_api.py 2016-08-29 20:05:49.000000000 +0000 @@ -12,11 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import mock +from neutron_lib import exceptions +from oslo_config import cfg from oslo_db import exception as db_exc +import osprofiler +import sqlalchemy from sqlalchemy.orm import exc import testtools -from neutron.common import exceptions from neutron.db import api as db_api from neutron.tests import base @@ -69,6 +73,10 @@ e = exc.StaleDataError() self.assertIsNone(self._decorated_function(1, e)) + def test_dbconnection_error_caught(self): + e = db_exc.DBConnectionError() + self.assertIsNone(self._decorated_function(1, e)) + def test_multi_exception_contains_retry(self): e = exceptions.MultipleExceptions( [ValueError(), db_exc.RetryRequest(TypeError())]) @@ -92,3 +100,16 @@ e = db_exc.DBError("(pymysql.err.InternalError) (1305, u'SAVEPOINT " "sa_savepoint_1 does not exist')") self.assertIsNone(self._decorated_function(1, e)) + + +class TestCommonDBfunctions(base.BaseTestCase): + + def test_set_hook(self): + with mock.patch.object(osprofiler.sqlalchemy, + 'add_tracing') as profiler: + cfg.CONF.set_override('enabled', True, group='profiler') + cfg.CONF.set_override('trace_sqlalchemy', True, group='profiler') + engine_mock = mock.Mock() + db_api.set_hook(engine_mock) + profiler.assert_called_with(sqlalchemy, engine_mock, + 'neutron.db') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_db_base_plugin_v2.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_db_base_plugin_v2.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_db_base_plugin_v2.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_db_base_plugin_v2.py 2016-08-29 20:05:49.000000000 +0000 @@ -50,8 +50,9 @@ from neutron.db import db_base_plugin_common from neutron.db import ipam_backend_mixin from neutron.db import l3_db +from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 -from neutron.db import securitygroups_db as sgdb +from neutron.db import standard_attr from neutron import manager from neutron.tests import base from neutron.tests import tools @@ -332,7 +333,8 @@ for arg in ('ip_version', 'tenant_id', 'subnetpool_id', 'prefixlen', 'enable_dhcp', 'allocation_pools', 'segment_id', 'dns_nameservers', 'host_routes', - 'shared', 'ipv6_ra_mode', 'ipv6_address_mode'): + 'shared', 'ipv6_ra_mode', 'ipv6_address_mode', + 'service_types'): # Arg must be present and not null (but can be false) if kwargs.get(arg) is not None: data['subnet'][arg] = kwargs[arg] @@ -625,6 +627,7 @@ ipv6_ra_mode=None, ipv6_address_mode=None, tenant_id=None, + service_types=None, set_context=False): with optional_ctx(network, self.network, set_context=set_context, @@ -1437,7 +1440,7 @@ with self.subnet(gateway_ip='fe80::1', cidr='2607:f0d0:1002:51::/64', ip_version=6, - ipv6_address_mode=n_const.IPV6_SLAAC) as subnet: + ipv6_address_mode=constants.IPV6_SLAAC) as subnet: self.assertTrue( ipv6_utils.is_auto_address_subnet(subnet['subnet'])) self.check_update_port_mac(subnet=subnet) @@ -1654,7 +1657,7 @@ with self.subnet( cidr='2607:f0d0:1002:51::/64', ip_version=6, - ipv6_address_mode=n_const.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC, gateway_ip=constants.ATTR_NOT_SPECIFIED) as subnet: with self.port(subnet=subnet) as port: ips = port['port']['fixed_ips'] @@ -1858,7 +1861,7 @@ def test_create_port_invalid_fixed_ip_address_v6_pd_slaac(self): with self.network(name='net') as network: subnet = self._make_v6_subnet( - network, n_const.IPV6_SLAAC, ipv6_pd=True) + network, constants.IPV6_SLAAC, ipv6_pd=True) net_id = subnet['subnet']['network_id'] subnet_id = 
subnet['subnet']['id'] # update subnet with new prefix @@ -1876,7 +1879,7 @@ def test_update_port_invalid_fixed_ip_address_v6_pd_slaac(self): with self.network(name='net') as network: subnet = self._make_v6_subnet( - network, n_const.IPV6_SLAAC, ipv6_pd=True) + network, constants.IPV6_SLAAC, ipv6_pd=True) net_id = subnet['subnet']['network_id'] subnet_id = subnet['subnet']['id'] # update subnet with new prefix @@ -1904,7 +1907,7 @@ def test_update_port_invalid_subnet_v6_pd_slaac(self): with self.network(name='net') as network: subnet = self._make_v6_subnet( - network, n_const.IPV6_SLAAC, ipv6_pd=True) + network, constants.IPV6_SLAAC, ipv6_pd=True) subnet_id = subnet['subnet']['id'] # update subnet with new prefix prefix = '2001::/64' @@ -1932,7 +1935,7 @@ with self.subnet(gateway_ip='fe80::1', cidr='2607:f0d0:1002:51::/64', ip_version=6, - ipv6_address_mode=n_const.IPV6_SLAAC) as subnet: + ipv6_address_mode=constants.IPV6_SLAAC) as subnet: kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': '2607:f0d0:1002:51::5'}]} net_id = subnet['subnet']['network_id'] @@ -1944,7 +1947,7 @@ with self.subnet(gateway_ip='fe80::1', cidr='fe80::/64', ip_version=6, - ipv6_address_mode=n_const.IPV6_SLAAC) as subnet: + ipv6_address_mode=constants.IPV6_SLAAC) as subnet: kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'], 'ip_address': 'fe80::1'}]} net_id = subnet['subnet']['network_id'] @@ -1960,7 +1963,7 @@ with self.subnet(gateway_ip='fe80::1', cidr='2607:f0d0:1002:51::/64', ip_version=6, - ipv6_address_mode=n_const.IPV6_SLAAC) as subnet: + ipv6_address_mode=constants.IPV6_SLAAC) as subnet: with self.port(subnet, fixed_ips=[{'subnet_id': subnet['subnet']['id']}]) as port: @@ -1979,7 +1982,7 @@ cidr='2607:f0d0:1002:51::/64', ip_version=6, gateway_ip='fe80::1', - ipv6_address_mode=n_const.IPV6_SLAAC) as subnet2: + ipv6_address_mode=constants.IPV6_SLAAC) as subnet2: with self.port( subnet, fixed_ips=[{'subnet_id': subnet['subnet']['id']}, @@ -2008,7 +2011,7 @@ cidr='2607:f0d0:1002:51::/64', ip_version=6, gateway_ip='fe80::1', - ipv6_address_mode=n_const.IPV6_SLAAC): + ipv6_address_mode=constants.IPV6_SLAAC): subnet_ip_net = netaddr.IPNetwork(subnet_v4['subnet']['cidr']) # Create a router port without specifying fixed_ips port = self._make_port( @@ -2029,7 +2032,7 @@ res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) - subnet = self._make_v6_subnet(network, n_const.IPV6_SLAAC) + subnet = self._make_v6_subnet(network, constants.IPV6_SLAAC) port = self._make_port(self.fmt, network['network']['id']) self.assertEqual(1, len(port['port']['fixed_ips'])) self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet), @@ -2046,7 +2049,7 @@ with self.port(subnet=subnet, fixed_ips=fixed_ips) as port: port_fixed_ips = port['port']['fixed_ips'] self.assertEqual(1, len(port_fixed_ips)) - if addr_mode == n_const.IPV6_SLAAC: + if addr_mode == constants.IPV6_SLAAC: exp_ip_addr = self._calc_ipv6_addr_by_EUI64(port, subnet) self.assertEqual(exp_ip_addr, port_fixed_ips[0]['ip_address']) @@ -2055,15 +2058,15 @@ def test_create_port_with_ipv6_slaac_subnet_in_fixed_ips(self): self._test_create_port_with_ipv6_subnet_in_fixed_ips( - addr_mode=n_const.IPV6_SLAAC) + addr_mode=constants.IPV6_SLAAC) def test_create_port_with_ipv6_pd_subnet_in_fixed_ips(self): self._test_create_port_with_ipv6_subnet_in_fixed_ips( - addr_mode=n_const.IPV6_SLAAC, ipv6_pd=True) + addr_mode=constants.IPV6_SLAAC, ipv6_pd=True) def 
test_create_port_with_ipv6_dhcp_stateful_subnet_in_fixed_ips(self): self._test_create_port_with_ipv6_subnet_in_fixed_ips( - addr_mode=n_const.DHCPV6_STATEFUL) + addr_mode=constants.DHCPV6_STATEFUL) def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self): """Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets.""" @@ -2076,13 +2079,13 @@ {'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24', 'ip_version': 4, 'ra_addr_mode': None}, {'gateway': 'fe80::1', 'cidr': 'fe80::/64', - 'ip_version': 6, 'ra_addr_mode': n_const.IPV6_SLAAC}, + 'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC}, {'gateway': 'fe81::1', 'cidr': 'fe81::/64', - 'ip_version': 6, 'ra_addr_mode': n_const.IPV6_SLAAC}, + 'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC}, {'gateway': 'fe82::1', 'cidr': 'fe82::/64', - 'ip_version': 6, 'ra_addr_mode': n_const.DHCPV6_STATEFUL}, + 'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}, {'gateway': 'fe83::1', 'cidr': 'fe83::/64', - 'ip_version': 6, 'ra_addr_mode': n_const.DHCPV6_STATEFUL}] + 'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}] subnets = {} for sub_dict in sub_dicts: subnet = self._make_subnet( @@ -2100,15 +2103,15 @@ # IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6 # SLAAC subnets. self.assertEqual(4, len(port['port']['fixed_ips'])) - addr_mode_count = {None: 0, n_const.DHCPV6_STATEFUL: 0, - n_const.IPV6_SLAAC: 0} + addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0, + constants.IPV6_SLAAC: 0} for fixed_ip in port['port']['fixed_ips']: subnet_id = fixed_ip['subnet_id'] if subnet_id in subnets: addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1 self.assertEqual(1, addr_mode_count[None]) - self.assertEqual(1, addr_mode_count[n_const.DHCPV6_STATEFUL]) - self.assertEqual(2, addr_mode_count[n_const.IPV6_SLAAC]) + self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL]) + self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC]) def test_delete_port_with_ipv6_slaac_address(self): """Test that a port with an IPv6 SLAAC address can be deleted.""" @@ -2116,7 +2119,7 @@ admin_state_up=True) network = self.deserialize(self.fmt, res) # Create a port that has an associated IPv6 SLAAC address - self._make_v6_subnet(network, n_const.IPV6_SLAAC) + self._make_v6_subnet(network, constants.IPV6_SLAAC) res = self._create_port(self.fmt, net_id=network['network']['id']) port = self.deserialize(self.fmt, res) self.assertEqual(1, len(port['port']['fixed_ips'])) @@ -2133,7 +2136,7 @@ # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet self._make_subnet(self.fmt, network, gateway='10.0.0.1', cidr='10.0.0.0/24', ip_version=4) - subnet_v6 = self._make_v6_subnet(network, n_const.IPV6_SLAAC) + subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC) res = self._create_port(self.fmt, net_id=network['network']['id']) port = self.deserialize(self.fmt, res) self.assertEqual(2, len(port['port']['fixed_ips'])) @@ -2157,7 +2160,7 @@ # Create a port using an IPv4 subnet and an IPv6 SLAAC subnet subnet_v4 = self._make_subnet(self.fmt, network, gateway='10.0.0.1', cidr='10.0.0.0/24', ip_version=4) - subnet_v6 = self._make_v6_subnet(network, n_const.IPV6_SLAAC) + subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC) res = self._create_port(self.fmt, net_id=network['network']['id']) port = self.deserialize(self.fmt, res) self.assertEqual(2, len(port['port']['fixed_ips'])) @@ -2184,12 +2187,12 @@ gateway='2001:100::1', cidr='2001:100::0/64', ip_version=6, - ipv6_ra_mode=n_const.IPV6_SLAAC) + 
ipv6_ra_mode=constants.IPV6_SLAAC) v6_subnet_2 = self._make_subnet(self.fmt, network, gateway='2001:200::1', cidr='2001:200::0/64', ip_version=6, - ipv6_ra_mode=n_const.IPV6_SLAAC) + ipv6_ra_mode=constants.IPV6_SLAAC) port = self._make_port(self.fmt, network['network']['id']) port_mac = port['port']['mac_address'] cidr_1 = v6_subnet_1['subnet']['cidr'] @@ -3119,8 +3122,6 @@ def test_create_subnet_no_ip_version(self): with self.network() as network: - cfg.CONF.set_override('default_ipv4_subnet_pool', None) - cfg.CONF.set_override('default_ipv6_subnet_pool', None) data = {'subnet': {'network_id': network['network']['id'], 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) @@ -3131,7 +3132,6 @@ with self.network() as network: tenant_id = network['network']['tenant_id'] cfg.CONF.set_override('ipv6_pd_enabled', False) - cfg.CONF.set_override('default_ipv6_subnet_pool', None) data = {'subnet': {'network_id': network['network']['id'], 'ip_version': '6', 'tenant_id': tenant_id}} @@ -3365,8 +3365,8 @@ network = self.deserialize(self.fmt, res) subnet = self._make_subnet(self.fmt, network, gateway='fe80::1', cidr='fe80::/64', ip_version=6, - ipv6_ra_mode=n_const.IPV6_SLAAC, - ipv6_address_mode=n_const.IPV6_SLAAC) + ipv6_ra_mode=constants.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC) kwargs = {} if port_owner: kwargs['device_owner'] = port_owner @@ -3666,8 +3666,8 @@ 'allocation_pools': allocation_pools} self._test_create_subnet(expected=expected, gateway_ip=gateway, cidr=cidr, ip_version=6, - ipv6_ra_mode=n_const.DHCPV6_STATEFUL, - ipv6_address_mode=n_const.DHCPV6_STATEFUL) + ipv6_ra_mode=constants.DHCPV6_STATEFUL, + ipv6_address_mode=constants.DHCPV6_STATEFUL) # Gateway is first IP in IPv6 DHCPv6 stateful subnet gateway = '2001::1' allocation_pools = [{'start': '2001::2', @@ -3677,15 +3677,15 @@ 'allocation_pools': allocation_pools} self._test_create_subnet(expected=expected, gateway_ip=gateway, cidr=cidr, ip_version=6, - ipv6_ra_mode=n_const.DHCPV6_STATEFUL, - ipv6_address_mode=n_const.DHCPV6_STATEFUL) + ipv6_ra_mode=constants.DHCPV6_STATEFUL, + ipv6_address_mode=constants.DHCPV6_STATEFUL) # If gateway_ip is not specified, allocate first IP from the subnet expected = {'gateway_ip': gateway, 'cidr': cidr} self._test_create_subnet(expected=expected, cidr=cidr, ip_version=6, - ipv6_ra_mode=n_const.IPV6_SLAAC, - ipv6_address_mode=n_const.IPV6_SLAAC) + ipv6_ra_mode=constants.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC) @testtools.skipIf(tools.is_bsd(), 'bug/1484837') def test_create_subnet_ipv6_pd_gw_values(self): @@ -3699,8 +3699,8 @@ 'allocation_pools': allocation_pools} self._test_create_subnet(expected=expected, gateway_ip=gateway, cidr=cidr, ip_version=6, - ipv6_ra_mode=n_const.DHCPV6_STATELESS, - ipv6_address_mode=n_const.DHCPV6_STATELESS) + ipv6_ra_mode=constants.DHCPV6_STATELESS, + ipv6_address_mode=constants.DHCPV6_STATELESS) # Gateway is first IP in IPv6 DHCPv6 Stateless subnet gateway = '::1' allocation_pools = [{'start': '::2', @@ -3710,15 +3710,15 @@ 'allocation_pools': allocation_pools} self._test_create_subnet(expected=expected, gateway_ip=gateway, cidr=cidr, ip_version=6, - ipv6_ra_mode=n_const.DHCPV6_STATELESS, - ipv6_address_mode=n_const.DHCPV6_STATELESS) + ipv6_ra_mode=constants.DHCPV6_STATELESS, + ipv6_address_mode=constants.DHCPV6_STATELESS) # If gateway_ip is not specified, allocate first IP from the subnet expected = {'gateway_ip': gateway, 'cidr': cidr} self._test_create_subnet(expected=expected, cidr=cidr, 
ip_version=6, - ipv6_ra_mode=n_const.IPV6_SLAAC, - ipv6_address_mode=n_const.IPV6_SLAAC) + ipv6_ra_mode=constants.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC) def test_create_subnet_gw_outside_cidr_returns_201(self): with self.network() as network: @@ -4056,7 +4056,7 @@ def test_create_subnet_ipv6_ra_modes(self): # Test all RA modes with no address mode specified - for ra_mode in n_const.IPV6_MODES: + for ra_mode in constants.IPV6_MODES: self._test_validate_subnet_ipv6_modes( ipv6_ra_mode=ra_mode) self._test_validate_subnet_ipv6_pd_modes( @@ -4064,7 +4064,7 @@ def test_create_subnet_ipv6_addr_modes(self): # Test all address modes with no RA mode specified - for addr_mode in n_const.IPV6_MODES: + for addr_mode in constants.IPV6_MODES: self._test_validate_subnet_ipv6_modes( ipv6_address_mode=addr_mode) self._test_validate_subnet_ipv6_pd_modes( @@ -4072,7 +4072,7 @@ def test_create_subnet_ipv6_same_ra_and_addr_modes(self): # Test all ipv6 modes with ra_mode==addr_mode - for ipv6_mode in n_const.IPV6_MODES: + for ipv6_mode in constants.IPV6_MODES: self._test_validate_subnet_ipv6_modes( ipv6_ra_mode=ipv6_mode, ipv6_address_mode=ipv6_mode) @@ -4083,7 +4083,7 @@ def test_create_subnet_ipv6_different_ra_and_addr_modes(self): # Test all ipv6 modes with ra_mode!=addr_mode for ra_mode, addr_mode in itertools.permutations( - n_const.IPV6_MODES, 2): + constants.IPV6_MODES, 2): self._test_validate_subnet_ipv6_modes( expect_success=not (ra_mode and addr_mode), ipv6_ra_mode=ra_mode, @@ -4099,8 +4099,8 @@ subnet = self._test_create_subnet( gateway_ip=gateway_ip, cidr=cidr, ip_version=constants.IP_VERSION_6, - ipv6_ra_mode=n_const.DHCPV6_STATEFUL, - ipv6_address_mode=n_const.DHCPV6_STATEFUL) + ipv6_ra_mode=constants.DHCPV6_STATEFUL, + ipv6_address_mode=constants.DHCPV6_STATEFUL) self.assertEqual(constants.IP_VERSION_6, subnet['subnet']['ip_version']) self.assertEqual(gateway_ip, @@ -4117,8 +4117,8 @@ self._test_create_subnet( gateway_ip=gateway_ip, cidr=cidr, ip_version=constants.IP_VERSION_6, - ipv6_ra_mode=n_const.DHCPV6_STATEFUL, - ipv6_address_mode=n_const.DHCPV6_STATEFUL) + ipv6_ra_mode=constants.DHCPV6_STATEFUL, + ipv6_address_mode=constants.DHCPV6_STATEFUL) self.assertEqual(webob.exc.HTTPClientError.code, ctx_manager.exception.code) @@ -4128,8 +4128,8 @@ subnet = self._test_create_subnet( gateway_ip=gateway_ip, cidr=cidr, ip_version=constants.IP_VERSION_6, - ipv6_ra_mode=n_const.DHCPV6_STATEFUL, - ipv6_address_mode=n_const.DHCPV6_STATEFUL) + ipv6_ra_mode=constants.DHCPV6_STATEFUL, + ipv6_address_mode=constants.DHCPV6_STATEFUL) self.assertEqual(constants.IP_VERSION_6, subnet['subnet']['ip_version']) self.assertEqual(gateway_ip, @@ -4143,15 +4143,15 @@ self._test_create_subnet( gateway_ip=gateway_ip, cidr=cidr, ip_version=6, - ipv6_ra_mode=n_const.IPV6_SLAAC, - ipv6_address_mode=n_const.IPV6_SLAAC) + ipv6_ra_mode=constants.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC) def test_create_subnet_ipv6_attributes_no_dhcp_enabled(self): gateway_ip = 'fe80::1' cidr = 'fe80::/64' with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: - for mode in n_const.IPV6_MODES: + for mode in constants.IPV6_MODES: self._test_create_subnet(gateway_ip=gateway_ip, cidr=cidr, ip_version=6, enable_dhcp=False, @@ -4189,7 +4189,7 @@ with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(cidr=cidr, ip_version=4, - ipv6_ra_mode=n_const.DHCPV6_STATEFUL) + ipv6_ra_mode=constants.DHCPV6_STATEFUL) self.assertEqual(webob.exc.HTTPClientError.code, 
ctx_manager.exception.code) @@ -4199,7 +4199,7 @@ webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet( cidr=cidr, ip_version=4, - ipv6_address_mode=n_const.DHCPV6_STATEFUL) + ipv6_address_mode=constants.DHCPV6_STATEFUL) self.assertEqual(webob.exc.HTTPClientError.code, ctx_manager.exception.code) @@ -4256,30 +4256,30 @@ def test_create_subnet_ipv6_slaac_with_port_on_network(self): self._test_create_subnet_ipv6_auto_addr_with_port_on_network( - n_const.IPV6_SLAAC) + constants.IPV6_SLAAC) def test_create_subnet_dhcpv6_stateless_with_port_on_network(self): self._test_create_subnet_ipv6_auto_addr_with_port_on_network( - n_const.DHCPV6_STATELESS) + constants.DHCPV6_STATELESS) def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self): self._test_create_subnet_ipv6_auto_addr_with_port_on_network( - n_const.IPV6_SLAAC, + constants.IPV6_SLAAC, device_owner=constants.DEVICE_OWNER_DHCP) def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self): self._test_create_subnet_ipv6_auto_addr_with_port_on_network( - n_const.IPV6_SLAAC, + constants.IPV6_SLAAC, device_owner=constants.DEVICE_OWNER_ROUTER_INTF) def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self): self._test_create_subnet_ipv6_auto_addr_with_port_on_network( - n_const.IPV6_SLAAC, + constants.IPV6_SLAAC, device_owner=constants.DEVICE_OWNER_ROUTER_SNAT) def test_create_subnet_ipv6_slaac_with_db_reference_error(self): self._test_create_subnet_ipv6_auto_addr_with_port_on_network( - n_const.IPV6_SLAAC, insert_db_reference_error=True) + constants.IPV6_SLAAC, insert_db_reference_error=True) def test_update_subnet_no_gateway(self): with self.subnet() as subnet: @@ -4410,11 +4410,11 @@ def test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets(self): self._test_subnet_update_ipv4_and_ipv6_pd_subnets( - ra_addr_mode=n_const.IPV6_SLAAC) + ra_addr_mode=constants.IPV6_SLAAC) def test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets(self): self._test_subnet_update_ipv4_and_ipv6_pd_subnets( - ra_addr_mode=n_const.DHCPV6_STATELESS) + ra_addr_mode=constants.DHCPV6_STATELESS) def test_update_subnet_shared_returns_400(self): with self.network(shared=True) as network: @@ -4541,10 +4541,10 @@ def test_update_subnet_ipv6_attributes_fails(self): with self.subnet(ip_version=6, cidr='fe80::/64', - ipv6_ra_mode=n_const.IPV6_SLAAC, - ipv6_address_mode=n_const.IPV6_SLAAC) as subnet: - data = {'subnet': {'ipv6_ra_mode': n_const.DHCPV6_STATEFUL, - 'ipv6_address_mode': n_const.DHCPV6_STATEFUL}} + ipv6_ra_mode=constants.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC) as subnet: + data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL, + 'ipv6_address_mode': constants.DHCPV6_STATEFUL}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) @@ -4553,8 +4553,8 @@ def test_update_subnet_ipv6_ra_mode_fails(self): with self.subnet(ip_version=6, cidr='fe80::/64', - ipv6_ra_mode=n_const.IPV6_SLAAC) as subnet: - data = {'subnet': {'ipv6_ra_mode': n_const.DHCPV6_STATEFUL}} + ipv6_ra_mode=constants.IPV6_SLAAC) as subnet: + data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) @@ -4563,8 +4563,8 @@ def test_update_subnet_ipv6_address_mode_fails(self): with self.subnet(ip_version=6, cidr='fe80::/64', - ipv6_address_mode=n_const.IPV6_SLAAC) as subnet: - data = {'subnet': {'ipv6_address_mode': n_const.DHCPV6_STATEFUL}} + ipv6_address_mode=constants.IPV6_SLAAC) as subnet: + data = 
{'subnet': {'ipv6_address_mode': constants.DHCPV6_STATEFUL}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) @@ -4573,8 +4573,8 @@ def test_update_subnet_ipv6_cannot_disable_dhcp(self): with self.subnet(ip_version=6, cidr='fe80::/64', - ipv6_ra_mode=n_const.IPV6_SLAAC, - ipv6_address_mode=n_const.IPV6_SLAAC) as subnet: + ipv6_ra_mode=constants.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC) as subnet: data = {'subnet': {'enable_dhcp': False}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) @@ -4586,7 +4586,7 @@ with self.network() as network: with self.subnet(network=network) as subnet: data = {'subnet': {'ipv6_ra_mode': - n_const.DHCPV6_STATEFUL}} + constants.DHCPV6_STATEFUL}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) @@ -4597,7 +4597,7 @@ with self.network() as network: with self.subnet(network=network) as subnet: data = {'subnet': {'ipv6_address_mode': - n_const.DHCPV6_STATEFUL}} + constants.DHCPV6_STATEFUL}} req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = req.get_response(self.api) @@ -6007,7 +6007,7 @@ self.assertEqual(400, res.status_int) -class DbModelTestCase(testlib_api.SqlTestCase): +class DbModelMixin(object): """DB model tests.""" def test_repr(self): """testing the string representation of 'model' classes.""" @@ -6016,59 +6016,23 @@ actual_repr_output = repr(network) exp_start_with = "<neutron.db.models_v2.Network" exp_middle = "[object at %x]" % id(network) exp_end_with = (" {tenant_id=None, id=None, " "name='net_net', status='OK', " "admin_state_up=True, mtu=None, " "vlan_transparent=None, " "availability_zone_hints=None, " "standard_attr_id=None}>") final_exp = exp_start_with + exp_middle + exp_end_with self.assertEqual(final_exp, actual_repr_output) - def _make_network(self, ctx): - with ctx.session.begin(): - network = models_v2.Network(name="net_net", status="OK", - tenant_id='dbcheck', - admin_state_up=True) - ctx.session.add(network) - return network - - def _make_subnet(self, ctx, network_id): - with ctx.session.begin(): - subnet = models_v2.Subnet(name="subsub", ip_version=4, - tenant_id='dbcheck', - cidr='turn_down_for_what', - network_id=network_id) - ctx.session.add(subnet) - return subnet - - def _make_port(self, ctx, network_id): - with ctx.session.begin(): - port = models_v2.Port(network_id=network_id, mac_address='1', - tenant_id='dbcheck', - admin_state_up=True, status="COOL", - device_id="devid", device_owner="me") - ctx.session.add(port) - return port - - def _make_subnetpool(self, ctx): - with ctx.session.begin(): - subnetpool = models_v2.SubnetPool( - ip_version=4, default_prefixlen=4, min_prefixlen=4, - max_prefixlen=4, shared=False, default_quota=4, - address_scope_id='f', tenant_id='dbcheck', - is_default=False - ) - ctx.session.add(subnetpool) - return subnetpool - def _make_security_group_and_rule(self, ctx): with ctx.session.begin(): - sg = sgdb.SecurityGroup(name='sg', description='sg') - rule = sgdb.SecurityGroupRule(security_group=sg, port_range_min=1, - port_range_max=2, protocol='TCP', - ethertype='v4', direction='ingress', - remote_ip_prefix='0.0.0.0/0') + sg = sg_models.SecurityGroup(name='sg', description='sg') + rule = sg_models.SecurityGroupRule( + security_group=sg, port_range_min=1, + port_range_max=2, protocol='TCP', + ethertype='v4', direction='ingress', + remote_ip_prefix='0.0.0.0/0') ctx.session.add(sg) ctx.session.add(rule) return sg, rule @@ -6089,8 +6053,8 @@ def _get_neutron_attr(self, ctx, attr_id): return ctx.session.query( - models_v2.model_base.StandardAttribute).filter( - models_v2.model_base.StandardAttribute.id == attr_id).one() + standard_attr.StandardAttribute).filter( + standard_attr.StandardAttribute.id ==
attr_id).one() def _test_standardattr_removed_on_obj_delete(self, ctx, obj): attr_id = obj.standard_attr_id @@ -6147,9 +6111,9 @@ ctx = context.get_admin_context() sg, rule = self._make_security_group_and_rule(ctx) self._test_staledata_error_on_concurrent_object_update( - sgdb.SecurityGroup, sg['id']) + sg_models.SecurityGroup, sg['id']) self._test_staledata_error_on_concurrent_object_update( - sgdb.SecurityGroupRule, rule['id']) + sg_models.SecurityGroupRule, rule['id']) def _test_staledata_error_on_concurrent_object_update(self, model, dbid): """Test revision compare and swap update breaking on concurrent update. @@ -6251,6 +6215,84 @@ disc, obj.standard_attr.resource_type) +class DbModelTenantTestCase(DbModelMixin, testlib_api.SqlTestCase): + def _make_network(self, ctx): + with ctx.session.begin(): + network = models_v2.Network(name="net_net", status="OK", + tenant_id='dbcheck', + admin_state_up=True) + ctx.session.add(network) + return network + + def _make_subnet(self, ctx, network_id): + with ctx.session.begin(): + subnet = models_v2.Subnet(name="subsub", ip_version=4, + tenant_id='dbcheck', + cidr='turn_down_for_what', + network_id=network_id) + ctx.session.add(subnet) + return subnet + + def _make_port(self, ctx, network_id): + with ctx.session.begin(): + port = models_v2.Port(network_id=network_id, mac_address='1', + tenant_id='dbcheck', + admin_state_up=True, status="COOL", + device_id="devid", device_owner="me") + ctx.session.add(port) + return port + + def _make_subnetpool(self, ctx): + with ctx.session.begin(): + subnetpool = models_v2.SubnetPool( + ip_version=4, default_prefixlen=4, min_prefixlen=4, + max_prefixlen=4, shared=False, default_quota=4, + address_scope_id='f', tenant_id='dbcheck', + is_default=False + ) + ctx.session.add(subnetpool) + return subnetpool + + +class DbModelProjectTestCase(DbModelMixin, testlib_api.SqlTestCase): + def _make_network(self, ctx): + with ctx.session.begin(): + network = models_v2.Network(name="net_net", status="OK", + project_id='dbcheck', + admin_state_up=True) + ctx.session.add(network) + return network + + def _make_subnet(self, ctx, network_id): + with ctx.session.begin(): + subnet = models_v2.Subnet(name="subsub", ip_version=4, + project_id='dbcheck', + cidr='turn_down_for_what', + network_id=network_id) + ctx.session.add(subnet) + return subnet + + def _make_port(self, ctx, network_id): + with ctx.session.begin(): + port = models_v2.Port(network_id=network_id, mac_address='1', + project_id='dbcheck', + admin_state_up=True, status="COOL", + device_id="devid", device_owner="me") + ctx.session.add(port) + return port + + def _make_subnetpool(self, ctx): + with ctx.session.begin(): + subnetpool = models_v2.SubnetPool( + ip_version=4, default_prefixlen=4, min_prefixlen=4, + max_prefixlen=4, shared=False, default_quota=4, + address_scope_id='f', project_id='dbcheck', + is_default=False + ) + ctx.session.add(subnetpool) + return subnetpool + + class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase, testlib_api.SqlTestCase): """Tests for NeutronDbPluginV2 as Mixin. 
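
The hunk above splits the old DbModelTestCase into a DbModelMixin plus two concrete classes whose only job is to build fixtures keyed by tenant_id or by project_id, so every shared assertion runs against both model variants. A minimal, self-contained sketch of that test-mixin pattern (class and key names here are illustrative, not neutron's):

    import unittest


    class OwnerChecksMixin(object):
        """Shared assertions; concrete classes supply _make_record()."""

        def _make_record(self):
            raise NotImplementedError

        def test_owner_is_set(self):
            # the same check runs once per owner-key spelling
            record = self._make_record()
            owner = record.get('tenant_id') or record.get('project_id')
            self.assertEqual('dbcheck', owner)


    class TenantRecordTestCase(OwnerChecksMixin, unittest.TestCase):
        def _make_record(self):
            return {'tenant_id': 'dbcheck'}


    class ProjectRecordTestCase(OwnerChecksMixin, unittest.TestCase):
        def _make_record(self):
            return {'project_id': 'dbcheck'}

Keeping the mixin derived from object (as DbModelMixin is) stops the runner from collecting the shared tests on the mixin itself, where no factories exist; only the two concrete classes are discovered.
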
@@ -6383,7 +6425,7 @@ def _event_incrementer(*args, **kwargs): self._db_execute_count += 1 - engine = db_api.get_engine() + engine = db_api.context_manager.get_legacy_facade().get_engine() event.listen(engine, 'after_execute', _event_incrementer) self.addCleanup(event.remove, engine, 'after_execute', _event_incrementer) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_ipam_backend_mixin.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_ipam_backend_mixin.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_ipam_backend_mixin.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_ipam_backend_mixin.py 2016-08-29 20:05:49.000000000 +0000 @@ -18,7 +18,6 @@ from neutron_lib import constants import webob.exc -from neutron.common import constants as n_const from neutron.db import db_base_plugin_v2 from neutron.db import ipam_backend_mixin from neutron.db import portbindings_db @@ -41,12 +40,18 @@ self.owner_router = constants.DEVICE_OWNER_ROUTER_INTF def _prepare_ips(self, ips): - return [{'ip_address': ip[1], - 'subnet_id': ip[0]} for ip in ips] + results = [] + for ip in ips: + ip_dict = {'ip_address': ip[1], + 'subnet_id': ip[0]} + if len(ip) > 2: + ip_dict['delete_subnet'] = ip[2] + results.append(ip_dict) + return results def _mock_slaac_subnet_on(self): - slaac_subnet = {'ipv6_address_mode': n_const.IPV6_SLAAC, - 'ipv6_ra_mode': n_const.IPV6_SLAAC} + slaac_subnet = {'ipv6_address_mode': constants.IPV6_SLAAC, + 'ipv6_ra_mode': constants.IPV6_SLAAC} self.mixin._get_subnet = mock.Mock(return_value=slaac_subnet) def _mock_slaac_subnet_off(self): @@ -54,6 +59,18 @@ 'ipv6_ra_mode': None} self.mixin._get_subnet = mock.Mock(return_value=non_slaac_subnet) + def _mock_slaac_for_subnet_ids(self, subnet_ids): + """Mock incoming subnets as autoaddressed.""" + def _get_subnet(context, subnet_id): + if subnet_id in subnet_ids: + return {'ipv6_address_mode': constants.IPV6_SLAAC, + 'ipv6_ra_mode': constants.IPV6_SLAAC} + else: + return {'ipv6_address_mode': None, + 'ipv6_ra_mode': None} + + self.mixin._get_subnet = mock.Mock(side_effect=_get_subnet) + def _test_get_changed_ips_for_port(self, expected_change, original_ips, new_ips, owner): change = self.mixin._get_changed_ips_for_port(self.ctx, @@ -87,6 +104,26 @@ self._test_get_changed_ips_for_port(expected_change, original_ips, new_ips, self.owner_non_router) + def test__get_changed_ips_for_port_remove_autoaddress(self): + new = (('id-5', '2000:1234:5678::12FF:FE34:5678', True), + ('id-1', '192.168.1.1')) + new_ips = self._prepare_ips(new) + reference_ips = [ip for ip in new_ips + if ip['subnet_id'] == 'id-1'] + + original = (('id-5', '2000:1234:5678::12FF:FE34:5678'),) + original_ips = self._prepare_ips(original) + + # mock ipv6 subnet as auto addressed and leave ipv4 as regular + self._mock_slaac_for_subnet_ids([new[0][0]]) + # Autoaddressed ip allocation has to be removed + # if it has 'delete_subnet' flag set to True + expected_change = self.mixin.Changes(add=reference_ips, + original=[], + remove=original_ips) + self._test_get_changed_ips_for_port(expected_change, original_ips, + new_ips, self.owner_non_router) + def test__get_changed_ips_for_port_autoaddress_ipv6_pd_enabled(self): owner_not_router = constants.DEVICE_OWNER_DHCP new_ips = self._prepare_ips(self.default_new_ips) @@ -97,8 +134,8 @@ # mock to test auto address part pd_subnet = {'subnetpool_id': constants.IPV6_PD_POOL_ID, - 'ipv6_address_mode': n_const.IPV6_SLAAC, - 'ipv6_ra_mode': n_const.IPV6_SLAAC} + 'ipv6_address_mode': 
constants.IPV6_SLAAC, + 'ipv6_ra_mode': constants.IPV6_SLAAC} self.mixin._get_subnet = mock.Mock(return_value=pd_subnet) # make a copy of original_ips @@ -222,5 +259,4 @@ class TestPortUpdateIpamML2(TestPortUpdateIpam): def setUp(self): - super(TestPortUpdateIpamML2, self).setUp( - plugin='neutron.plugins.ml2.plugin.Ml2Plugin') + super(TestPortUpdateIpamML2, self).setUp(plugin='ml2') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_ipam_non_pluggable_backend.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_ipam_non_pluggable_backend.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_ipam_non_pluggable_backend.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_ipam_non_pluggable_backend.py 2016-08-03 20:10:34.000000000 +0000 @@ -67,7 +67,7 @@ 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', 'fixed_ips': n_const.ATTR_NOT_SPECIFIED, 'mac_address': '12:34:56:78:44:ab', - 'device_owner': 'compute'}} + 'device_owner': n_const.DEVICE_OWNER_COMPUTE_PREFIX}} expected = [] for subnet in subnets: addr = str(ipv6_utils.get_ipv6_addr_by_EUI64( @@ -100,7 +100,7 @@ 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', 'fixed_ips': n_const.ATTR_NOT_SPECIFIED, 'mac_address': '12:34:56:78:44:ab', - 'device_owner': 'compute'}} + 'device_owner': n_const.DEVICE_OWNER_COMPUTE_PREFIX}} expected = [] for subnet in subnets: addr = str(ipv6_utils.get_ipv6_addr_by_EUI64( diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_ipam_pluggable_backend.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_ipam_pluggable_backend.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_ipam_pluggable_backend.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_ipam_pluggable_backend.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
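
The test_ipam_non_pluggable_backend.py hunks above build each expected SLAAC address with ipv6_utils.get_ipv6_addr_by_EUI64, i.e. the subnet prefix plus a modified-EUI-64 interface identifier derived from the port's MAC. A rough standalone sketch of that derivation with netaddr, assuming a /64 prefix (the real helper also validates its inputs, so treat this as an approximation):

    import netaddr


    def slaac_addr(prefix, mac):
        # expand the 48-bit MAC to EUI-64 (ff:fe inserted in the middle)
        eui64 = int(netaddr.EUI(mac).eui64())
        # flip the universal/local bit, 0x02 of the first identifier
        # octet (bit 57 of the 64-bit value), per RFC 4291 appendix A
        iid = eui64 ^ (1 << 57)
        net = netaddr.IPNetwork(prefix)
        # with a /64 the low 64 bits of net.first are zero, so + is safe
        return netaddr.IPAddress(net.first + iid, version=6)


    print(slaac_addr('2001:db8::/64', '12:34:56:78:44:ab'))
    # 2001:db8::1034:56ff:fe78:44ab
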
+import copy import mock import netaddr from neutron_lib import constants @@ -75,7 +76,9 @@ 'driver': mock.Mock(), 'subnet': mock.Mock(), 'subnets': mock.Mock(), - 'port': {'device_owner': 'compute:None'}, + 'port': { + 'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' + }, 'subnet_request': ipam_req.SpecificSubnetRequest( self.tenant_id, self.subnet_id, @@ -356,8 +359,8 @@ allocation_pools = [netaddr.IPRange('::2', '::ffff:ffff:ffff:ffff')] with self.subnet(cidr=None, ip_version=6, subnetpool_id=constants.IPV6_PD_POOL_ID, - ipv6_ra_mode=n_const.IPV6_SLAAC, - ipv6_address_mode=n_const.IPV6_SLAAC): + ipv6_ra_mode=constants.IPV6_SLAAC, + ipv6_address_mode=constants.IPV6_SLAAC): pool_mock.get_instance.assert_called_once_with(None, mock.ANY) self.assertTrue(mocks['driver'].allocate_subnet.called) request = mocks['driver'].allocate_subnet.call_args[0][0] @@ -777,3 +780,74 @@ mocks['ipam']._ipam_allocate_ips.assert_called_once_with( context, mocks['driver'], db_port, changes.remove, revert_on_fail=False) + + +class TestRollback(test_db_base.NeutronDbPluginV2TestCase): + def setUp(self): + cfg.CONF.set_override('ipam_driver', 'internal') + super(TestRollback, self).setUp() + + def test_ipam_rollback_not_broken_on_session_rollback(self): + """Triggers an error that calls rollback on session.""" + with self.network() as net: + with self.subnet(network=net, cidr='10.0.1.0/24') as subnet1: + with self.subnet(network=net, cidr='10.0.2.0/24') as subnet2: + pass + + # If this test fails and this method appears in the server side stack + # trace then IPAM rollback was likely tried using a session which had + # already been rolled back by the DB exception. + def rollback(func, *args, **kwargs): + func(*args, **kwargs) + + # Ensure DBDuplicate exception is raised in the context where IPAM + # rollback is triggered. It "breaks" the session because it triggers DB + # rollback. Inserting a flush in _store_ip_allocation does this. + orig = ipam_pluggable_backend.IpamPluggableBackend._store_ip_allocation + + def store(context, ip_address, *args, **kwargs): + try: + return orig(context, ip_address, *args, **kwargs) + finally: + context.session.flush() + + # Create a port to conflict with later. Simulates a race for addresses. + result = self._create_port( + self.fmt, + net_id=net['network']['id'], + fixed_ips=[{'subnet_id': subnet1['subnet']['id']}, + {'subnet_id': subnet2['subnet']['id']}]) + port = self.deserialize(self.fmt, result) + fixed_ips = port['port']['fixed_ips'] + + # Hands out the same 2nd IP to create conflict and trigger rollback + ips = [{'subnet_id': fixed_ips[0]['subnet_id'], + 'ip_address': fixed_ips[0]['ip_address']}, + {'subnet_id': fixed_ips[1]['subnet_id'], + 'ip_address': fixed_ips[1]['ip_address']}] + + def alloc(*args, **kwargs): + def increment_address(a): + a['ip_address'] = str(netaddr.IPAddress(a['ip_address']) + 1) + # Increment 1st address to return a free address on the first call + increment_address(ips[0]) + try: + return copy.deepcopy(ips) + finally: + # Increment 2nd address to return free address on the 2nd call + increment_address(ips[1]) + + Backend = ipam_pluggable_backend.IpamPluggableBackend + with mock.patch.object(Backend, '_store_ip_allocation', wraps=store),\ + mock.patch.object(Backend, '_safe_rollback', wraps=rollback),\ + mock.patch.object(Backend, '_allocate_ips_for_port', wraps=alloc): + # Create port with two addresses. The wrapper lets one succeed + # then simulates race for the second to trigger IPAM rollback. 
+ response = self._create_port( + self.fmt, + net_id=net['network']['id'], + fixed_ips=[{'subnet_id': subnet1['subnet']['id']}, + {'subnet_id': subnet2['subnet']['id']}]) + + # When all goes well, retry kicks in and the operation is successful. + self.assertEqual(webob.exc.HTTPCreated.code, response.status_int) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_l3_db.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_l3_db.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_l3_db.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_l3_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -14,6 +14,7 @@ # limitations under the License. import mock +from neutron_lib import constants as n_const from neutron_lib import exceptions as n_exc import testtools @@ -161,7 +162,7 @@ def test_prevent_l3_port_no_fixed_ips(self, gp): # without fixed IPs is allowed gp.return_value.get_port.return_value = { - 'device_owner': 'network:router_interface', 'fixed_ips': [], + 'device_owner': n_const.DEVICE_OWNER_ROUTER_INTF, 'fixed_ips': [], 'id': 'f' } self.db.prevent_l3_port_deletion(None, None) @@ -170,7 +171,7 @@ def test_prevent_l3_port_no_router(self, gp): # without router is allowed gp.return_value.get_port.return_value = { - 'device_owner': 'network:router_interface', + 'device_owner': n_const.DEVICE_OWNER_ROUTER_INTF, 'device_id': '44', 'id': 'f', 'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': '4'}]} self.db.get_router = mock.Mock() @@ -180,7 +181,7 @@ @mock.patch.object(manager.NeutronManager, 'get_plugin') def test_prevent_l3_port_existing_router(self, gp): gp.return_value.get_port.return_value = { - 'device_owner': 'network:router_interface', + 'device_owner': n_const.DEVICE_OWNER_ROUTER_INTF, 'device_id': 'some_router', 'id': 'f', 'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': '4'}]} self.db.get_router = mock.Mock() @@ -190,7 +191,7 @@ @mock.patch.object(manager.NeutronManager, 'get_plugin') def test_prevent_l3_port_existing_floating_ip(self, gp): gp.return_value.get_port.return_value = { - 'device_owner': 'network:floatingip', + 'device_owner': n_const.DEVICE_OWNER_FLOATINGIP, 'device_id': 'some_flip', 'id': 'f', 'fixed_ips': [{'ip_address': '1.1.1.1', 'subnet_id': '4'}]} self.db.get_floatingip = mock.Mock() @@ -199,7 +200,7 @@ @mock.patch.object(l3_db, '_notify_subnetpool_address_scope_update') def test_subscribe_address_scope_of_subnetpool(self, notify): - l3_db.subscribe() + l3_db.L3RpcNotifierMixin._subscribe_callbacks() registry.notify(resources.SUBNETPOOL_ADDRESS_SCOPE, events.AFTER_UPDATE, mock.ANY, context=mock.ANY, subnetpool_id='fake_id') @@ -228,6 +229,8 @@ return_value=router_dict),\ mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_update_router_gw_info') as urgi,\ + mock.patch.object(l3_db.L3_NAT_dbonly_mixin, '_get_router', + return_value=router_db),\ mock.patch.object(l3_db.L3_NAT_db_mixin, 'notify_router_updated')\ as nru: diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_l3_dvr_db.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_l3_dvr_db.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_l3_dvr_db.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_l3_dvr_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -17,6 +17,7 @@ from neutron_lib import constants as l3_const from neutron_lib import exceptions from oslo_utils import uuidutils +import testtools from neutron.common import constants as n_const from neutron import context @@ -42,8 +43,7 @@ class 
L3DvrTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self): - core_plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' - super(L3DvrTestCase, self).setUp(plugin=core_plugin) + super(L3DvrTestCase, self).setUp(plugin='ml2') self.core_plugin = manager.NeutronManager.get_plugin() self.ctx = context.get_admin_context() self.mixin = FakeL3Plugin() @@ -526,7 +526,7 @@ self.ctx, filters=dvr_filters) self.assertEqual(1, len(dvr_ports)) - def test_remove_router_interface_csnat_ports_removal_with_ipv6(self): + def _setup_router_with_v4_and_v6(self): router_dict = {'name': 'test_router', 'admin_state_up': True, 'distributed': True} router = self._create_router(router_dict) @@ -549,36 +549,95 @@ {'subnet_id': subnet_v4['subnet']['id']}) self.mixin.add_router_interface(self.ctx, router['id'], {'subnet_id': subnet_v6['subnet']['id']}) - csnat_filters = {'device_owner': - [l3_const.DEVICE_OWNER_ROUTER_SNAT]} - csnat_ports = self.core_plugin.get_ports( - self.ctx, filters=csnat_filters) - self.assertEqual(2, len(csnat_ports)) - dvr_filters = {'device_owner': - [l3_const.DEVICE_OWNER_DVR_INTERFACE]} - dvr_ports = self.core_plugin.get_ports( - self.ctx, filters=dvr_filters) - self.assertEqual(2, len(dvr_ports)) - with mock.patch.object( - manager.NeutronManager, - 'get_service_plugins') as get_svc_plugin: - get_svc_plugin.return_value = { - plugin_const.L3_ROUTER_NAT: plugin} - self.mixin.manager = manager - self.mixin.remove_router_interface( - self.ctx, router['id'], - {'subnet_id': subnet_v4['subnet']['id']}) - - csnat_ports = self.core_plugin.get_ports( - self.ctx, filters=csnat_filters) - self.assertEqual(1, len(csnat_ports)) - self.assertEqual( - subnet_v6['subnet']['id'], - csnat_ports[0]['fixed_ips'][0]['subnet_id']) - - dvr_ports = self.core_plugin.get_ports( - self.ctx, filters=dvr_filters) - self.assertEqual(1, len(dvr_ports)) + get_svc_plugin = mock.patch.object( + manager.NeutronManager, 'get_service_plugins').start() + get_svc_plugin.return_value = { + plugin_const.L3_ROUTER_NAT: plugin} + self.mixin.manager = manager + return router, subnet_v4, subnet_v6 + + def test_undo_router_interface_change_on_csnat_error(self): + self._test_undo_router_interface_change_on_csnat_error(False) + + def test_undo_router_interface_change_on_csnat_error_revert_failure(self): + self._test_undo_router_interface_change_on_csnat_error(True) + + def _test_undo_router_interface_change_on_csnat_error(self, fail_revert): + router, subnet_v4, subnet_v6 = self._setup_router_with_v4_and_v6() + net = {'network': {'id': subnet_v6['subnet']['network_id'], + 'tenant_id': subnet_v6['subnet']['tenant_id']}} + orig_update = self.mixin._core_plugin.update_port + + def update_port(*args, **kwargs): + # 1st port update is the interface, 2nd is csnat, 3rd is revert + # we want to simulate errors after the 1st + update_port.calls += 1 + if update_port.calls == 2: + raise RuntimeError('csnat update failure') + if update_port.calls == 3 and fail_revert: + # this is to ensure that if the revert fails, the original + # exception is raised (not this ValueError) + raise ValueError('failure from revert') + return orig_update(*args, **kwargs) + update_port.calls = 0 + self.mixin._core_plugin.update_port = update_port + + with self.subnet(network=net, cidr='fe81::/64', + gateway_ip='fe81::1', ip_version=6) as subnet2_v6: + with testtools.ExpectedException(RuntimeError): + self.mixin.add_router_interface(self.ctx, router['id'], + {'subnet_id': subnet2_v6['subnet']['id']}) + if fail_revert: + # a revert failure will mean the 
interface is still added + # so we can't re-add it + return + # starting over should work if first interface was cleaned up + self.mixin.add_router_interface(self.ctx, router['id'], + {'subnet_id': subnet2_v6['subnet']['id']}) + + def test_remove_router_interface_csnat_ports_removal_with_ipv6(self): + router, subnet_v4, subnet_v6 = self._setup_router_with_v4_and_v6() + csnat_filters = {'device_owner': + [l3_const.DEVICE_OWNER_ROUTER_SNAT]} + csnat_ports = self.core_plugin.get_ports( + self.ctx, filters=csnat_filters) + self.assertEqual(2, len(csnat_ports)) + dvr_filters = {'device_owner': + [l3_const.DEVICE_OWNER_DVR_INTERFACE]} + dvr_ports = self.core_plugin.get_ports( + self.ctx, filters=dvr_filters) + self.assertEqual(2, len(dvr_ports)) + self.mixin.remove_router_interface( + self.ctx, router['id'], + {'subnet_id': subnet_v4['subnet']['id']}) + csnat_ports = self.core_plugin.get_ports( + self.ctx, filters=csnat_filters) + self.assertEqual(1, len(csnat_ports)) + self.assertEqual( + subnet_v6['subnet']['id'], + csnat_ports[0]['fixed_ips'][0]['subnet_id']) + + dvr_ports = self.core_plugin.get_ports( + self.ctx, filters=dvr_filters) + self.assertEqual(1, len(dvr_ports)) + + def test_remove_router_interface_csnat_port_missing_ip(self): + # NOTE(kevinbenton): this is a contrived scenario to reproduce + # a condition observed in bug/1609540. Once we figure out why + # these ports lose their IP we can remove this test. + router, subnet_v4, subnet_v6 = self._setup_router_with_v4_and_v6() + self.mixin.remove_router_interface( + self.ctx, router['id'], + {'subnet_id': subnet_v4['subnet']['id']}) + csnat_filters = {'device_owner': + [l3_const.DEVICE_OWNER_ROUTER_SNAT]} + csnat_ports = self.core_plugin.get_ports( + self.ctx, filters=csnat_filters) + self.core_plugin.update_port(self.ctx, csnat_ports[0]['id'], + {'port': {'fixed_ips': []}}) + self.mixin.remove_router_interface( + self.ctx, router['id'], + {'subnet_id': subnet_v6['subnet']['id']}) def test__validate_router_migration_notify_advanced_services(self): router = {'name': 'foo_router', 'admin_state_up': False} @@ -625,7 +684,7 @@ elif action == 'del': self.mixin.delete_arp_entry_for_dvr_service_port( self.ctx, port) - self.assertTrue(3, l3_notify.del_arp_entry.call_count) + self.assertEqual(3, l3_notify.del_arp_entry.call_count) def test_update_arp_entry_for_dvr_service_port_added(self): action = 'add' diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_l3_hamode_db.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_l3_hamode_db.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_l3_hamode_db.py 2016-06-03 15:08:31.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_l3_hamode_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -54,7 +54,7 @@ super(L3HATestFramework, self).setUp() self.admin_ctx = context.get_admin_context() - self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin') + self.setup_coreplugin('ml2') self.core_plugin = manager.NeutronManager.get_plugin() notif_p = mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin, '_notify_ha_interfaces_updated') @@ -65,7 +65,7 @@ self.plugin.router_scheduler = l3_agent_scheduler.ChanceScheduler() self.agent1 = helpers.register_l3_agent() self.agent2 = helpers.register_l3_agent( - 'host_2', n_const.L3_AGENT_MODE_DVR_SNAT) + 'host_2', constants.L3_AGENT_MODE_DVR_SNAT) def _create_router(self, ha=True, tenant_id='tenant1', distributed=None, ctx=None, admin_state_up=True): @@ -122,6 +122,10 @@ l3_ext_ha_mode.HAMinimumAgentsNumberNotValid, 
self.plugin._check_num_agents_per_router) + def test_verify_configuration_min_l3_agents_per_router_eq_one(self): + cfg.CONF.set_override('min_l3_agents_per_router', 1) + self.plugin._check_num_agents_per_router() + def test_verify_configuration_max_l3_agents_below_min_l3_agents(self): cfg.CONF.set_override('max_l3_agents_per_router', 3) cfg.CONF.set_override('min_l3_agents_per_router', 4) @@ -546,6 +550,27 @@ network.network_id) self.assertEqual(allocs_before, allocs_after) + def test_migration_delete_ha_network_if_last_router(self): + router = self._create_router() + + self._migrate_router(router['id'], False) + self.assertIsNone( + self.plugin.get_ha_network(self.admin_ctx, router['tenant_id'])) + + def test_migration_no_delete_ha_network_if_not_last_router(self): + router = self._create_router() + router2 = self._create_router() + + network = self.plugin.get_ha_network(self.admin_ctx, + router['tenant_id']) + network2 = self.plugin.get_ha_network(self.admin_ctx, + router2['tenant_id']) + self.assertEqual(network, network2) + + self._migrate_router(router['id'], False) + self.assertIsNotNone( + self.plugin.get_ha_network(self.admin_ctx, router2['tenant_id'])) + def test_one_ha_router_one_not(self): self._create_router(ha=False) self._create_router() @@ -643,6 +668,14 @@ self.admin_ctx, router_db) self.assertTrue(_create_ha_interfaces.called) + def test_create_ha_interfaces_and_ensure_network_interface_failure(self): + + def _create_ha_interfaces(ctx, rdb, ha_net): + raise ValueError('broken') + with testtools.ExpectedException(ValueError): + self._test_ensure_with_patched_int_create(_create_ha_interfaces) + self.assertEqual([], self.core_plugin.get_networks(self.admin_ctx)) + def test_create_ha_interfaces_and_ensure_network_concurrent_delete(self): orig_create = self.plugin._create_ha_interfaces @@ -744,6 +777,34 @@ self.assertEqual(states[router['id']], router[n_const.HA_ROUTER_STATE_KEY]) + def test_sync_ha_router_info_ha_interface_port_concurrently_deleted(self): + router1 = self._create_router() + router2 = self._create_router() + + # retrieve all router ha port bindings + bindings = self.plugin.get_ha_router_port_bindings( + self.admin_ctx, [router1['id'], router2['id']]) + self.assertEqual(4, len(bindings)) + + routers = self.plugin.get_ha_sync_data_for_host( + self.admin_ctx, self.agent1['host'], self.agent1) + self.assertEqual(2, len(routers)) + + bindings = self.plugin.get_ha_router_port_bindings( + self.admin_ctx, [router1['id'], router2['id']], + self.agent1['host']) + self.assertEqual(2, len(bindings)) + + fake_binding = mock.Mock() + fake_binding.router_id = router2['id'] + fake_binding.port = None + with mock.patch.object( + self.plugin, "get_ha_router_port_bindings", + return_value=[bindings[0], fake_binding]): + routers = self.plugin.get_ha_sync_data_for_host( + self.admin_ctx, self.agent1['host'], self.agent1) + self.assertEqual(1, len(routers)) + def test_set_router_states_handles_concurrently_deleted_router(self): router1 = self._create_router() router2 = self._create_router() @@ -778,7 +839,7 @@ # Test setup registers two l3 agents. # Register another l3 agent with dvr mode and assert that # get_number_of_ha_agent_candidates return 2. - helpers.register_l3_agent('host_3', n_const.L3_AGENT_MODE_DVR) + helpers.register_l3_agent('host_3', constants.L3_AGENT_MODE_DVR) num_ha_candidates = self.plugin.get_number_of_agents_for_scheduling( self.admin_ctx) self.assertEqual(2, num_ha_candidates) @@ -791,7 +852,7 @@ # Test setup registers two l3 agents. 
# Register another l3 agent with dvr mode and assert that # get_number_of_ha_agent_candidates return 2. - helpers.register_l3_agent('host_3', n_const.L3_AGENT_MODE_DVR_SNAT) + helpers.register_l3_agent('host_3', constants.L3_AGENT_MODE_DVR_SNAT) num_ha_candidates = self.plugin.get_number_of_agents_for_scheduling( self.admin_ctx) self.assertEqual(3, num_ha_candidates) @@ -860,9 +921,13 @@ self.core_plugin.get_networks(self.admin_ctx)] self.assertIn('HA network tenant %s' % router1['tenant_id'], nets_before) + ha_network = self.plugin.get_ha_network(self.admin_ctx, + router1['tenant_id']) with mock.patch.object(self.plugin, '_delete_ha_network', side_effect=exception): - self.plugin.delete_router(self.admin_ctx, router1['id']) + self.plugin.safe_delete_ha_network(self.admin_ctx, + ha_network, + router1['tenant_id']) nets_after = [net['name'] for net in self.core_plugin.get_networks(self.admin_ctx)] self.assertIn('HA network tenant %s' % router1['tenant_id'], @@ -989,7 +1054,7 @@ def test_ensure_host_set_on_ports_dvr_ha_binds_to_active(self): agent3 = helpers.register_l3_agent('host_3', - n_const.L3_AGENT_MODE_DVR_SNAT) + constants.L3_AGENT_MODE_DVR_SNAT) ext_net = self._create_network(self.core_plugin, self.admin_ctx, external=True) int_net = self._create_network(self.core_plugin, self.admin_ctx) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_model_base.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_model_base.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_model_base.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_model_base.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,50 @@ +# Copyright (c) 2016 Mirantis, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
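
The new test_model_base.py that starts here expects model_base.get_unique_keys to report both single-column unique=True columns and composite UniqueConstraints. A plausible sketch of such a helper, inferred from those tests rather than copied from neutron: SQLAlchemy registers a unique=True column as a UniqueConstraint on the Table, so inspecting the constraints collection covers both cases.

    import sqlalchemy as sa


    def get_unique_keys(model):
        """Return the column-name lists of the model's unique constraints."""
        try:
            constraints = model.__table__.constraints
        except AttributeError:
            return []  # not a mapped model (e.g. None)
        return [[col.name for col in constraint.columns]
                for constraint in constraints
                if isinstance(constraint, sa.UniqueConstraint)]


    class FakeModel(object):
        pass

    FakeModel.__table__ = sa.Table(
        "test_table", sa.MetaData(),
        sa.Column("a", sa.Integer, unique=True),
        sa.Column("b", sa.Integer),
        sa.Column("c", sa.Integer),
        sa.Column("d", sa.Integer),
        sa.UniqueConstraint("c", "d"))

    print({tuple(sorted(key)) for key in get_unique_keys(FakeModel)})
    # {('a',), ('c', 'd')}
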
+ +import mock +import sqlalchemy as sa + +from neutron.db import model_base +from neutron.tests import base as test_base + + +class GetUniqueKeysTestCase(test_base.BaseTestCase): + + def test_with_unique_constraints(self): + model = mock.Mock() + metadata = sa.MetaData() + model.__table__ = sa.Table( + "test_table", metadata, + sa.Column("a", sa.Integer, unique=True), + sa.Column("b", sa.Integer), + sa.Column("c", sa.Integer), + sa.Column("d", sa.Integer), + sa.UniqueConstraint("c", "d")) + expected = {("a",), ("c", "d")} + observed = {tuple(sorted(key)) for key in + model_base.get_unique_keys(model)} + self.assertEqual(expected, observed) + + def test_without_unique_constraints(self): + model = mock.Mock() + metadata = sa.MetaData() + model.__table__ = sa.Table( + "test_table", metadata, + sa.Column("a", sa.Integer), + sa.Column("b", sa.Integer)) + self.assertEqual([], model_base.get_unique_keys(model)) + + def test_not_a_model(self): + self.assertEqual([], model_base.get_unique_keys(None)) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_portsecurity_db_common.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_portsecurity_db_common.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_portsecurity_db_common.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_portsecurity_db_common.py 2016-08-03 20:10:34.000000000 +0000 @@ -11,66 +11,69 @@ # under the License. import mock -from sqlalchemy.orm import exc +from neutron.db import common_db_mixin from neutron.db import portsecurity_db_common as pdc from neutron.extensions import portsecurity as psec +from neutron.objects import base as objects_base +from neutron.objects.network.extensions import port_security as n_ps +from neutron.objects.port.extensions import port_security as p_ps from neutron.tests import base -common = pdc.PortSecurityDbCommon +class FakePlugin(pdc.PortSecurityDbCommon, common_db_mixin.CommonDbMixin): + pass class PortSecurityDbCommonTestCase(base.BaseTestCase): def setUp(self): super(PortSecurityDbCommonTestCase, self).setUp() - self.common = common() + self.plugin = FakePlugin() def _test__get_security_binding_no_binding(self, getter): port_sec_enabled = True req = {psec.PORTSECURITY: port_sec_enabled} res = {} with mock.patch.object( - self.common, '_model_query', - create=True, - side_effect=exc.NoResultFound): + objects_base.NeutronDbObject, 'get_object', + return_value=None): val = getter(req, res) self.assertEqual(port_sec_enabled, val) def test__get_port_security_binding_no_binding(self): self._test__get_security_binding_no_binding( - self.common._get_port_security_binding) + self.plugin._get_port_security_binding) def test__get_network_security_binding_no_binding(self): self._test__get_security_binding_no_binding( - self.common._get_network_security_binding) + self.plugin._get_network_security_binding) - def _test__process_security_update_no_binding(self, creator, updater): + def _test__process_security_update_no_binding(self, res_name, obj_cls, + updater): req = {psec.PORTSECURITY: False} - res = {} - context = mock.Mock() + res = {'id': 'fake-id'} + context = mock.MagicMock() with mock.patch.object( - self.common, '_model_query', - create=True, - side_effect=exc.NoResultFound): - updater(context, req, res) - creator.assert_called_with(context, req, res) - - @mock.patch.object(common, '_process_port_port_security_create') - def test__process_port_port_security_update_no_binding(self, creator): - self._test__process_security_update_no_binding( - 
creator, - self.common._process_port_port_security_update) - - @mock.patch.object(common, '_process_network_port_security_create') - def test__process_network_port_security_update_no_binding(self, creator): - self._test__process_security_update_no_binding( - creator, - self.common._process_network_port_security_update) + self.plugin, '_process_port_security_create') as creator: + with mock.patch.object( + objects_base.NeutronDbObject, 'get_object', + return_value=None): + updater(context, req, res) + creator.assert_called_with(context, obj_cls, res_name, req, res) + + def test__process_port_port_security_update_no_binding(self): + self._test__process_security_update_no_binding( + 'port', p_ps.PortSecurity, + self.plugin._process_port_port_security_update) + + def test__process_network_port_security_update_no_binding(self): + self._test__process_security_update_no_binding( + 'network', n_ps.NetworkPortSecurity, + self.plugin._process_network_port_security_update) def test__extend_port_security_dict_no_port_security(self): for db_data in ({'port_security': None, 'name': 'net1'}, {}): response_data = {} - self.common._extend_port_security_dict(response_data, db_data) + self.plugin._extend_port_security_dict(response_data, db_data) self.assertTrue(response_data[psec.PORTSECURITY]) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_provisioning_blocks.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_provisioning_blocks.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_provisioning_blocks.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_provisioning_blocks.py 2016-08-03 20:10:34.000000000 +0000 @@ -94,6 +94,18 @@ resources.PORT, 'entity2') self.assertFalse(self.provisioned.called) + def test_is_object_blocked(self): + pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT, + 'e1') + self.assertTrue(pb.is_object_blocked(self.ctx, self.port.id, + resources.PORT)) + self.assertFalse(pb.is_object_blocked(self.ctx, 'xyz', + resources.PORT)) + pb.provisioning_complete(self.ctx, self.port.id, + resources.PORT, 'e1') + self.assertFalse(pb.is_object_blocked(self.ctx, self.port.id, + resources.PORT)) + def test_remove_provisioning_component(self): pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT, 'e1') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_securitygroups_db.py neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_securitygroups_db.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/db/test_securitygroups_db.py 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/db/test_securitygroups_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -214,13 +214,19 @@ 'precommit_update', mock.ANY, context=mock.ANY, security_group=mock.ANY, security_group_id=sg_dict['id'])]) - def test_security_group_precommit_delete_event(self): + def test_security_group_precommit_and_after_delete_event(self): sg_dict = self.mixin.create_security_group(self.ctx, FAKE_SECGROUP) with mock.patch.object(registry, "notify") as mock_notify: self.mixin.delete_security_group(self.ctx, sg_dict['id']) - mock_notify.assert_has_calls([mock.call('security_group', - 'precommit_delete', mock.ANY, context=mock.ANY, - security_group=mock.ANY, security_group_id=sg_dict['id'])]) + mock_notify.assert_has_calls( + [mock.call('security_group', 'precommit_delete', + mock.ANY, context=mock.ANY, security_group=mock.ANY, + security_group_id=sg_dict['id'], + security_group_rule_ids=[mock.ANY, mock.ANY]), + 
mock.call('security_group', 'after_delete', + mock.ANY, context=mock.ANY, + security_group_id=sg_dict['id'], + security_group_rule_ids=[mock.ANY, mock.ANY])]) def test_security_group_rule_precommit_create_event_fail(self): registry.subscribe(fake_callback, resources.SECURITY_GROUP_RULE, diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/debug/test_commands.py neutron-9.0.0~b3~dev557/neutron/tests/unit/debug/test_commands.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/debug/test_commands.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/debug/test_commands.py 2016-08-29 20:05:49.000000000 +0000 @@ -173,7 +173,7 @@ 'fake_port', 'tap12345678-12', 'aa:bb:cc:dd:ee:ffa', - bridge='br-ex', + bridge='', namespace=namespace), mock.call.init_l3('tap12345678-12', ['10.0.0.3/24'], @@ -223,7 +223,7 @@ self.driver.assert_has_calls([mock.call.get_device_name(mock.ANY), mock.call.unplug('tap12345678-12', namespace=namespace, - bridge='br-ex')]) + bridge='')]) def test_list_probe(self): cmd = commands.ListProbe(self.app, None) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_agent.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_agent.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_agent.py 2016-08-03 20:10:34.000000000 +0000 @@ -23,7 +23,6 @@ from webob import exc from neutron.api.v2 import attributes -from neutron.common import constants as n_const from neutron import context from neutron.db import agents_db from neutron.db import db_base_plugin_v2 @@ -83,9 +82,9 @@ def _register_agent_states(self, lbaas_agents=False): """Register two L3 agents and two DHCP agents.""" l3_hosta = helpers._get_l3_agent_dict( - L3_HOSTA, n_const.L3_AGENT_MODE_LEGACY) + L3_HOSTA, constants.L3_AGENT_MODE_LEGACY) l3_hostb = helpers._get_l3_agent_dict( - L3_HOSTB, n_const.L3_AGENT_MODE_LEGACY) + L3_HOSTB, constants.L3_AGENT_MODE_LEGACY) dhcp_hosta = helpers._get_dhcp_agent_dict(DHCP_HOSTA) dhcp_hostc = helpers._get_dhcp_agent_dict(DHCP_HOSTC) helpers.register_l3_agent(host=L3_HOSTA) @@ -118,9 +117,9 @@ def _register_dvr_agents(self): dvr_snat_agent = helpers.register_l3_agent( - host=L3_HOSTA, agent_mode=n_const.L3_AGENT_MODE_DVR_SNAT) + host=L3_HOSTA, agent_mode=constants.L3_AGENT_MODE_DVR_SNAT) dvr_agent = helpers.register_l3_agent( - host=L3_HOSTB, agent_mode=n_const.L3_AGENT_MODE_DVR) + host=L3_HOSTB, agent_mode=constants.L3_AGENT_MODE_DVR) return [dvr_snat_agent, dvr_agent] def _register_l3_agent(self, host): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_availability_zone.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_availability_zone.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_availability_zone.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_availability_zone.py 2016-08-03 20:10:34.000000000 +0000 @@ -94,9 +94,8 @@ class TestAZNetworkCase(AZTestCommon): def setUp(self): - plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' ext_mgr = AZExtensionManager() - super(TestAZNetworkCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) + super(TestAZNetworkCase, self).setUp(plugin='ml2', ext_mgr=ext_mgr) def test_availability_zones_in_create_response(self): with self.network() as net: diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_default_subnetpools.py 
neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_default_subnetpools.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_default_subnetpools.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_default_subnetpools.py 2016-08-29 20:05:49.000000000 +0000 @@ -79,32 +79,6 @@ self.assertEqual(27, ip_net.prefixlen) self.assertEqual(subnetpool_id, subnet['subnetpool_id']) - def test_create_subnet_only_ip_version_v4_old(self): - # TODO(john-davidge): Remove after Mitaka release. - with self.network() as network: - tenant_id = network['network']['tenant_id'] - subnetpool_prefix = '10.0.0.0/8' - with self.subnetpool(prefixes=[subnetpool_prefix], - admin=False, - name="My subnet pool", - tenant_id=tenant_id, - min_prefixlen='25') as subnetpool: - subnetpool_id = subnetpool['subnetpool']['id'] - cfg.CONF.set_override('default_ipv4_subnet_pool', - subnetpool_id) - data = {'subnet': {'network_id': network['network']['id'], - 'ip_version': '4', - 'prefixlen': '27', - 'tenant_id': tenant_id, - 'use_default_subnetpool': True}} - subnet_req = self.new_create_request('subnets', data) - res = subnet_req.get_response(self.api) - subnet = self.deserialize(self.fmt, res)['subnet'] - ip_net = netaddr.IPNetwork(subnet['cidr']) - self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix)) - self.assertEqual(27, ip_net.prefixlen) - self.assertEqual(subnetpool_id, subnet['subnetpool_id']) - def test_create_subnet_only_ip_version_v6(self): # this test mirrors its v4 counterpart with self.network() as network: @@ -120,32 +94,6 @@ cfg.CONF.set_override('ipv6_pd_enabled', False) data = {'subnet': {'network_id': network['network']['id'], 'ip_version': '6', - 'tenant_id': tenant_id, - 'use_default_subnetpool': True}} - subnet_req = self.new_create_request('subnets', data) - res = subnet_req.get_response(self.api) - subnet = self.deserialize(self.fmt, res)['subnet'] - self.assertEqual(subnetpool_id, subnet['subnetpool_id']) - ip_net = netaddr.IPNetwork(subnet['cidr']) - self.assertIn(ip_net, netaddr.IPNetwork(subnetpool_prefix)) - self.assertEqual(64, ip_net.prefixlen) - - def test_create_subnet_only_ip_version_v6_old(self): - # TODO(john-davidge): Remove after Mitaka release. 
- with self.network() as network: - tenant_id = network['network']['tenant_id'] - subnetpool_prefix = '2000::/56' - with self.subnetpool(prefixes=[subnetpool_prefix], - admin=False, - name="My ipv6 subnet pool", - tenant_id=tenant_id, - min_prefixlen='64') as subnetpool: - subnetpool_id = subnetpool['subnetpool']['id'] - cfg.CONF.set_override('default_ipv6_subnet_pool', - subnetpool_id) - cfg.CONF.set_override('ipv6_pd_enabled', False) - data = {'subnet': {'network_id': network['network']['id'], - 'ip_version': '6', 'tenant_id': tenant_id, 'use_default_subnetpool': True}} subnet_req = self.new_create_request('subnets', data) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_dns.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_dns.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_dns.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_dns.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,16 +15,17 @@ import math import netaddr -from neutron_lib import constants as n_const +from neutron_lib import constants from oslo_config import cfg -from neutron.common import constants from neutron.common import utils from neutron import context from neutron.db import db_base_plugin_v2 from neutron.extensions import dns from neutron import manager +from neutron.plugins.ml2 import config from neutron.tests.unit.db import test_db_base_plugin_v2 +from neutron.tests.unit.plugins.ml2 import test_plugin class DnsExtensionManager(object): @@ -49,15 +50,17 @@ supported_extension_aliases = ["dns-integration", "router"] -class DnsExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2): +class DnsExtensionTestCase(test_plugin.Ml2PluginV2TestCase): """Test API extension dns attributes. """ + _extension_drivers = ['dns'] + def setUp(self): - plugin = ('neutron.tests.unit.extensions.test_dns.' + - 'DnsExtensionTestPlugin') - ext_mgr = DnsExtensionManager() - super(DnsExtensionTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) + config.cfg.CONF.set_override('extension_drivers', + self._extension_drivers, + group='ml2') + super(DnsExtensionTestCase, self).setUp() def _create_network(self, fmt, name, admin_state_up, arg_list=None, set_context=False, tenant_id=None, @@ -88,7 +91,7 @@ data['port'][arg] = kwargs[arg] # create a dhcp port device id if one hasn't been supplied if ('device_owner' in kwargs and - kwargs['device_owner'] == n_const.DEVICE_OWNER_DHCP and + kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and 'host' in kwargs and 'device_id' not in kwargs): device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host']) @@ -496,6 +499,16 @@ dns_name=dns_name) self.assertEqual(201, res.status_code) + +class DnsExtensionTestNetworkDnsDomain( + test_db_base_plugin_v2.NeutronDbPluginV2TestCase): + def setUp(self): + plugin = ('neutron.tests.unit.extensions.test_dns.' 
+ + 'DnsExtensionTestPlugin') + ext_mgr = DnsExtensionManager() + super(DnsExtensionTestNetworkDnsDomain, self).setUp( + plugin=plugin, ext_mgr=ext_mgr) + def test_update_network_dns_domain(self): with self.network() as network: data = {'network': {'dns_domain': 'my-domain.org.'}} diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_flavors.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_flavors.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_flavors.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_flavors.py 2016-08-29 20:05:49.000000000 +0000 @@ -461,7 +461,7 @@ self.service_manager.add_provider_configuration( provider.split(':')[0], provconf.ProviderConfiguration()) - dbapi.get_engine() + dbapi.context_manager.get_legacy_facade().get_engine() def _create_flavor(self, description=None): flavor = {'flavor': {'name': 'GOLD', diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py 2016-06-01 18:00:21.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py 2016-08-03 20:10:34.000000000 +0000 @@ -23,6 +23,7 @@ import testscenarios from webob import exc +from neutron import context as nctx from neutron.db import api as db_api from neutron.db import external_net_db from neutron.db import l3_db @@ -30,6 +31,7 @@ from neutron.db import models_v2 from neutron.extensions import l3 from neutron.extensions import l3_ext_gw_mode +from neutron import manager from neutron.tests import base from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_l3 @@ -390,6 +392,20 @@ expected_code=expected_code, neutron_context=neutron_context) + def test_router_gateway_set_fail_after_port_create(self): + with self.router() as r, self.subnet() as s: + ext_net_id = s['subnet']['network_id'] + self._set_net_external(ext_net_id) + plugin = manager.NeutronManager.get_plugin() + with mock.patch.object(plugin, '_get_port', + side_effect=ValueError()): + self._set_router_external_gateway(r['router']['id'], + ext_net_id, + expected_code=500) + ports = [p for p in plugin.get_ports(nctx.get_admin_context()) + if p['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_GW] + self.assertFalse(ports) + def test_router_gateway_set_retry(self): with self.router() as r, self.subnet() as s: ext_net_id = s['subnet']['network_id'] diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_l3.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_l3.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_l3.py 2016-06-27 15:08:17.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_l3.py 2016-08-29 20:05:49.000000000 +0000 @@ -19,7 +19,7 @@ import mock import netaddr -from neutron_lib import constants as l3_constants +from neutron_lib import constants as lib_constants from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_utils import importutils @@ -34,7 +34,6 @@ from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources -from neutron.common import constants as n_const from neutron import context from neutron.db import common_db_mixin from neutron.db import db_base_plugin_v2 @@ -66,7 +65,7 @@ _get_path = test_base._get_path 
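
test_router_gateway_set_fail_after_port_create above and test_router_add_interface_delete_port_after_failure further down share one technique: patch an internal call so it raises partway through an operation, then assert that the plugin's compensating cleanup left no orphaned port behind. A self-contained toy version of the pattern, using unittest.mock where these tests import the external mock package:

    import unittest
    from unittest import mock


    class Service(object):
        """Toy stand-in for a plugin whose create() must undo partial work."""

        def __init__(self):
            self.ports = []

        def _bind(self, port):
            return port

        def create(self, port):
            self.ports.append(port)      # step 1: record the resource
            try:
                return self._bind(port)  # step 2: may blow up
            except Exception:
                self.ports.remove(port)  # compensate, then re-raise
                raise


    class FailureInjectionTest(unittest.TestCase):
        def test_no_orphan_on_bind_failure(self):
            svc = Service()
            with mock.patch.object(Service, '_bind', side_effect=ValueError):
                self.assertRaises(ValueError, svc.create, 'p1')
            # the failed create must not leave a half-created port behind
            self.assertEqual([], svc.ports)
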
diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_flavors.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_flavors.py
--- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_flavors.py 2016-06-08 18:00:11.000000000 +0000
+++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_flavors.py 2016-08-29 20:05:49.000000000 +0000
@@ -461,7 +461,7 @@
         self.service_manager.add_provider_configuration(
             provider.split(':')[0], provconf.ProviderConfiguration())
 
-        dbapi.get_engine()
+        dbapi.context_manager.get_legacy_facade().get_engine()
 
     def _create_flavor(self, description=None):
         flavor = {'flavor': {'name': 'GOLD',
diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py
--- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py 2016-06-01 18:00:21.000000000 +0000
+++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_l3_ext_gw_mode.py 2016-08-03 20:10:34.000000000 +0000
@@ -23,6 +23,7 @@
 import testscenarios
 from webob import exc
 
+from neutron import context as nctx
 from neutron.db import api as db_api
 from neutron.db import external_net_db
 from neutron.db import l3_db
@@ -30,6 +31,7 @@
 from neutron.db import models_v2
 from neutron.extensions import l3
 from neutron.extensions import l3_ext_gw_mode
+from neutron import manager
 from neutron.tests import base
 from neutron.tests.unit.db import test_db_base_plugin_v2
 from neutron.tests.unit.extensions import test_l3
@@ -390,6 +392,20 @@
                                 expected_code=expected_code,
                                 neutron_context=neutron_context)
 
+    def test_router_gateway_set_fail_after_port_create(self):
+        with self.router() as r, self.subnet() as s:
+            ext_net_id = s['subnet']['network_id']
+            self._set_net_external(ext_net_id)
+            plugin = manager.NeutronManager.get_plugin()
+            with mock.patch.object(plugin, '_get_port',
+                                   side_effect=ValueError()):
+                self._set_router_external_gateway(r['router']['id'],
+                                                  ext_net_id,
+                                                  expected_code=500)
+            ports = [p for p in plugin.get_ports(nctx.get_admin_context())
+                     if p['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_GW]
+            self.assertFalse(ports)
+
     def test_router_gateway_set_retry(self):
         with self.router() as r, self.subnet() as s:
             ext_net_id = s['subnet']['network_id']
diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_l3.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_l3.py
--- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_l3.py 2016-06-27 15:08:17.000000000 +0000
+++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_l3.py 2016-08-29 20:05:49.000000000 +0000
@@ -19,7 +19,7 @@
 
 import mock
 import netaddr
-from neutron_lib import constants as l3_constants
+from neutron_lib import constants as lib_constants
 from neutron_lib import exceptions as n_exc
 from oslo_config import cfg
 from oslo_utils import importutils
@@ -34,7 +34,6 @@
 from neutron.callbacks import exceptions
 from neutron.callbacks import registry
 from neutron.callbacks import resources
-from neutron.common import constants as n_const
 from neutron import context
 from neutron.db import common_db_mixin
 from neutron.db import db_base_plugin_v2
@@ -66,7 +65,7 @@
 
 _get_path = test_base._get_path
 
-DEVICE_OWNER_COMPUTE = l3_constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
+DEVICE_OWNER_COMPUTE = lib_constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
 
 
 class L3TestExtensionManager(object):
@@ -322,7 +321,7 @@
         self.router_scheduler = importutils.import_object(
             cfg.CONF.router_scheduler_driver)
         self.agent_notifiers.update(
-            {l3_constants.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
+            {lib_constants.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
 
 
 class L3NatTestCaseMixin(object):
@@ -932,8 +931,8 @@
                     self._create_subnet(self.fmt,
                                         net_id=n['network']['id'],
                                         ip_version=6, cidr='2001:db8:1::/64',
-                                        ipv6_ra_mode=n_const.IPV6_SLAAC,
-                                        ipv6_address_mode=n_const.IPV6_SLAAC))
+                                        ipv6_ra_mode=lib_constants.IPV6_SLAAC,
+                                        ipv6_address_mode=lib_constants.IPV6_SLAAC))
                 res3 = self._show('routers', r['router']['id'])
                 fips = (res3['router']['external_gateway_info']
                         ['external_fixed_ips'])
@@ -1015,9 +1014,9 @@
         Verify the valid use-cases of an IPv6 subnet where we
         are allowed to associate to the Neutron Router are successful.
         """
-        slaac = n_const.IPV6_SLAAC
-        stateful = n_const.DHCPV6_STATEFUL
-        stateless = n_const.DHCPV6_STATELESS
+        slaac = lib_constants.IPV6_SLAAC
+        stateful = lib_constants.DHCPV6_STATEFUL
+        stateless = lib_constants.DHCPV6_STATELESS
         use_cases = [{'msg': 'IPv6 Subnet Modes (slaac, none)',
                       'ra_mode': slaac, 'address_mode': None},
                      {'msg': 'IPv6 Subnet Modes (none, none)',
@@ -1136,13 +1135,13 @@
         """
         use_cases = [{'msg': 'IPv6 Subnet Modes (none, slaac)',
                       'ra_mode': None,
-                      'address_mode': n_const.IPV6_SLAAC},
+                      'address_mode': lib_constants.IPV6_SLAAC},
                      {'msg': 'IPv6 Subnet Modes (none, dhcpv6-stateful)',
                       'ra_mode': None,
-                      'address_mode': n_const.DHCPV6_STATEFUL},
+                      'address_mode': lib_constants.DHCPV6_STATEFUL},
                      {'msg': 'IPv6 Subnet Modes (none, dhcpv6-stateless)',
                       'ra_mode': None,
-                      'address_mode': n_const.DHCPV6_STATELESS}]
+                      'address_mode': lib_constants.DHCPV6_STATELESS}]
         for uc in use_cases:
             with self.router() as r, self.network() as n:
                 with self.subnet(network=n, cidr='fd00::1/64',
@@ -1227,9 +1226,9 @@
             self.assertIn('port_id', body)
             self.assertEqual(p['port']['id'], body['port_id'])
             expected_port_update = {
-                'device_owner': l3_constants.DEVICE_OWNER_ROUTER_INTF,
+                'device_owner': lib_constants.DEVICE_OWNER_ROUTER_INTF,
                 'device_id': r['router']['id']}
-            update_port.assert_called_with(
+            update_port.assert_any_call(
                 mock.ANY, p['port']['id'], {'port': expected_port_update})
             # fetch port and confirm device_id
             body = self._show('ports', p['port']['id'])
@@ -1241,6 +1240,77 @@
                                           None,
                                           p['port']['id'])
 
+    def test_router_add_interface_delete_port_after_failure(self):
+        with self.router() as r, self.subnet(enable_dhcp=False) as s:
+            plugin = manager.NeutronManager.get_plugin()
+            # inject a failure in the update port that happens at the end
+            # to ensure the port gets deleted
+            with mock.patch.object(
+                    plugin, 'update_port',
+                    side_effect=n_exc.InvalidInput(error_message='x')):
+                self._router_interface_action('add',
+                                              r['router']['id'],
+                                              s['subnet']['id'],
+                                              None,
+                                              exc.HTTPBadRequest.code)
+                self.assertFalse(plugin.get_ports(context.get_admin_context()))
+
+    def test_router_add_interface_dup_port(self):
+        '''Test that a port cannot be attached to more than one router.
+
+        When multiple routers add the same port as their interface, only
+        the first request succeeds; all later requests return an error.
+        '''
+        with self.router() as r1, self.router() as r2, self.network() as n:
+            with self.subnet(network=n) as s:
+                with self.port(subnet=s) as p:
+                    self._router_interface_action('add',
+                                                  r1['router']['id'],
+                                                  None,
+                                                  p['port']['id'])
+                    # mock out the sequential check
+                    plugin = 'neutron.db.l3_db.L3_NAT_dbonly_mixin'
+                    check_p = mock.patch(plugin + '._check_router_port',
+                                         port_id=p['port']['id'],
+                                         device_id=r2['router']['id'],
+                                         return_value=p['port'])
+                    checkport = check_p.start()
+                    # do regular checkport after first skip
+                    checkport.side_effect = check_p.stop()
+                    self._router_interface_action('add',
+                                                  r2['router']['id'],
+                                                  None,
+                                                  p['port']['id'],
+                                                  exc.HTTPConflict.code)
+                    # clean-up
+                    self._router_interface_action('remove',
+                                                  r1['router']['id'],
+                                                  None,
+                                                  p['port']['id'])
+
+    def _assert_body_port_id_and_update_port(self, body, mock_update_port,
+                                             port_id, device_id):
+        self.assertNotIn('port_id', body)
+        expected_port_update_before_update = {
+            'device_owner': lib_constants.DEVICE_OWNER_ROUTER_INTF,
+            'device_id': device_id}
+        expected_port_update_after_fail = {
+            'device_owner': '',
+            'device_id': ''}
+        mock_update_port.assert_has_calls(
+            [mock.call(
+                 mock.ANY,
+                 port_id,
+                 {'port': expected_port_update_before_update}),
+             mock.call(
+                 mock.ANY,
+                 port_id,
+                 {'port': expected_port_update_after_fail})],
+            any_order=False)
+        # fetch port and confirm device_id and device_owner
+        body = self._show('ports', port_id)
+        self.assertEqual('', body['port']['device_owner'])
+        self.assertEqual('', body['port']['device_id'])
+
     def test_router_add_interface_multiple_ipv4_subnet_port_returns_400(self):
         """Test adding router port with multiple IPv4 subnets fails.
 
@@ -1252,13 +1322,17 @@
                     self.subnet(network=n, cidr='10.0.1.0/24')) as s2:
                 fixed_ips = [{'subnet_id': s1['subnet']['id']},
                              {'subnet_id': s2['subnet']['id']}]
-                with self.port(subnet=s1, fixed_ips=fixed_ips) as p:
+                orig_update_port = self.plugin.update_port
+                with self.port(subnet=s1, fixed_ips=fixed_ips) as p, (
+                        mock.patch.object(self.plugin,
+                                          'update_port')) as update_port:
+                    update_port.side_effect = orig_update_port
                     exp_code = exc.HTTPBadRequest.code
-                    self._router_interface_action('add',
-                                                  r['router']['id'],
-                                                  None,
-                                                  p['port']['id'],
-                                                  expected_code=exp_code)
+                    body = self._router_interface_action(
+                        'add', r['router']['id'], None, p['port']['id'],
+                        expected_code=exp_code)
+                    self._assert_body_port_id_and_update_port(
+                        body, update_port, p['port']['id'], r['router']['id'])
 
     def test_router_add_interface_ipv6_port_existing_network_returns_400(self):
         """Ensure unique IPv6 router ports per network id.
@@ -1273,17 +1347,21 @@
                              ip_version=6) as s1, (
                 self.subnet(network=n, cidr='fd01::/64',
                             ip_version=6)) as s2:
-            with self.port(subnet=s1) as p:
+            orig_update_port = self.plugin.update_port
+            with self.port(subnet=s1) as p, (
+                    mock.patch.object(self.plugin,
+                                      'update_port')) as update_port:
+                update_port.side_effect = orig_update_port
                 self._router_interface_action('add',
                                               r['router']['id'],
                                               s2['subnet']['id'],
                                               None)
                 exp_code = exc.HTTPBadRequest.code
-                self._router_interface_action('add',
-                                              r['router']['id'],
-                                              None,
-                                              p['port']['id'],
-                                              expected_code=exp_code)
+                body = self._router_interface_action(
+                    'add', r['router']['id'], None, p['port']['id'],
+                    expected_code=exp_code)
+                self._assert_body_port_id_and_update_port(
+                    body, update_port, p['port']['id'], r['router']['id'])
                 self._router_interface_action('remove',
                                               r['router']['id'],
                                               s2['subnet']['id'],
@@ -1352,7 +1430,7 @@
         with self.network() as network, self.router() as r:
             # Create a router port without ips
             p = self._make_port(self.fmt, network['network']['id'],
-                                device_owner=l3_constants.DEVICE_OWNER_ROUTER_INTF)
+                                device_owner=lib_constants.DEVICE_OWNER_ROUTER_INTF)
             err_code = exc.HTTPBadRequest.code
             self._router_interface_action('add',
                                           r['router']['id'],
@@ -1378,18 +1456,22 @@
         with self.router() as r:
             with self.subnet() as s1, self.subnet(cidr='1.0.0.0/24') as s2:
                 with self.port(subnet=s1) as p1, self.port(subnet=s2) as p2:
-                    with self.port(subnet=s1) as p3:
+                    orig_update_port = self.plugin.update_port
+                    with self.port(subnet=s1) as p3, (
+                            mock.patch.object(self.plugin,
+                                              'update_port')) as update_port:
+                        update_port.side_effect = orig_update_port
                         for p in [p1, p2]:
                             self._router_interface_action('add',
                                                           r['router']['id'],
                                                           None,
                                                           p['port']['id'])
-                        self._router_interface_action('add',
-                                                      r['router']['id'],
-                                                      None,
-                                                      p3['port']['id'],
-                                                      expected_code=exc.
-                                                      HTTPBadRequest.code)
+                        body = self._router_interface_action(
+                            'add', r['router']['id'], None, p3['port']['id'],
+                            expected_code=exc.HTTPBadRequest.code)
+                        self._assert_body_port_id_and_update_port(
+                            body, update_port, p3['port']['id'],
+                            r['router']['id'])
 
     def test_router_add_interface_overlapped_cidr_returns_400(self):
         with self.router() as r:
@@ -1484,18 +1566,18 @@
                 self.subnet(
                     cidr='2001:db8::/64', network=n,
                     ip_version=6,
-                    ipv6_ra_mode=n_const.IPV6_SLAAC,
-                    ipv6_address_mode=n_const.IPV6_SLAAC)) as s3, (
+                    ipv6_ra_mode=lib_constants.IPV6_SLAAC,
+                    ipv6_address_mode=lib_constants.IPV6_SLAAC)) as s3, (
                 self.subnet(
                     cidr='2001:db8:1::/64', network=n,
                     ip_version=6,
-                    ipv6_ra_mode=n_const.DHCPV6_STATEFUL,
-                    ipv6_address_mode=n_const.DHCPV6_STATEFUL)) as s4, (
+                    ipv6_ra_mode=lib_constants.DHCPV6_STATEFUL,
+                    ipv6_address_mode=lib_constants.DHCPV6_STATEFUL)) as s4, (
                 self.subnet(
                     cidr='2001:db8:2::/64', network=n,
                     ip_version=6,
-                    ipv6_ra_mode=n_const.DHCPV6_STATELESS,
-                    ipv6_address_mode=n_const.DHCPV6_STATELESS)) as s5:
+                    ipv6_ra_mode=lib_constants.DHCPV6_STATELESS,
+                    ipv6_address_mode=lib_constants.DHCPV6_STATELESS)) as s5:
                 self._set_net_external(n['network']['id'])
                 self._add_external_gateway_to_router(
                     r['router']['id'],
@@ -1563,7 +1645,7 @@
         with self.network(tenant_id='tenant_a',
                           set_context=True) as n:
             with self.subnet(network=n):
-                for device_owner in l3_constants.ROUTER_INTERFACE_OWNERS:
+                for device_owner in lib_constants.ROUTER_INTERFACE_OWNERS:
                     self._create_port(
                         self.fmt, n['network']['id'],
                         tenant_id='tenant_a',
@@ -1581,7 +1663,7 @@
         with self.network(tenant_id='tenant_a',
                           set_context=True) as n:
             with self.subnet(network=n):
-                for device_owner in l3_constants.ROUTER_INTERFACE_OWNERS:
+                for device_owner in lib_constants.ROUTER_INTERFACE_OWNERS:
                     port_res = self._create_port(
                         self.fmt, n['network']['id'],
                         tenant_id='tenant_a',
@@ -1941,15 +2023,15 @@
             self.assertEqual(400, res.status_int)
             for p in self._list('ports')['ports']:
                 if (p['device_owner'] ==
-                        l3_constants.DEVICE_OWNER_FLOATINGIP):
+                        lib_constants.DEVICE_OWNER_FLOATINGIP):
                     self.fail('garbage port is not deleted')
 
     def test_floatingip_with_assoc_fails(self):
         self._test_floatingip_with_assoc_fails(
-            'neutron.db.l3_db.L3_NAT_db_mixin._check_and_get_fip_assoc')
+            'neutron.db.l3_db.L3_NAT_dbonly_mixin._check_and_get_fip_assoc')
 
     def test_create_floatingip_with_assoc(
-            self, expected_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
+            self, expected_status=lib_constants.FLOATINGIP_STATUS_ACTIVE):
         with self.floatingip_with_assoc() as fip:
             body = self._show('floatingips', fip['floatingip']['id'])
             self.assertEqual(body['floatingip']['id'],
@@ -1993,11 +2075,11 @@
         with self.floatingip_with_assoc():
             port_body = self._list('ports',
                 query_params='device_owner=network:floatingip')['ports'][0]
-            self.assertEqual(l3_constants.PORT_STATUS_NOTAPPLICABLE,
+            self.assertEqual(lib_constants.PORT_STATUS_NOTAPPLICABLE,
                              port_body['status'])
 
     def test_floatingip_update(
-            self, expected_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
+            self, expected_status=lib_constants.FLOATINGIP_STATUS_ACTIVE):
         with self.port() as p:
             private_sub = {'subnet': {'id':
                                       p['port']['fixed_ips'][0]['subnet_id']}}
@@ -2096,6 +2178,45 @@
             self.assertEqual(str(ip_range[-2]),
                              body_2['floatingip']['fixed_ip_address'])
 
+    def test_floatingip_update_invalid_fixed_ip(self):
+        with self.subnet() as s:
+            with self.port(subnet=s) as p:
+                with self.floatingip_with_assoc(
+                        port_id=p['port']['id']) as fip:
+                    self._update(
+                        'floatingips', fip['floatingip']['id'],
+                        {'floatingip': {'port_id': p['port']['id'],
+                                        'fixed_ip_address': '2001:db8::a'}},
+                        expected_code=exc.HTTPBadRequest.code)
+
+    def test_floatingip_update_to_same_port_id_twice(
+            self, expected_status=lib_constants.FLOATINGIP_STATUS_ACTIVE):
+        with self.port() as p:
+            private_sub = {'subnet': {'id':
+                                      p['port']['fixed_ips'][0]['subnet_id']}}
+            with self.floatingip_no_assoc(private_sub) as fip:
+                body = self._show('floatingips', fip['floatingip']['id'])
+                self.assertIsNone(body['floatingip']['port_id'])
+                self.assertIsNone(body['floatingip']['fixed_ip_address'])
+                self.assertEqual(expected_status, body['floatingip']['status'])
+
+                port_id = p['port']['id']
+                ip_address = p['port']['fixed_ips'][0]['ip_address']
+                # 1. Update floating IP with port_id (associate)
+                body = self._update('floatingips', fip['floatingip']['id'],
+                                    {'floatingip': {'port_id': port_id}})
+                self.assertEqual(port_id, body['floatingip']['port_id'])
+                self.assertEqual(ip_address,
+                                 body['floatingip']['fixed_ip_address'])
+
+                # 2. Update floating IP with same port again
+                body = self._update('floatingips', fip['floatingip']['id'],
+                                    {'floatingip': {'port_id': port_id}})
+                # No errors, and nothing changed
+                self.assertEqual(port_id, body['floatingip']['port_id'])
+                self.assertEqual(ip_address,
+                                 body['floatingip']['fixed_ip_address'])
+
     def test_first_floatingip_associate_notification(self):
         with self.port() as p:
             private_sub = {'subnet': {'id':
@@ -2266,7 +2387,7 @@
         found = False
         with self.floatingip_with_assoc():
             for p in self._list('ports')['ports']:
-                if p['device_owner'] == l3_constants.DEVICE_OWNER_FLOATINGIP:
+                if p['device_owner'] == lib_constants.DEVICE_OWNER_FLOATINGIP:
                     self._delete('ports', p['id'],
                                  expected_code=exc.HTTPConflict.code)
                     found = True
@@ -2402,6 +2523,12 @@
                                        uuidutils.generate_uuid(), 'iamnotnanip')
         self.assertEqual(400, res.status_int)
 
+    def test_create_floatingip_invalid_fixed_ipv6_address_returns_400(self):
+        # API-level test - no need to create all objects for l3 plugin
+        res = self._create_floatingip(self.fmt, uuidutils.generate_uuid(),
+                                      uuidutils.generate_uuid(), '2001:db8::a')
+        self.assertEqual(400, res.status_int)
+
     def test_floatingip_list_with_sort(self):
         with self.subnet(cidr="10.0.0.0/24") as s1,\
                 self.subnet(cidr="11.0.0.0/24") as s2,\
@@ -2571,7 +2698,7 @@
         ).filter(
             models_v2.Port.network_id == internal_port['network_id'],
             l3_db.RouterPort.port_type.in_(
-                l3_constants.ROUTER_INTERFACE_OWNERS
+                lib_constants.ROUTER_INTERFACE_OWNERS
             ),
             models_v2.IPAllocation.subnet_id == internal_subnet['id']
         ).join(
@@ -2610,7 +2737,7 @@
         found = False
         with self.floatingip_with_assoc():
             for p in self._list('ports')['ports']:
-                if p['device_owner'] == l3_constants.DEVICE_OWNER_ROUTER_INTF:
+                if p['device_owner'] == lib_constants.DEVICE_OWNER_ROUTER_INTF:
                     subnet_id = p['fixed_ips'][0]['subnet_id']
                     router_id = p['device_id']
                     self._router_interface_action(
@@ -2624,7 +2751,7 @@
         found = False
         with self.floatingip_with_assoc():
             for p in self._list('ports')['ports']:
-                if p['device_owner'] == l3_constants.DEVICE_OWNER_ROUTER_INTF:
+                if p['device_owner'] == lib_constants.DEVICE_OWNER_ROUTER_INTF:
                     router_id = p['device_id']
                     self._router_interface_action(
                         'remove', router_id, None, p['id'],
@@ -2656,12 +2783,12 @@
 
     def test_router_delete_ipv6_slaac_subnet_inuse_returns_409(self):
         with self.router() as r:
-            with self._ipv6_subnet(n_const.IPV6_SLAAC) as s:
+            with self._ipv6_subnet(lib_constants.IPV6_SLAAC) as s:
                 self._test_router_delete_subnet_inuse_returns_409(r, s)
 
     def test_router_delete_dhcpv6_stateless_subnet_inuse_returns_409(self):
         with self.router() as r:
-            with self._ipv6_subnet(n_const.DHCPV6_STATELESS) as s:
+            with self._ipv6_subnet(lib_constants.DHCPV6_STATELESS) as s:
                 self._test_router_delete_subnet_inuse_returns_409(r, s)
 
     def test_delete_ext_net_with_disassociated_floating_ips(self):
@@ -2795,6 +2922,128 @@
                 fip['floatingip']['floating_ip_address'])
             self.assertEqual(4, floating_ip.version)
 
+    def test_create_router_gateway_fails_nested(self):
+        # Force _update_router_gw_info failure
+        plugin = manager.NeutronManager.get_service_plugins()[
+            service_constants.L3_ROUTER_NAT]
+        if not isinstance(plugin, l3_db.L3_NAT_dbonly_mixin):
+            self.skipTest("Plugin is not L3_NAT_dbonly_mixin")
+        ctx = context.Context('', 'foo')
+        data = {'router': {
+            'name': 'router1', 'admin_state_up': True,
+            'external_gateway_info': {'network_id': 'some_uuid'},
+            'tenant_id': 'some_tenant'}}
+
+        def mock_fail__update_router_gw_info(ctx, router_id, info,
+                                             router=None):
+            # Fail and break the enclosing transaction
+            with ctx.session.begin(subtransactions=True):
+                raise n_exc.NeutronException
+
+        mock.patch.object(plugin, '_update_router_gw_info',
+                          side_effect=mock_fail__update_router_gw_info).start()
+
+        def create_router_with_transaction(ctx, data):
+            # Emulates what many plugins do
+            with ctx.session.begin(subtransactions=True):
+                plugin.create_router(ctx, data)
+
+        # Verify router doesn't persist on failure
+        self.assertRaises(n_exc.NeutronException,
+                          create_router_with_transaction, ctx, data)
+        routers = plugin.get_routers(ctx)
+        self.assertEqual(0, len(routers))
+
+    def test_create_router_gateway_fails_nested_delete_router_failed(self):
+        # Force _update_router_gw_info failure
+        plugin = manager.NeutronManager.get_service_plugins()[
+            service_constants.L3_ROUTER_NAT]
+        if not isinstance(plugin, l3_db.L3_NAT_dbonly_mixin):
+            self.skipTest("Plugin is not L3_NAT_dbonly_mixin")
+        ctx = context.Context('', 'foo')
+        data = {'router': {
+            'name': 'router1', 'admin_state_up': True,
+            'external_gateway_info': {'network_id': 'some_uuid'},
+            'tenant_id': 'some_tenant'}}
+
+        def mock_fail__update_router_gw_info(ctx, router_id, info,
+                                             router=None):
+            # Fail and break the enclosing transaction
+            with ctx.session.begin(subtransactions=True):
+                raise n_exc.NeutronException
+
+        def mock_fail_delete_router(ctx, router_id):
+            with ctx.session.begin(subtransactions=True):
+                raise Exception()
+
+        mock.patch.object(plugin, '_update_router_gw_info',
+                          side_effect=mock_fail__update_router_gw_info).start()
+        mock.patch.object(plugin, 'delete_router',
+                          mock_fail_delete_router).start()
+
+        def create_router_with_transaction(ctx, data):
+            # Emulates what many plugins do
+            with ctx.session.begin(subtransactions=True):
+                plugin.create_router(ctx, data)
+
+        # Verify router doesn't persist on failure
+        self.assertRaises(n_exc.NeutronException,
+                          create_router_with_transaction, ctx, data)
+        routers = plugin.get_routers(ctx)
+        self.assertEqual(0, len(routers))
+
+    def test_router_add_interface_by_port_fails_nested(self):
+        # Force _validate_router_port_info failure
+        plugin = manager.NeutronManager.get_service_plugins()[
+            service_constants.L3_ROUTER_NAT]
+        if not isinstance(plugin, l3_db.L3_NAT_dbonly_mixin):
+            self.skipTest("Plugin is not L3_NAT_dbonly_mixin")
+        orig_update_port = self.plugin.update_port
+
+        def mock_fail__validate_router_port_info(ctx, router, port_id):
+            # Fail with raising BadRequest exception
+            msg = "Failure mocking..."
+            raise n_exc.BadRequest(resource='router', msg=msg)
+
+        def mock_update_port_with_transaction(ctx, id, port):
+            # Update port within a sub-transaction
+            with ctx.session.begin(subtransactions=True):
+                orig_update_port(ctx, id, port)
+
+        def add_router_interface_with_transaction(ctx, router_id,
+                                                  interface_info):
+            # Call add_router_interface() within a sub-transaction
+            with ctx.session.begin():
+                plugin.add_router_interface(ctx, router_id, interface_info)
+
+        tenant_id = _uuid()
+        ctx = context.Context('', tenant_id)
+        with self.network(tenant_id=tenant_id) as network, (
+                self.router(name='router1', admin_state_up=True,
+                            tenant_id=tenant_id)) as router:
+            with self.subnet(network=network, cidr='10.0.0.0/24',
+                             tenant_id=tenant_id) as subnet:
+                fixed_ips = [{'subnet_id': subnet['subnet']['id']}]
+                with self.port(subnet=subnet, fixed_ips=fixed_ips,
+                               tenant_id=tenant_id) as port:
+                    mock.patch.object(
+                        self.plugin, 'update_port',
+                        side_effect=(
+                            mock_update_port_with_transaction)).start()
+                    mock.patch.object(
+                        plugin, '_validate_router_port_info',
+                        side_effect=(
+                            mock_fail__validate_router_port_info)).start()
+                    self.assertRaises(n_exc.BadRequest,
+                                      add_router_interface_with_transaction,
                                      ctx, router['router']['id'],
+                                      {'port_id': port['port']['id']})
+
+                    # fetch port and confirm device_id and device_owner
+                    body = self._show('ports', port['port']['id'])
+                    self.assertEqual('', body['port']['device_owner'])
+                    self.assertEqual('', body['port']['device_id'])
+
     def test_update_subnet_gateway_for_external_net(self):
         """Test to make sure notification to routers occurs when the gateway
         ip address of a subnet of the external network is changed.
@@ -2817,7 +3066,7 @@
                              allocation_pools=allocation_pools,
                              cidr='120.0.0.0/24') as subnet:
                 kwargs = {
-                    'device_owner': l3_constants.DEVICE_OWNER_ROUTER_GW,
+                    'device_owner': lib_constants.DEVICE_OWNER_ROUTER_GW,
                     'device_id': 'fake_device'}
                 with self.port(subnet=subnet, **kwargs):
                     data = {'subnet': {'gateway_ip': '120.0.0.2'}}
@@ -2877,7 +3126,7 @@
             routers = self.plugin.get_sync_data(
                 context.get_admin_context(), None)
             self.assertEqual(1, len(routers))
-            interfaces = routers[0][l3_constants.INTERFACE_KEY]
+            interfaces = routers[0][lib_constants.INTERFACE_KEY]
             self.assertEqual(1, len(interfaces))
             subnets = interfaces[0]['subnets']
             self.assertEqual(1, len(subnets))
@@ -2939,7 +3188,8 @@
                 self.core_plugin.update_port(ctx, p['port']['id'], port)
                 routers = self.plugin.get_sync_data(ctx, None)
                 self.assertEqual(1, len(routers))
-                interfaces = routers[0].get(l3_constants.INTERFACE_KEY, [])
+                interfaces = routers[0].get(lib_constants.INTERFACE_KEY,
+                                            [])
                 self.assertEqual(1, len(interfaces))
 
     def test_l3_agent_routers_query_gateway(self):
@@ -2965,7 +3215,7 @@
             routers = self.plugin.get_sync_data(
                 context.get_admin_context(), [fip['floatingip']['router_id']])
             self.assertEqual(1, len(routers))
-            floatingips = routers[0][l3_constants.FLOATINGIP_KEY]
+            floatingips = routers[0][lib_constants.FLOATINGIP_KEY]
             self.assertEqual(1, len(floatingips))
             self.assertEqual(floatingips[0]['id'],
                              fip['floatingip']['id'])
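The three nested-transaction tests added above follow one pattern: inject a failure inside a subtransaction and then assert that nothing was persisted. A condensed sketch of that pattern, with plugin and ctx standing in for the L3 service plugin and context used by the tests (illustrative only, not the tests' exact code):

    import mock
    from neutron_lib import exceptions as n_exc

    def assert_router_create_rolls_back(test, plugin, ctx, data):
        # 'data' includes external_gateway_info, as in the tests above.
        with mock.patch.object(plugin, '_update_router_gw_info',
                               side_effect=n_exc.NeutronException):
            # The injected gateway failure must abort the whole create...
            test.assertRaises(n_exc.NeutronException,
                              plugin.create_router, ctx, data)
        # ...leaving no half-created router behind.
        test.assertEqual(0, len(plugin.get_routers(ctx)))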
diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_netmtu.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_netmtu.py
--- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_netmtu.py 2016-05-12 19:51:41.000000000 +0000
+++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_netmtu.py 2016-08-03 20:10:34.000000000 +0000
@@ -12,6 +12,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from oslo_config import cfg
+
 from neutron.common import constants
 from neutron.db import db_base_plugin_v2
 from neutron.db import netmtu_db
@@ -64,11 +66,20 @@
         self.assertEqual(constants.DEFAULT_NETWORK_MTU,
                          res['networks'][0].get('mtu'))
 
+    def _assert_network_mtu(self, net_id, expected_mtu):
+        req = self.new_show_request('networks', net_id)
+        res = self.deserialize(self.fmt, req.get_response(self.api))
+        self.assertEqual(expected_mtu, res['network']['mtu'])
+
     def test_show_network_mtu(self):
         with self.network(name='net1') as net:
-            req = self.new_show_request('networks', net['network']['id'])
-            res = self.deserialize(self.fmt, req.get_response(self.api))
-            self.assertEqual(res['network']['name'],
-                             net['network']['name'])
-            self.assertEqual(constants.DEFAULT_NETWORK_MTU,
-                             res['network']['mtu'])
+            self._assert_network_mtu(
+                net['network']['id'], constants.DEFAULT_NETWORK_MTU)
+
+    def test_network_mtu_immediately_reflects_config_option(self):
+        with self.network(name='net1') as net:
+            self._assert_network_mtu(
+                net['network']['id'], cfg.CONF.global_physnet_mtu)
+            cfg.CONF.set_override('global_physnet_mtu', 1400)
+            self._assert_network_mtu(
+                net['network']['id'], cfg.CONF.global_physnet_mtu)
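The new test_network_mtu_immediately_reflects_config_option above relies on a network's MTU being recomputed from configuration on every read rather than stored at create time. A sketch of the observable behaviour (the 1400 comes from the test; this assumes a flat/VLAN network, since tunnelled types subtract their encapsulation overhead):

    from oslo_config import cfg

    cfg.CONF.set_override('global_physnet_mtu', 1400)
    # The next GET /v2.0/networks/<id> reports mtu == 1400 without a
    # restart, because 'mtu' is derived from the current config value.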
diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_network_ip_availability.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_network_ip_availability.py
--- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_network_ip_availability.py 2016-05-12 19:51:41.000000000 +0000
+++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_network_ip_availability.py 2016-08-29 20:05:49.000000000 +0000
@@ -13,9 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from neutron_lib import constants
+
 import neutron.api.extensions as api_ext
 import neutron.common.config as config
-import neutron.common.constants as constants
 import neutron.extensions
 import neutron.services.network_ip_availability.plugin as plugin_module
 import neutron.tests.unit.db.test_db_base_plugin_v2 as test_db_base_plugin_v2
diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_quotasv2.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_quotasv2.py
--- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_quotasv2.py 2016-05-12 19:51:41.000000000 +0000
+++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_quotasv2.py 2016-08-03 20:10:34.000000000 +0000
@@ -54,13 +54,8 @@
         self.config_parse()
         # Update the plugin and extensions path
-        self.setup_coreplugin(TARGET_PLUGIN)
-        cfg.CONF.set_override(
-            'quota_items',
-            ['network', 'subnet', 'port', 'extra1'],
-            group='QUOTAS')
+        self.setup_coreplugin('ml2')
         quota.QUOTAS = quota.QuotaEngine()
-        quota.register_resources_from_config()
         self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
         self.plugin = self._plugin_patcher.start()
         self.plugin.return_value.supported_extension_aliases = ['quotas']
diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_segment.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_segment.py
--- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_segment.py 2016-06-17 15:30:29.000000000 +0000
+++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_segment.py 2016-08-29 20:05:49.000000000 +0000
@@ -15,15 +15,23 @@
 import mock
 import netaddr
 from neutron_lib import constants
+from neutron_lib import exceptions as n_exc
 from oslo_utils import uuidutils
 import webob.exc
 
 from neutron.api.v2 import attributes
+from neutron.callbacks import events
+from neutron.callbacks import exceptions
+from neutron.callbacks import registry
+from neutron.callbacks import resources
 from neutron import context
 from neutron.db import agents_db
+from neutron.db import agentschedulers_db
 from neutron.db import db_base_plugin_v2
 from neutron.db import portbindings_db
 from neutron.db import segments_db
+from neutron.extensions import ip_allocation
+from neutron.extensions import l2_adjacency
 from neutron.extensions import portbindings
 from neutron.extensions import segment as ext_segment
 from neutron.plugins.common import constants as p_constants
@@ -36,6 +44,9 @@
 SERVICE_PLUGIN_KLASS = 'neutron.services.segments.plugin.Plugin'
 TEST_PLUGIN_KLASS = (
     'neutron.tests.unit.extensions.test_segment.SegmentTestPlugin')
+DHCP_HOSTA = 'dhcp-host-a'
+DHCP_HOSTB = 'dhcp-host-b'
+HTTP_NOT_FOUND = 404
 
 
 class SegmentTestExtensionManager(object):
@@ -82,6 +93,7 @@
     def _make_segment(self, fmt, **kwargs):
         res = self._create_segment(fmt, **kwargs)
         if res.status_int >= webob.exc.HTTPClientError.code:
+            res.charset = 'utf8'
             raise webob.exc.HTTPClientError(
                 code=res.status_int, explanation=str(res))
         return self.deserialize(fmt, res)
@@ -106,7 +118,7 @@
     __native_pagination_support = True
     __native_sorting_support = True
 
-    supported_extension_aliases = ["segment", "binding"]
+    supported_extension_aliases = ["segment", "binding", "ip_allocation"]
 
     def get_plugin_description(self):
         return "Network Segments"
@@ -121,6 +133,13 @@
             context, port['port'], port_dict)
         return port_dict
 
+    def update_port(self, context, id, port):
+        port_dict = super(SegmentTestPlugin, self).update_port(
+            context, id, port)
+        self._process_portbindings_create_and_update(
+            context, port['port'], port_dict)
+        return port_dict
+
 
 class TestSegment(SegmentTestCase):
@@ -136,6 +155,15 @@
             segmentation_id=200,
             expected=expected_segment)
 
+    def test_create_segment_non_existent_network(self):
+        exc = self.assertRaises(webob.exc.HTTPClientError,
+                                self._test_create_segment,
+                                network_id=uuidutils.generate_uuid(),
+                                physical_network='phys_net',
+                                segmentation_id=200)
+        self.assertEqual(HTTP_NOT_FOUND, exc.code)
+        self.assertIn('NetworkNotFound', exc.explanation)
+
     def test_create_segment_no_phys_net(self):
         with self.network() as network:
             network = network['network']
@@ -148,24 +176,82 @@
             expected=expected_segment)
 
     def test_create_segment_no_segmentation_id(self):
+
+        def _mock_reserve_segmentation_id(rtype, event, trigger,
+                                          context, segment):
+            if not segment.get('segmentation_id'):
+                segment['segmentation_id'] = 200
+
         with self.network() as network:
             network = network['network']
+
+        registry.subscribe(_mock_reserve_segmentation_id, resources.SEGMENT,
+                           events.PRECOMMIT_CREATE)
         expected_segment = {'network_id': network['id'],
                             'physical_network': 'phys_net',
                             'network_type': 'net_type',
-                            'segmentation_id': None}
+                            'segmentation_id': 200}
         self._test_create_segment(network_id=network['id'],
                                   physical_network='phys_net',
                                   expected=expected_segment)
 
+    def test_create_segment_with_exception_in_core_plugin(self):
+        cxt = context.get_admin_context()
+        with self.network() as network:
+            network = network['network']
+
+        with mock.patch.object(registry, 'notify') as notify:
+            notify.side_effect = exceptions.CallbackFailure(errors=Exception)
+            self.assertRaises(webob.exc.HTTPClientError,
+                              self.segment,
+                              network_id=network['id'],
+                              segmentation_id=200)
+
+        network_segments = segments_db.get_network_segments(cxt.session,
+                                                            network['id'])
+        self.assertEqual([], network_segments)
+
+    def test_create_segments_in_certain_order(self):
+        cxt = context.get_admin_context()
+        with self.network() as network:
+            network = network['network']
+            segment1 = self.segment(
+                network_id=network['id'], segmentation_id=200)
+            segment2 = self.segment(
+                network_id=network['id'], segmentation_id=201)
+            segment3 = self.segment(
+                network_id=network['id'], segmentation_id=202)
+            network_segments = segments_db.get_network_segments(cxt.session,
+                                                                network['id'])
+            self.assertEqual(segment1['segment']['id'],
+                             network_segments[0]['id'])
+            self.assertEqual(segment2['segment']['id'],
+                             network_segments[1]['id'])
+            self.assertEqual(segment3['segment']['id'],
+                             network_segments[2]['id'])
+
     def test_delete_segment(self):
         with self.network() as network:
             network = network['network']
-            segment = self.segment(network_id=network['id'])
+            self.segment(network_id=network['id'], segmentation_id=200)
+            segment = self.segment(network_id=network['id'], segmentation_id=201)
             self._delete('segments', segment['segment']['id'])
             self._show('segments', segment['segment']['id'],
                        expected_code=webob.exc.HTTPNotFound.code)
 
+    def test_delete_segment_failed_with_subnet_associated(self):
+        with self.network() as network:
+            net = network['network']
+
+            segment = self._test_create_segment(network_id=net['id'],
+                                                segmentation_id=200)
+            segment_id = segment['segment']['id']
+            with self.subnet(network=network, segment_id=segment_id):
+                self._delete('segments', segment_id,
+                             expected_code=webob.exc.HTTPConflict.code)
+                exist_segment = self._show('segments', segment_id)
+                self.assertEqual(segment_id, exist_segment['segment']['id'])
+
     def test_get_segment(self):
         with self.network() as network:
             network = network['network']
@@ -184,17 +270,42 @@
                                       segmentation_id=200)
             self._test_create_segment(network_id=network['id'],
                                       physical_network='phys_net2',
-                                      segmentation_id=200)
+                                      segmentation_id=201)
             res = self._list('segments')
             self.assertEqual(2, len(res['segments']))
 
+    def test_update_segments(self):
+        with self.network() as network:
+            net = network['network']
+            segment = self._test_create_segment(network_id=net['id'],
+                                                segmentation_id=200)
+            segment['segment']['segmentation_id'] = '201'
+            self._update('segments', segment['segment']['id'], segment,
+                         expected_code=webob.exc.HTTPClientError.code)
+
+
+class TestSegmentML2(SegmentTestCase):
+    def setUp(self):
+        super(TestSegmentML2, self).setUp(plugin='ml2')
+
+    def test_segment_notification_on_create_network(self):
+        with mock.patch.object(registry, 'notify') as notify:
+            with self.network():
+                pass
+        notify.assert_any_call(resources.SEGMENT,
+                               events.PRECOMMIT_CREATE,
+                               context=mock.ANY,
+                               segment=mock.ANY,
+                               trigger=mock.ANY)
+
 
 class TestSegmentSubnetAssociation(SegmentTestCase):
     def test_basic_association(self):
         with self.network() as network:
             net = network['network']
 
-            segment = self._test_create_segment(network_id=net['id'])
+            segment = self._test_create_segment(network_id=net['id'],
+                                                segmentation_id=200)
             segment_id = segment['segment']['id']
             with self.subnet(network=network, segment_id=segment_id) as subnet:
@@ -211,7 +322,8 @@
             with self.network() as network2:
                 net = network1['network']
 
-                segment = self._test_create_segment(network_id=net['id'])
+                segment = self._test_create_segment(network_id=net['id'],
+                                                    segmentation_id=200)
 
                 res = self._create_subnet(self.fmt,
                                           net_id=network2['network']['id'],
@@ -240,7 +352,8 @@
             with self.subnet(network=network):
                 net = network['network']
 
-                segment = self._test_create_segment(network_id=net['id'])
+                segment = self._test_create_segment(network_id=net['id'],
+                                                    segmentation_id=200)
 
                 res = self._create_subnet(self.fmt,
                                           net_id=net['id'],
@@ -259,7 +372,7 @@
             segment = {segments_db.NETWORK_TYPE: 'phys_net',
                        segments_db.PHYSICAL_NETWORK: 'net_type',
                        segments_db.SEGMENTATION_ID: 200}
-            segments_db.add_network_segment(cxt.session,
+            segments_db.add_network_segment(cxt,
                                             network_id=net['id'],
                                             segment=segment,
                                             is_dynamic=True)
@@ -280,8 +393,11 @@
         config.cfg.CONF.set_override('mechanism_drivers',
                                      self._mechanism_drivers,
                                      group='ml2')
+        config.cfg.CONF.set_override('network_vlan_ranges',
+                                     ['phys_net1', 'phys_net2'],
+                                     group='ml2_type_vlan')
         if not plugin:
-            plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+            plugin = 'ml2'
         super(HostSegmentMappingTestCase, self).setUp(plugin=plugin)
         db.subscribe()
@@ -316,6 +432,11 @@
 
 class TestMl2HostSegmentMappingNoAgent(HostSegmentMappingTestCase):
 
+    def setUp(self, plugin=None):
+        if not plugin:
+            plugin = TEST_PLUGIN_KLASS
+        super(TestMl2HostSegmentMappingNoAgent, self).setUp(plugin=plugin)
+
     def test_update_segment_host_mapping(self):
         ctx = context.get_admin_context()
         host = 'host1'
@@ -337,6 +458,36 @@
                          segments_host_db[segment['id']]['segment_id'])
         self.assertEqual(host, segments_host_db[segment['id']]['host'])
 
+    def test_map_segment_to_hosts(self):
+        ctx = context.get_admin_context()
+        hosts = {'host1', 'host2', 'host3'}
+        with self.network() as network:
+            network = network['network']
+        segment = self._test_create_segment(
+            network_id=network['id'], physical_network='phys_net1',
+            segmentation_id=200, network_type=p_constants.TYPE_VLAN)['segment']
+        db.map_segment_to_hosts(ctx, segment['id'], hosts)
+        updated_segment = self.plugin.get_segment(ctx, segment['id'])
+        self.assertEqual(hosts, set(updated_segment['hosts']))
+
+    def test_get_all_hosts_mapped_with_segments(self):
+        ctx = context.get_admin_context()
+        hosts = set()
+        with self.network() as network:
+            network_id = network['network']['id']
+        for i in range(1, 3):
+            host = "host%s" % i
+            segment = self._test_create_segment(
+                network_id=network_id, physical_network='phys_net%s' % i,
+                segmentation_id=200 + i, network_type=p_constants.TYPE_VLAN)
+            db.update_segment_host_mapping(
+                ctx, host, {segment['segment']['id']})
+            hosts.add(host)
+
+        # Now there are 2 hosts with segments mapped.
+        actual_hosts = db.get_hosts_mapped_with_segments(ctx)
+        self.assertEqual(hosts, actual_hosts)
+
 
 class TestMl2HostSegmentMappingOVS(HostSegmentMappingTestCase):
     _mechanism_drivers = ['openvswitch', 'logger']
@@ -389,6 +540,56 @@
                          segments_host_db[segment['id']]['segment_id'])
         self.assertEqual(host2, segments_host_db[segment['id']]['host'])
 
+    def test_update_agent_only_change_agent_host_mapping(self):
+        host1 = 'host1'
+        host2 = 'host2'
+        physical_network = 'phys_net1'
+        with self.network() as network:
+            network = network['network']
+        segment1 = self._test_create_segment(
+            network_id=network['id'],
+            physical_network=physical_network,
+            segmentation_id=200,
+            network_type=p_constants.TYPE_VLAN)['segment']
+        self._register_agent(host1, mappings={physical_network: 'br-eth-1'},
+                             plugin=self.plugin)
+        self._register_agent(host2, mappings={physical_network: 'br-eth-1'},
+                             plugin=self.plugin)
+
+        # Updating the agent at host2 should only change host2's mapping.
+        other_phys_net = 'phys_net2'
+        segment2 = self._test_create_segment(
+            network_id=network['id'],
+            physical_network=other_phys_net,
+            segmentation_id=201,
+            network_type=p_constants.TYPE_VLAN)['segment']
+        self._register_agent(host2, mappings={other_phys_net: 'br-eth-2'},
+                             plugin=self.plugin)
+        # We should have segment1 map to host1 and segment2 map to host2 now
+        segments_host_db1 = self._get_segments_for_host(host1)
+        self.assertEqual(1, len(segments_host_db1))
+        self.assertEqual(segment1['id'],
+                         segments_host_db1[segment1['id']]['segment_id'])
+        self.assertEqual(host1, segments_host_db1[segment1['id']]['host'])
+        segments_host_db2 = self._get_segments_for_host(host2)
+        self.assertEqual(1, len(segments_host_db2))
+        self.assertEqual(segment2['id'],
+                         segments_host_db2[segment2['id']]['segment_id'])
+        self.assertEqual(host2, segments_host_db2[segment2['id']]['host'])
+
+    def test_new_segment_after_host_reg(self):
+        host1 = 'host1'
+        physical_network = 'phys_net1'
+        segment = self._test_one_segment_one_host(host1)
+        with self.network() as network:
+            network = network['network']
+        segment2 = self._test_create_segment(
+            network_id=network['id'], physical_network=physical_network,
+            segmentation_id=201, network_type=p_constants.TYPE_VLAN)['segment']
+        segments_host_db = self._get_segments_for_host(host1)
+        self.assertEqual(set((segment['id'], segment2['id'])),
+                         set(segments_host_db))
+
     def test_segment_deletion_removes_host_mapping(self):
         host = 'host1'
         segment = self._test_one_segment_one_host(host)
@@ -536,13 +737,16 @@
 
         segment = self._test_create_segment(
             network_id=network['network']['id'],
-            physical_network=physnet)
+            physical_network=physnet,
+            network_type=p_constants.TYPE_VLAN)
 
         ip_version = netaddr.IPNetwork(cidr).version if cidr else None
         with self.subnet(network=network,
                          segment_id=segment['segment']['id'],
                          ip_version=ip_version,
                          cidr=cidr) as subnet:
+            self._validate_l2_adjacency(network['network']['id'],
+                                        is_adjacent=False)
             return network, segment, subnet
 
     def _create_test_segments_with_subnets(self, num):
@@ -590,6 +794,8 @@
                                      tenant_id=network['network']['tenant_id'],
                                      arg_list=(portbindings.HOST_ID,),
                                      **{portbindings.HOST_ID: 'fakehost'})
+        res = self.deserialize(self.fmt, response)
+        self._validate_immediate_ip_allocation(res['port']['id'])
 
         # Since host mapped to middle segment, IP must come from middle subnet
         self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])
@@ -599,7 +805,8 @@
         with self.network() as network:
             segment = self._test_create_segment(
                 network_id=network['network']['id'],
-                physical_network='physnet')
+                physical_network='physnet',
+                network_type=p_constants.TYPE_VLAN)
 
         # Map the host to the segment
         self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])
@@ -622,7 +829,10 @@
                          cidr='2001:db8:0:0::/64') as subnet:
             segment = self._test_create_segment(
                 network_id=network['network']['id'],
-                physical_network='physnet')
+                physical_network='physnet',
+                network_type=p_constants.TYPE_VLAN)
+
+        self._validate_l2_adjacency(network['network']['id'], is_adjacent=True)
 
         # Map the host to the segment
         self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])
@@ -633,6 +843,9 @@
                                      arg_list=(portbindings.HOST_ID,),
                                      **{portbindings.HOST_ID: 'fakehost'})
 
+        res = self.deserialize(self.fmt, response)
+        self._validate_immediate_ip_allocation(res['port']['id'])
+
         # Since the subnet is not on a segment, fall back to it
         self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])
@@ -687,7 +900,8 @@
         with self.network() as network:
             segment = self._test_create_segment(
                 network_id=network['network']['id'],
-                physical_network='physnet')
+                physical_network='physnet',
+                network_type=p_constants.TYPE_VLAN)
 
         # Create a port with no IP address (since there is no subnet)
         port = self._create_deferred_ip_port(network)
@@ -705,12 +919,31 @@
         # Gets bad request because there are no eligible subnets.
         self.assertEqual(webob.exc.HTTPBadRequest.code, response.status_int)
 
+    def test_port_without_ip_not_deferred(self):
+        """Ports without addresses on non-routed networks are not deferred"""
+        with self.network() as network:
+            pass
+
+        # Create a bound port with no IP address (since there is no subnet)
+        response = self._create_port(self.fmt,
+                                     net_id=network['network']['id'],
+                                     tenant_id=network['network']['tenant_id'],
+                                     arg_list=(portbindings.HOST_ID,),
+                                     **{portbindings.HOST_ID: 'fakehost'})
+        port = self.deserialize(self.fmt, response)
+        request = self.new_show_request('ports', port['port']['id'])
+        response = self.deserialize(self.fmt, request.get_response(self.api))
+
+        self.assertEqual(ip_allocation.IP_ALLOCATION_IMMEDIATE,
+                         response['port'][ip_allocation.IP_ALLOCATION])
+
     def test_port_update_is_host_aware(self):
         """Binding information is provided, subnets on segments"""
         with self.network() as network:
             segment = self._test_create_segment(
                 network_id=network['network']['id'],
-                physical_network='physnet')
+                physical_network='physnet',
+                network_type=p_constants.TYPE_VLAN)
 
         # Map the host to the segment
         self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])
@@ -726,6 +959,9 @@
         # Create the subnet and try to update the port to get an IP
         with self.subnet(network=network,
                          segment_id=segment['segment']['id']) as subnet:
+            self._validate_deferred_ip_allocation(port['port']['id'])
+            self._validate_l2_adjacency(network['network']['id'],
+                                        is_adjacent=False)
             # Try requesting an IP (but the only subnet is on a segment)
             data = {'port': {
                 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}
@@ -737,6 +973,30 @@
         self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
         self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])
 
+    def _validate_l2_adjacency(self, network_id, is_adjacent):
+        request = self.new_show_request('networks', network_id)
+        response = self.deserialize(self.fmt, request.get_response(self.api))
+        self.assertEqual(is_adjacent,
+                         response['network'][l2_adjacency.L2_ADJACENCY])
+
+    def _validate_deferred_ip_allocation(self, port_id):
+        request = self.new_show_request('ports', port_id)
+        response = self.deserialize(self.fmt, request.get_response(self.api))
+
+        self.assertEqual(ip_allocation.IP_ALLOCATION_DEFERRED,
+                         response['port'][ip_allocation.IP_ALLOCATION])
+        ips = response['port']['fixed_ips']
+        self.assertEqual(0, len(ips))
+
+    def _validate_immediate_ip_allocation(self, port_id):
+        request = self.new_show_request('ports', port_id)
+        response = self.deserialize(self.fmt, request.get_response(self.api))
+
+        self.assertEqual(ip_allocation.IP_ALLOCATION_IMMEDIATE,
+                         response['port'][ip_allocation.IP_ALLOCATION])
+        ips = response['port']['fixed_ips']
+        self.assertNotEqual(0, len(ips))
+
     def _create_deferred_ip_port(self, network):
         response = self._create_port(self.fmt,
                                      net_id=network['network']['id'],
@@ -744,6 +1004,7 @@
         port = self.deserialize(self.fmt, response)
         ips = port['port']['fixed_ips']
         self.assertEqual(0, len(ips))
+
         return port
 
     def test_port_update_deferred_allocation(self):
@@ -754,6 +1015,7 @@
         self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])
 
         port = self._create_deferred_ip_port(network)
+        self._validate_deferred_ip_allocation(port['port']['id'])
 
         # Try requesting an IP (but the only subnet is on a segment)
         data = {'port': {portbindings.HOST_ID: 'fakehost'}}
@@ -833,6 +1095,7 @@
         network, segment, subnet = self._create_test_segment_with_subnet()
 
        port = self._create_deferred_ip_port(network)
+        self._validate_deferred_ip_allocation(port['port']['id'])
 
         # Try requesting an IP (but the only subnet is on a segment)
         data = {'port': {portbindings.HOST_ID: 'fakehost'}}
@@ -851,6 +1114,7 @@
         network, segments, _s = self._create_test_segments_with_subnets(2)
 
         port = self._create_deferred_ip_port(network)
+        self._validate_deferred_ip_allocation(port['port']['id'])
 
         # This host is bound to multiple segments
         self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost'),
                                    (segments[1]['segment']['id'], 'fakehost')])
@@ -889,8 +1153,178 @@
         self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
         self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])
 
+    def test_port_update_deferred_allocation_no_ips(self):
+        """Binding information is provided on update, subnets on segments"""
+        network, segments, subnets = self._create_test_segments_with_subnets(2)
+
+        self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost2'),
+                                   (segments[1]['segment']['id'], 'fakehost')])
+
+        port = self._create_deferred_ip_port(network)
+
+        # Update the subnet on the second segment to be out of IPs
+        subnet_data = {'subnet': {'allocation_pools': []}}
+        subnet_req = self.new_update_request('subnets',
+                                             subnet_data,
+                                             subnets[1]['subnet']['id'])
+        subnet_response = subnet_req.get_response(self.api)
+        res = self.deserialize(self.fmt, subnet_response)
+
+        # Try requesting an IP (but the subnet ran out of ips)
+        data = {'port': {portbindings.HOST_ID: 'fakehost'}}
+        port_id = port['port']['id']
+        port_req = self.new_update_request('ports', data, port_id)
+        response = port_req.get_response(self.api)
+        res = self.deserialize(self.fmt, response)
+
+        # Since port is bound and there is a mapping to segment, it succeeds.
+        self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)
+        self.assertEqual(n_exc.IpAddressGenerationFailure.__name__,
+                         res['NeutronError']['type'])
+
+    def test_port_update_fails_if_host_on_wrong_segment(self):
+        """Update a port with existing IPs to a host where they don't work"""
+        network, segments, subnets = self._create_test_segments_with_subnets(2)
+
+        self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost2'),
+                                   (segments[1]['segment']['id'], 'fakehost')])
+
+        # Create a bound port with an IP address
+        response = self._create_port(self.fmt,
+                                     net_id=network['network']['id'],
+                                     tenant_id=network['network']['tenant_id'],
+                                     arg_list=(portbindings.HOST_ID,),
+                                     **{portbindings.HOST_ID: 'fakehost'})
+        self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])
+        port = self.deserialize(self.fmt, response)
+
+        # Now, try to update binding to a host on the other segment
+        data = {'port': {portbindings.HOST_ID: 'fakehost2'}}
+        port_req = self.new_update_request('ports', data, port['port']['id'])
+        response = port_req.get_response(self.api)
+
+        # It fails since the IP address isn't compatible with the new segment
+        self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)
+
+    def test_port_update_fails_if_host_on_good_segment(self):
+        """Update a port with existing IPs to another host in the same segment"""
+        network, segments, subnets = self._create_test_segments_with_subnets(2)
+
+        self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost2'),
+                                   (segments[1]['segment']['id'], 'fakehost1'),
+                                   (segments[1]['segment']['id'], 'fakehost')])
+
+        # Create a bound port with an IP address
+        response = self._create_port(self.fmt,
+                                     net_id=network['network']['id'],
+                                     tenant_id=network['network']['tenant_id'],
+                                     arg_list=(portbindings.HOST_ID,),
+                                     **{portbindings.HOST_ID: 'fakehost'})
+        self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])
+        port = self.deserialize(self.fmt, response)
+
+        # Now, try to update binding to another host in same segment
+        data = {'port': {portbindings.HOST_ID: 'fakehost1'}}
+        port_req = self.new_update_request('ports', data, port['port']['id'])
+        response = port_req.get_response(self.api)
+
+        # Since the new host is in the same segment, it succeeds.
+        self.assertEqual(webob.exc.HTTPOk.code, response.status_int)
+
 
 class TestSegmentAwareIpamML2(TestSegmentAwareIpam):
     def setUp(self):
-        super(TestSegmentAwareIpamML2, self).setUp(
-            plugin='neutron.plugins.ml2.plugin.Ml2Plugin')
+        config.cfg.CONF.set_override('network_vlan_ranges',
+                                     ['physnet:200:209', 'physnet0:200:209',
+                                      'physnet1:200:209', 'physnet2:200:209'],
+                                     group='ml2_type_vlan')
+        super(TestSegmentAwareIpamML2, self).setUp(plugin='ml2')
+
+
+class TestDhcpAgentSegmentScheduling(HostSegmentMappingTestCase):
+
+    _mechanism_drivers = ['openvswitch', 'logger']
+    mock_path = 'neutron.services.segments.db.update_segment_host_mapping'
+
+    def setUp(self):
+        super(TestDhcpAgentSegmentScheduling, self).setUp()
+        self.dhcp_agent_db = agentschedulers_db.DhcpAgentSchedulerDbMixin()
+        self.ctx = context.get_admin_context()
+
+    def _test_create_network_and_segment(self, phys_net):
+        with self.network() as net:
+            network = net['network']
+        segment = self._test_create_segment(network_id=network['id'],
+                                            physical_network=phys_net,
+                                            segmentation_id=200,
+                                            network_type='vlan')
+        dhcp_agents = self.dhcp_agent_db.get_dhcp_agents_hosting_networks(
+            self.ctx, [network['id']])
+        self.assertEqual(0, len(dhcp_agents))
+        return network, segment['segment']
+
+    def _test_create_subnet(self, network, segment, cidr=None,
+                            enable_dhcp=True):
+        cidr = cidr or '10.0.0.0/24'
+        ip_version = 4
+        with self.subnet(network={'network': network},
+                         segment_id=segment['id'],
+                         ip_version=ip_version,
+                         cidr=cidr,
+                         enable_dhcp=enable_dhcp) as subnet:
+            pass
+        return subnet['subnet']
+
+    def _register_dhcp_agents(self, hosts=None):
+        hosts = hosts or [DHCP_HOSTA, DHCP_HOSTB]
+        for host in hosts:
+            helpers.register_dhcp_agent(host)
+
+    def test_network_scheduling_on_segment_creation(self):
+        self._register_dhcp_agents()
+        self._test_create_network_and_segment('phys_net1')
+
+    def test_segment_scheduling_no_host_mapping(self):
+        self._register_dhcp_agents()
+        network, segment = self._test_create_network_and_segment('phys_net1')
+        self._test_create_subnet(network, segment)
+        dhcp_agents = self.dhcp_agent_db.get_dhcp_agents_hosting_networks(
+            self.ctx, [network['id']])
+        self.assertEqual(0, len(dhcp_agents))
+
+    def test_segment_scheduling_with_host_mapping(self):
+        phys_net1 = 'phys_net1'
+        self._register_dhcp_agents()
+        network, segment = self._test_create_network_and_segment(phys_net1)
+        self._register_agent(DHCP_HOSTA,
+                             mappings={phys_net1: 'br-eth-1'},
+                             plugin=self.plugin)
+        self._test_create_subnet(network, segment)
+        dhcp_agents = self.dhcp_agent_db.get_dhcp_agents_hosting_networks(
+            self.ctx, [network['id']])
+        self.assertEqual(1, len(dhcp_agents))
+        self.assertEqual(DHCP_HOSTA, dhcp_agents[0]['host'])
+
+    def test_segment_scheduling_with_multiple_host_mappings(self):
+        phys_net1 = 'phys_net1'
+        phys_net2 = 'phys_net2'
+        self._register_dhcp_agents([DHCP_HOSTA, DHCP_HOSTB, 'MEHA', 'MEHB'])
+        network, segment1 = self._test_create_network_and_segment(phys_net1)
+        segment2 = self._test_create_segment(network_id=network['id'],
+                                             physical_network=phys_net2,
+                                             segmentation_id=200,
+                                             network_type='vlan')['segment']
+        self._register_agent(DHCP_HOSTA,
+                             mappings={phys_net1: 'br-eth-1'},
+                             plugin=self.plugin)
+        self._register_agent(DHCP_HOSTB,
+                             mappings={phys_net2: 'br-eth-1'},
+                             plugin=self.plugin)
+        self._test_create_subnet(network, segment1)
+        self._test_create_subnet(network, segment2, cidr='11.0.0.0/24')
+        dhcp_agents = self.dhcp_agent_db.get_dhcp_agents_hosting_networks(
+            self.ctx, [network['id']])
+        self.assertEqual(2, len(dhcp_agents))
+        agent_hosts = [agent['host'] for agent in dhcp_agents]
+        self.assertIn(DHCP_HOSTA, agent_hosts)
+        self.assertIn(DHCP_HOSTB, agent_hosts)
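The segment tests above lean on neutron's callback registry: test_create_segment_no_segmentation_id registers a PRECOMMIT_CREATE subscriber that fills in a missing segmentation_id before the segment is committed. The same hook, shown standalone (the 200 is just an illustrative value):

    from neutron.callbacks import events
    from neutron.callbacks import registry
    from neutron.callbacks import resources

    def reserve_segmentation_id(rtype, event, trigger, context, segment):
        # Subscribers may mutate the segment dict before it is committed.
        if not segment.get('segmentation_id'):
            segment['segmentation_id'] = 200

    registry.subscribe(reserve_segmentation_id, resources.SEGMENT,
                       events.PRECOMMIT_CREATE)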
diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_subnet_service_types.py neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_subnet_service_types.py
--- neutron-9.0.0~b2~dev280/neutron/tests/unit/extensions/test_subnet_service_types.py 1970-01-01 00:00:00.000000000 +0000
+++ neutron-9.0.0~b3~dev557/neutron/tests/unit/extensions/test_subnet_service_types.py 2016-08-29 20:05:49.000000000 +0000
@@ -0,0 +1,281 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob.exc
+
+from neutron.db import db_base_plugin_v2
+from neutron.extensions import subnet_service_types
+from neutron.tests.unit.db import test_db_base_plugin_v2
+
+
+class SubnetServiceTypesExtensionManager(object):
+
+    def get_resources(self):
+        return []
+
+    def get_actions(self):
+        return []
+
+    def get_request_extensions(self):
+        return []
+
+    def get_extended_resources(self, version):
+        return subnet_service_types.get_extended_resources(version)
+
+
+class SubnetServiceTypesExtensionTestPlugin(
+        db_base_plugin_v2.NeutronDbPluginV2):
+    """Test plugin to mixin the subnet service_types extension.
+    """
+
+    supported_extension_aliases = ["subnet-service-types"]
+
+
+class SubnetServiceTypesExtensionTestCase(
+        test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
+    """Test API extension subnet_service_types attributes.
+    """
+    CIDRS = ['10.0.0.0/8', '20.0.0.0/8', '30.0.0.0/8']
+    IP_VERSION = 4
+
+    def setUp(self):
+        plugin = ('neutron.tests.unit.extensions.test_subnet_service_types.' +
+                  'SubnetServiceTypesExtensionTestPlugin')
+        ext_mgr = SubnetServiceTypesExtensionManager()
+        super(SubnetServiceTypesExtensionTestCase,
+              self).setUp(plugin=plugin, ext_mgr=ext_mgr)
+
+    def _create_service_subnet(self, service_types=None, cidr=None,
+                               network=None):
+        if not network:
+            with self.network() as network:
+                pass
+        network = network['network']
+        if not cidr:
+            cidr = self.CIDRS[0]
+        args = {'net_id': network['id'],
+                'tenant_id': network['tenant_id'],
+                'cidr': cidr,
+                'ip_version': self.IP_VERSION}
+        if service_types:
+            args['service_types'] = service_types
+        return self._create_subnet(self.fmt, **args)
+
+    def _test_create_subnet(self, service_types, expect_fail=False):
+        res = self._create_service_subnet(service_types)
+        if expect_fail:
+            self.assertEqual(webob.exc.HTTPClientError.code,
+                             res.status_int)
+        else:
+            subnet = self.deserialize('json', res)
+            subnet = subnet['subnet']
+            self.assertEqual(len(service_types),
+                             len(subnet['service_types']))
+            for service in service_types:
+                self.assertIn(service, subnet['service_types'])
+
+    def test_create_subnet_blank_type(self):
+        self._test_create_subnet([])
+
+    def test_create_subnet_bar_type(self):
+        self._test_create_subnet(['network:bar'])
+
+    def test_create_subnet_foo_type(self):
+        self._test_create_subnet(['compute:foo'])
+
+    def test_create_subnet_bar_and_foo_type(self):
+        self._test_create_subnet(['network:bar', 'compute:foo'])
+
+    def test_create_subnet_invalid_type(self):
+        self._test_create_subnet(['foo'], expect_fail=True)
+
+    def test_create_subnet_no_type(self):
+        res = self._create_service_subnet()
+        subnet = self.deserialize('json', res)
+        subnet = subnet['subnet']
+        self.assertFalse(subnet['service_types'])
+
+    def _test_update_subnet(self, subnet, service_types, expect_fail=False):
+        data = {'subnet': {'service_types': service_types}}
+        req = self.new_update_request('subnets', data, subnet['id'])
+        res = self.deserialize(self.fmt, req.get_response(self.api))
+        if expect_fail:
+            self.assertEqual('InvalidSubnetServiceType',
+                             res['NeutronError']['type'])
+        else:
+            subnet = res['subnet']
+            self.assertEqual(len(service_types),
+                             len(subnet['service_types']))
+            for service in service_types:
+                self.assertIn(service, subnet['service_types'])
+
+    def test_update_subnet_zero_to_one(self):
+        service_types = ['network:foo']
+        # Create a subnet with no service type
+        res = self._create_service_subnet()
+        subnet = self.deserialize('json', res)['subnet']
+        # Update it with a single service type
+        self._test_update_subnet(subnet, service_types)
+
+    def test_update_subnet_one_to_two(self):
+        service_types = ['network:foo']
+        # Create a subnet with one service type
+        res = self._create_service_subnet(service_types)
+        subnet = self.deserialize('json', res)['subnet']
+        # Update it with two service types
+        service_types.append('compute:bar')
+        self._test_update_subnet(subnet, service_types)
+
+    def test_update_subnet_two_to_one(self):
+        service_types = ['network:foo', 'compute:bar']
+        # Create a subnet with two service types
+        res = self._create_service_subnet(service_types)
+        subnet = self.deserialize('json', res)['subnet']
+        # Update it with one service type
+        service_types = ['network:foo']
+        self._test_update_subnet(subnet, service_types)
+
+    def test_update_subnet_one_to_zero(self):
+        service_types = ['network:foo']
+        # Create a subnet with one service type
+        res = self._create_service_subnet(service_types)
+        subnet = self.deserialize('json', res)['subnet']
+        # Update it with zero service types
+        service_types = []
+        self._test_update_subnet(subnet, service_types)
+
+    def test_update_subnet_invalid_type(self):
+        service_types = ['foo']
+        # Create a subnet with no service type
+        res = self._create_service_subnet()
+        subnet = self.deserialize('json', res)['subnet']
+        # Update it with an invalid service type
+        self._test_update_subnet(subnet, service_types, expect_fail=True)
+
+    def _assert_port_res(self, port, service_type, subnet, fallback,
+                         error='IpAddressGenerationFailureNoMatchingSubnet'):
+        res = self.deserialize('json', port)
+        if fallback:
+            port = res['port']
+            self.assertEqual(1, len(port['fixed_ips']))
+            self.assertEqual(service_type, port['device_owner'])
+            self.assertEqual(subnet['id'], port['fixed_ips'][0]['subnet_id'])
+        else:
+            self.assertEqual(error, res['NeutronError']['type'])
+
+    def test_create_port_with_matching_service_type(self):
+        with self.network() as network:
+            pass
+        matching_type = 'network:foo'
+        non_matching_type = 'network:bar'
+        # Create a subnet with no service types
+        self._create_service_subnet(network=network)
+        # Create a subnet with a non-matching service type
+        self._create_service_subnet([non_matching_type],
+                                    cidr=self.CIDRS[2],
+                                    network=network)
+        # Create a subnet with a service type to match the port device owner
+        res = self._create_service_subnet([matching_type],
+                                          cidr=self.CIDRS[1],
+                                          network=network)
+        service_subnet = self.deserialize('json', res)['subnet']
+        # Create a port with device owner matching the correct service subnet
+        network = network['network']
+        port = self._create_port(self.fmt,
+                                 net_id=network['id'],
+                                 tenant_id=network['tenant_id'],
+                                 device_owner=matching_type)
+        self._assert_port_res(port, matching_type, service_subnet, True)
+
+    def test_create_port_without_matching_service_type(self, fallback=True):
+        with self.network() as network:
+            pass
+        subnet = ''
+        matching_type = 'compute:foo'
+        non_matching_type = 'network:foo'
+        if fallback:
+            # Create a subnet with no service types
+            res = self._create_service_subnet(network=network)
+            subnet = self.deserialize('json', res)['subnet']
+        # Create a subnet with a non-matching service type
+        self._create_service_subnet([non_matching_type],
+                                    cidr=self.CIDRS[1],
+                                    network=network)
+        # Create a port with device owner not matching the service subnet
+        network = network['network']
+        port = self._create_port(self.fmt,
+                                 net_id=network['id'],
+                                 tenant_id=network['tenant_id'],
+                                 device_owner=matching_type)
+        self._assert_port_res(port, matching_type, subnet, fallback)
+
+    def test_create_port_without_matching_service_type_no_fallback(self):
+        self.test_create_port_without_matching_service_type(fallback=False)
+
+    def test_create_port_no_device_owner(self, fallback=True):
+        with self.network() as network:
+            pass
+        subnet = ''
+        service_type = 'compute:foo'
+        if fallback:
+            # Create a subnet with no service types
+            res = self._create_service_subnet(network=network)
+            subnet = self.deserialize('json', res)['subnet']
+        # Create a subnet with a service_type
+        self._create_service_subnet([service_type],
+                                    cidr=self.CIDRS[1],
+                                    network=network)
+        # Create a port without a device owner
+        network = network['network']
+        port = self._create_port(self.fmt,
+                                 net_id=network['id'],
+                                 tenant_id=network['tenant_id'])
+        self._assert_port_res(port, '', subnet, fallback)
+
+    def test_create_port_no_device_owner_no_fallback(self):
+        self.test_create_port_no_device_owner(fallback=False)
+
+    def test_create_port_exhausted_subnet(self, fallback=True):
+        with self.network() as network:
+            pass
+        subnet = ''
+        service_type = 'compute:foo'
+        if fallback:
+            # Create a subnet with no service types
+            res = self._create_service_subnet(network=network)
+            subnet = self.deserialize('json', res)['subnet']
+        # Create a subnet with a service_type
+        res = self._create_service_subnet([service_type],
+                                          cidr=self.CIDRS[1],
+                                          network=network)
+        service_subnet = self.deserialize('json', res)['subnet']
+        # Update the service subnet with empty allocation pools
+        data = {'subnet': {'allocation_pools': []}}
+        req = self.new_update_request('subnets', data, service_subnet['id'])
+        res = self.deserialize(self.fmt, req.get_response(self.api))
+        # Create a port with a matching device owner
+        network = network['network']
+        port = self._create_port(self.fmt,
+                                 net_id=network['id'],
+                                 tenant_id=network['tenant_id'],
+                                 device_owner=service_type)
+        self._assert_port_res(port, service_type, subnet, fallback,
+                              error='IpAddressGenerationFailure')
+
+    def test_create_port_exhausted_subnet_no_fallback(self):
+        self.test_create_port_exhausted_subnet(fallback=False)
+
+
+class SubnetServiceTypesExtensionTestCasev6(
+        SubnetServiceTypesExtensionTestCase):
+    CIDRS = ['2001:db8:2::/64', '2001:db8:3::/64', '2001:db8:4::/64']
+    IP_VERSION = 6
len(list(checks.check_assertfalse(pass_code, - "neutron/tests/test_assert.py")))) + 1, len(list( + checks.check_asserttruefalse(true_fail_code1, + "neutron/tests/test_assert.py")))) + self.assertEqual( + 1, len(list( + checks.check_asserttruefalse(true_fail_code2, + "neutron/tests/test_assert.py")))) + self.assertEqual( + 0, len(list( + checks.check_asserttruefalse(true_pass_code, + "neutron/tests/test_assert.py")))) + self.assertEqual( + 1, len(list( + checks.check_asserttruefalse(false_fail_code1, + "neutron/tests/test_assert.py")))) + self.assertEqual( + 1, len(list( + checks.check_asserttruefalse(false_fail_code2, + "neutron/tests/test_assert.py")))) + self.assertFalse( + list( + checks.check_asserttruefalse(false_pass_code, + "neutron/tests/test_assert.py"))) def test_assertempty(self): fail_code = """ @@ -242,10 +249,10 @@ 1, len(list(checks.check_assertempty(fail_code % (ec, ec), "neutron/tests/test_assert.py")))) self.assertEqual( - 0, len(list(checks.check_assertfalse(pass_code1 % (ec, ec), + 0, len(list(checks.check_asserttruefalse(pass_code1 % (ec, ec), "neutron/tests/test_assert.py")))) self.assertEqual( - 0, len(list(checks.check_assertfalse(pass_code2 % ec, + 0, len(list(checks.check_asserttruefalse(pass_code2 % ec, "neutron/tests/test_assert.py")))) def test_assertisinstance(self): @@ -294,6 +301,43 @@ self.assertLineFails(f, 'from unittest.TestSuite') self.assertLineFails(f, 'import unittest') + def test_check_delayed_string_interpolation(self): + dummy_noqa = CREATE_DUMMY_MATCH_OBJECT.search('a') + + # In 'logical_line', Contents of strings replaced with + # "xxx" of same length. + fail_code1 = 'LOG.error(_LE("xxxxxxxxxxxxxxx") % value)' + fail_code2 = "LOG.warning(msg % 'xxxxx')" + + self.assertEqual( + 1, len(list(checks.check_delayed_string_interpolation(fail_code1, + "neutron/common/rpc.py", None)))) + self.assertEqual( + 1, len(list(checks.check_delayed_string_interpolation(fail_code2, + "neutron/common/rpc.py", None)))) + + pass_code1 = 'LOG.error(_LE("xxxxxxxxxxxxxxxxxx"), value)' + pass_code2 = "LOG.warning(msg, 'xxxxx')" + self.assertEqual( + 0, len(list(checks.check_delayed_string_interpolation(pass_code1, + "neutron/common/rpc.py", None)))) + self.assertEqual( + 0, len(list(checks.check_delayed_string_interpolation(pass_code2, + "neutron/common/rpc.py", None)))) + # check a file in neutron/tests + self.assertEqual( + 0, len(list(checks.check_delayed_string_interpolation(fail_code1, + "neutron/tests/test_assert.py", + None)))) + # check code including 'noqa' + self.assertEqual( + 0, len(list(checks.check_delayed_string_interpolation(fail_code1, + "neutron/common/rpc.py", dummy_noqa)))) + + def test_check_log_warn_deprecated(self): + bad = "LOG.warn(_LW('i am zlatan!'))" + self.assertEqual( + 1, len(list(checks.check_log_warn_deprecated(bad, 'f')))) # The following is borrowed from hacking/tests/test_doctest.py. 
# Tests defined in docstring is easier to understand diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py neutron-9.0.0~b3~dev557/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py 2016-08-03 20:10:34.000000000 +0000 @@ -142,22 +142,39 @@ ipam_req.AnySubnetRequest(self._tenant_id, 'meh', constants.IPv4, 24)) - def test_update_subnet_pools(self): + def _test_update_subnet_pools(self, allocation_pools, expected_pools=None): + if expected_pools is None: + expected_pools = allocation_pools cidr = '10.0.0.0/24' subnet, subnet_req = self._prepare_specific_subnet_request(cidr) self.ipam_pool.allocate_subnet(subnet_req) - allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'), - netaddr.IPRange('10.0.0.200', '10.0.0.250')] update_subnet_req = ipam_req.SpecificSubnetRequest( self._tenant_id, subnet['id'], cidr, gateway_ip=subnet['gateway_ip'], allocation_pools=allocation_pools) - ipam_subnet = self.ipam_pool.update_subnet(update_subnet_req) + self.ipam_pool.update_subnet(update_subnet_req) + ipam_subnet = self.ipam_pool.get_subnet(subnet['id']) self._verify_ipam_subnet_details( ipam_subnet, - cidr, self._tenant_id, subnet['gateway_ip'], allocation_pools) + cidr, self._tenant_id, subnet['gateway_ip'], expected_pools) + + def test_update_subnet_pools(self): + allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'), + netaddr.IPRange('10.0.0.200', '10.0.0.250')] + self._test_update_subnet_pools(allocation_pools) + + def test_update_subnet_pools_with_blank_pools(self): + allocation_pools = [] + self._test_update_subnet_pools(allocation_pools) + + def test_update_subnet_pools_with_none_pools(self): + allocation_pools = None + expected_pools = [netaddr.IPRange('10.0.0.2', '10.0.0.254')] + # Pools should not be changed on update + self._test_update_subnet_pools(allocation_pools, + expected_pools=expected_pools) def test_get_subnet(self): cidr = '10.0.0.0/24' diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/ipam/test_requests.py neutron-9.0.0~b3~dev557/neutron/tests/unit/ipam/test_requests.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/ipam/test_requests.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/ipam/test_requests.py 2016-08-03 20:10:34.000000000 +0000 @@ -288,7 +288,8 @@ def test_specific_address_request_is_loaded(self): for address in ('10.12.0.15', 'fffe::1'): ip = {'ip_address': address} - port = {'device_owner': 'compute:None'} + port = {'device_owner': + constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'} self.assertIsInstance( ipam_req.AddressRequestFactory.get_request(None, port, ip), ipam_req.SpecificAddressRequest) @@ -296,7 +297,8 @@ def test_any_address_request_is_loaded(self): for addr in [None, '']: ip = {'ip_address': addr} - port = {'device_owner': 'compute:None'} + port = {'device_owner': + constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'} self.assertIsInstance( ipam_req.AddressRequestFactory.get_request(None, port, ip), ipam_req.AnyAddressRequest) @@ -305,14 +307,14 @@ ip = {'mac': '6c:62:6d:de:cf:49', 'subnet_cidr': '2001:470:abcd::/64', 'eui64_address': True} - port = {'device_owner': 'compute:None'} + port = {'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'} self.assertIsInstance( ipam_req.AddressRequestFactory.get_request(None, port, ip), 
ipam_req.AutomaticAddressRequest) def test_prefernext_address_request_on_dhcp_port(self): ip = {} - port = {'device_owner': 'network:dhcp'} + port = {'device_owner': constants.DEVICE_OWNER_DHCP} self.assertIsInstance( ipam_req.AddressRequestFactory.get_request(None, port, ip), ipam_req.PreferNextAddressRequest) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/notifiers/test_nova.py neutron-9.0.0~b3~dev557/neutron/tests/unit/notifiers/test_nova.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/notifiers/test_nova.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/notifiers/test_nova.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,6 +16,7 @@ import mock from neutron_lib import constants as n_const +from neutron_lib import exceptions as n_exc from novaclient import exceptions as nova_exceptions from oslo_config import cfg from oslo_utils import uuidutils @@ -26,6 +27,7 @@ from neutron.tests import base DEVICE_OWNER_COMPUTE = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' +DEVICE_OWNER_BAREMETAL = n_const.DEVICE_OWNER_BAREMETAL_PREFIX + 'fake' class TestNovaNotify(base.BaseTestCase): @@ -175,6 +177,17 @@ 'delete_floatingip', {}, returned_obj) self.assertEqual(expected_event, event) + def test_delete_floatingip_deleted_port_no_notify(self): + port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222' + with mock.patch.object( + self.nova_notifier._plugin_ref, 'get_port', + side_effect=n_exc.PortNotFound(port_id=port_id)): + returned_obj = {'floatingip': + {'port_id': port_id}} + event = self.nova_notifier.create_port_changed_event( + 'delete_floatingip', {}, returned_obj) + self.assertIsNone(event) + def test_delete_floatingip_no_port_id_no_notify(self): returned_obj = {'floatingip': {'port_id': None}} @@ -309,6 +322,21 @@ {}, returned_obj) self.assertEqual(expected_event, event) + def test_delete_baremetal_port_notify(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222' + returned_obj = {'port': + {'device_owner': DEVICE_OWNER_BAREMETAL, + 'id': port_id, + 'device_id': device_id}} + + expected_event = {'server_uuid': device_id, + 'name': nova.VIF_DELETED, + 'tag': port_id} + event = self.nova_notifier.create_port_changed_event('delete_port', + {}, returned_obj) + self.assertEqual(expected_event, event) + @mock.patch('novaclient.client.Client') def test_endpoint_types(self, mock_client): nova.Notifier() @@ -328,3 +356,19 @@ region_name=cfg.CONF.nova.region_name, endpoint_type='internal', extensions=mock.ANY) + + def test_notify_port_active_direct(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222' + port = models_v2.Port(id=port_id, device_id=device_id, + device_owner=DEVICE_OWNER_COMPUTE) + expected_event = {'server_uuid': device_id, + 'name': nova.VIF_PLUGGED, + 'status': 'completed', + 'tag': port_id} + self.nova_notifier.notify_port_active_direct(port) + + self.assertEqual( + 1, len(self.nova_notifier.batch_notifier.pending_events)) + self.assertEqual(expected_event, + self.nova_notifier.batch_notifier.pending_events[0]) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/extensions/test_standardattributes.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/extensions/test_standardattributes.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/extensions/test_standardattributes.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/extensions/test_standardattributes.py 2016-08-29 
20:05:49.000000000 +0000 @@ -17,13 +17,14 @@ import sqlalchemy as sa from neutron.db import model_base +from neutron.db import standard_attr from neutron.objects import base as objects_base from neutron.tests.unit.objects import test_base from neutron.tests.unit import testlib_api class FakeDbModelWithStandardAttributes( - model_base.HasStandardAttributes, model_base.BASEV2): + standard_attr.HasStandardAttributes, model_base.BASEV2): id = sa.Column(sa.String(36), primary_key=True, nullable=False) item = sa.Column(sa.String(64)) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/network/extensions/test_port_security.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/network/extensions/test_port_security.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/network/extensions/test_port_security.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/network/extensions/test_port_security.py 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,36 @@ +# Copyright 2013 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.objects.network.extensions import port_security +from neutron.tests.unit.objects import test_base as obj_test_base +from neutron.tests.unit import testlib_api + + +class NetworkPortSecurityIfaceObjTestCase( + obj_test_base.BaseObjectIfaceTestCase): + + _test_class = port_security.NetworkPortSecurity + + +class NetworkPortSecurityDbObjTestCase(obj_test_base.BaseDbObjectTestCase, + testlib_api.SqlTestCase): + + _test_class = port_security.NetworkPortSecurity + + def setUp(self): + super(NetworkPortSecurityDbObjTestCase, self).setUp() + for db_obj, obj_field in zip(self.db_objs, self.obj_fields): + network = self._create_network() + db_obj['network_id'] = network['id'] + obj_field['id'] = network['id'] diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/network/test_network_segment.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/network/test_network_segment.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/network/test_network_segment.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/network/test_network_segment.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,36 @@ +# Copyright (c) 2016 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import itertools + +from neutron.objects.network import network_segment +from neutron.tests.unit.objects import test_base as obj_test_base +from neutron.tests.unit import testlib_api + + +class NetworkSegmentIfaceObjectTestCase(obj_test_base.BaseObjectIfaceTestCase): + + _test_class = network_segment.NetworkSegment + + +class NetworkSegmentDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, + testlib_api.SqlTestCase): + + _test_class = network_segment.NetworkSegment + + def setUp(self): + super(NetworkSegmentDbObjectTestCase, self).setUp() + self._create_test_network() + for obj in itertools.chain(self.db_objs, self.obj_fields): + obj['network_id'] = self._network['id'] diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/port/extensions/test_port_security.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/port/extensions/test_port_security.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/port/extensions/test_port_security.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/port/extensions/test_port_security.py 2016-08-03 20:10:34.000000000 +0000 @@ -30,8 +30,6 @@ def setUp(self): super(PortSecurityDbObjTestCase, self).setUp() self._create_test_network() - self._create_test_port(self._network) for obj in self.db_objs: - obj['port_id'] = self._port['id'] - for obj in self.obj_fields: - obj['port_id'] = self._port['id'] + self._create_port(id=obj['port_id'], + network_id=self._network['id']) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/qos/test_policy.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/qos/test_policy.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/qos/test_policy.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/qos/test_policy.py 2016-08-03 20:10:34.000000000 +0000 @@ -384,10 +384,10 @@ self.assertIn(rule_obj_band, policy_obj_v1_1.rules) self.assertIn(rule_obj_dscp, policy_obj_v1_1.rules) - self.assertEqual(policy_obj.VERSION, '1.1') - #TODO(davidsha) add testing for object version incrementation def test_object_version_degradation_1_1_to_1_0(self): + #NOTE(mangelajo): we should not check .VERSION, since that's the + # local version on the class definition policy_obj, rule_obj_band, rule_obj_dscp = ( self._create_test_policy_with_bw_and_dscp()) @@ -395,8 +395,6 @@ self.assertIn(rule_obj_band, policy_obj_v1_0.rules) self.assertNotIn(rule_obj_dscp, policy_obj_v1_0.rules) - #NOTE(mangelajo): we should not check .VERSION, since that's the - # local version on the class definition def test_filter_by_shared(self): policy_obj = policy.QosPolicy( @@ -416,3 +414,9 @@ self.context, shared=False) self.assertEqual(1, len(private_policies)) self.assertEqual('private-policy', private_policies[0].name) + + def test_get_objects_queries_constant(self): + # NOTE(korzen) QoSPolicy is using extra queries to reload rules. + # QoSPolicy currently cannot be loaded with a constant number of + # queries. It can be reworked in a follow-up patch.
+ pass diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/qos/test_rule.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/qos/test_rule.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/qos/test_rule.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/qos/test_rule.py 2016-08-03 20:10:34.000000000 +0000 @@ -12,6 +12,8 @@ from neutron_lib import constants +from oslo_versionedobjects import exception + from neutron.objects.qos import policy from neutron.objects.qos import rule from neutron.services.qos import qos_consts @@ -81,16 +83,30 @@ super(QosBandwidthLimitRuleDbObjectTestCase, self).setUp() # Prepare policy to be able to insert a rule - generated_qos_policy_id = self.db_obj['qos_policy_id'] - policy_obj = policy.QosPolicy(self.context, - id=generated_qos_policy_id) - policy_obj.create() + for obj in self.db_objs: + generated_qos_policy_id = obj['qos_policy_id'] + policy_obj = policy.QosPolicy(self.context, + id=generated_qos_policy_id) + policy_obj.create() class QosDscpMarkingRuleObjectTestCase(test_base.BaseObjectIfaceTestCase): _test_class = rule.QosDscpMarkingRule + def test_dscp_object_version_degradation(self): + dscp_rule = rule.QosDscpMarkingRule() + + self.assertRaises(exception.IncompatibleObjectVersion, + dscp_rule.obj_to_primitive, '1.0') + + def test_dscp_object_version(self): + dscp_rule = rule.QosDscpMarkingRule() + + prim = dscp_rule.obj_to_primitive('1.1') + + self.assertTrue(prim) + class QosDscpMarkingRuleDbObjectTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): @@ -100,7 +116,8 @@ def setUp(self): super(QosDscpMarkingRuleDbObjectTestCase, self).setUp() # Prepare policy to be able to insert a rule - generated_qos_policy_id = self.db_obj['qos_policy_id'] - policy_obj = policy.QosPolicy(self.context, - id=generated_qos_policy_id) - policy_obj.create() + for obj in self.db_objs: + generated_qos_policy_id = obj['qos_policy_id'] + policy_obj = policy.QosPolicy(self.context, + id=generated_qos_policy_id) + policy_obj.create() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/qos/test_rule_type.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/qos/test_rule_type.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/qos/test_rule_type.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/qos/test_rule_type.py 2016-08-03 20:10:34.000000000 +0000 @@ -44,3 +44,19 @@ def test_wrong_type(self): self.assertRaises(ValueError, rule_type.QosRuleType, type='bad_type') + + @staticmethod + def _policy_through_version(obj, version): + primitive = obj.obj_to_primitive(target_version=version) + return rule_type.QosRuleType.clean_obj_from_primitive(primitive) + + def test_object_version(self): + qos_rule_type = rule_type.QosRuleType() + rule_type_v1_1 = self._policy_through_version(qos_rule_type, '1.1') + + self.assertIn(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT, + tuple(rule_type_v1_1.fields['type'].AUTO_TYPE. + _valid_values)) + self.assertIn(qos_consts.RULE_TYPE_DSCP_MARKING, + tuple(rule_type_v1_1.fields['type'].AUTO_TYPE. 
+ _valid_values)) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_base.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_base.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_base.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_base.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,6 +13,7 @@ import collections import copy import itertools +import netaddr import random import mock @@ -29,19 +30,22 @@ from neutron.common import utils as common_utils from neutron import context from neutron.db import db_base_plugin_v2 +from neutron.db import model_base from neutron.db import models_v2 +from neutron.db import segments_db from neutron.objects import base from neutron.objects import common_types from neutron.objects.db import api as obj_db_api from neutron.objects import subnet from neutron.tests import base as test_base from neutron.tests import tools +from neutron.tests.unit.db import test_db_base_plugin_v2 SQLALCHEMY_COMMIT = 'sqlalchemy.engine.Connection._commit_impl' OBJECTS_BASE_OBJ_FROM_PRIMITIVE = ('oslo_versionedobjects.base.' 'VersionedObject.obj_from_primitive') -TIMESTAMP_FIELDS = ['created_at', 'updated_at'] +TIMESTAMP_FIELDS = ['created_at', 'updated_at', 'revision_number'] class FakeModel(object): @@ -63,7 +67,32 @@ primary_keys = ['field1'] - foreign_keys = {'field1': 'id'} + foreign_keys = { + 'FakeNeutronObjectCompositePrimaryKeyWithId': {'field1': 'id'}, + 'FakeNeutronDbObject': {'field2': 'id'}, + 'FakeNeutronObjectUniqueKey': {'field3': 'id'}, + } + + fields = { + 'field1': obj_fields.UUIDField(), + 'field2': obj_fields.UUIDField(), + 'field3': obj_fields.UUIDField(), + } + + +@obj_base.VersionedObjectRegistry.register_if(False) +class FakeSmallNeutronObjectWithMultipleParents(base.NeutronDbObject): + # Version 1.0: Initial version + VERSION = '1.0' + + db_model = ObjectFieldsModel + + primary_keys = ['field1', 'field2'] + + foreign_keys = { + 'FakeParent': {'field1': 'id'}, + 'FakeParent2': {'field2': 'id'}, + } fields = { 'field1': obj_fields.UUIDField(), @@ -72,6 +101,25 @@ @obj_base.VersionedObjectRegistry.register_if(False) +class FakeParent(base.NeutronDbObject): + # Version 1.0: Initial version + VERSION = '1.0' + + db_model = ObjectFieldsModel + + primary_keys = ['field1', 'field2'] + + fields = { + 'id': obj_fields.UUIDField(), + 'children': obj_fields.ListOfObjectsField( + 'FakeSmallNeutronObjectWithMultipleParents', + nullable=True) + } + + synthetic_fields = ['children'] + + +@obj_base.VersionedObjectRegistry.register_if(False) class FakeWeirdKeySmallNeutronObject(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' @@ -80,7 +128,10 @@ primary_keys = ['field1'] - foreign_keys = {'field1': 'weird_key'} + foreign_keys = { + 'FakeNeutronObjectNonStandardPrimaryKey': {'field1': 'weird_key'}, + 'FakeNeutronObjectCompositePrimaryKey': {'field2': 'weird_key'}, + } fields = { 'field1': obj_fields.UUIDField(), @@ -89,7 +140,7 @@ @obj_base.VersionedObjectRegistry.register_if(False) -class FakeNeutronObject(base.NeutronDbObject): +class FakeNeutronDbObject(base.NeutronDbObject): # Version 1.0: Initial version VERSION = '1.0' @@ -149,6 +200,28 @@ @obj_base.VersionedObjectRegistry.register_if(False) +class FakeNeutronObjectUniqueKey(base.NeutronDbObject): + # Version 1.0: Initial version + VERSION = '1.0' + + db_model = FakeModel + + primary_keys = ['id', 'id2'] + unique_keys = [['unique_key'], ['id2']] + + fields = { + 'id': obj_fields.UUIDField(), + 'id2': 
obj_fields.UUIDField(), + 'unique_key': obj_fields.StringField(), + 'field1': obj_fields.StringField(), + 'obj_field': obj_fields.ObjectField('FakeSmallNeutronObject', + nullable=True) + } + + synthetic_fields = ['obj_field'] + + +@obj_base.VersionedObjectRegistry.register_if(False) class FakeNeutronObjectRenamedField(base.NeutronDbObject): """ Testing renaming the parameter from DB to NeutronDbObject @@ -199,7 +272,9 @@ db_model = ObjectFieldsModel - foreign_keys = {'field1': 'id', 'field2': 'id'} + foreign_keys = { + 'FakeNeutronObjectSyntheticField': {'field1': 'id', 'field2': 'id'}, + } fields = { 'field1': obj_fields.UUIDField(), @@ -223,6 +298,47 @@ synthetic_fields = ['obj_field'] +@obj_base.VersionedObjectRegistry.register_if(False) +class FakeNeutronObjectWithProjectId(base.NeutronDbObject): + # Version 1.0: Initial version + VERSION = '1.0' + + db_model = FakeModel + + fields = { + 'id': obj_fields.UUIDField(), + 'project_id': obj_fields.StringField(), + 'field2': obj_fields.UUIDField(), + } + + +@obj_base.VersionedObjectRegistry.register_if(False) +class FakeNeutronObject(base.NeutronObject): + # Version 1.0: Initial version + VERSION = '1.0' + + fields = { + 'id': obj_fields.UUIDField(), + 'project_id': obj_fields.StringField(), + 'field2': obj_fields.UUIDField(), + } + + @classmethod + def get_object(cls, context, **kwargs): + if not hasattr(cls, '_obj'): + cls._obj = FakeNeutronObject(id=uuidutils.generate_uuid(), + project_id='fake-id', + field2=uuidutils.generate_uuid()) + return cls._obj + + @classmethod + def get_objects(cls, context, _pager=None, count=1, **kwargs): + return [ + cls.get_object(context, **kwargs) + for i in range(count) + ] + + def get_random_dscp_mark(): return random.choice(constants.VALID_DSCP_MARKS) @@ -253,6 +369,10 @@ obj_fields.IPAddressField: tools.get_random_ip_address, common_types.MACAddressField: tools.get_random_EUI, common_types.IPV6ModeEnumField: tools.get_random_ipv6_mode, + common_types.FlowDirectionEnumField: tools.get_random_flow_direction, + common_types.EtherTypeEnumField: tools.get_random_ether_type, + common_types.IpProtocolEnumField: tools.get_random_ip_protocol, + common_types.PortRangeField: tools.get_random_port, } @@ -272,9 +392,14 @@ if field not in TIMESTAMP_FIELDS} +def get_non_synthetic_fields(objclass, obj_fields): + return {field: value for field, value in obj_fields.items() + if not objclass.is_synthetic(field)} + + class _BaseObjectTestCase(object): - _test_class = FakeNeutronObject + _test_class = FakeNeutronDbObject CORE_PLUGIN = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' @@ -316,12 +441,14 @@ return obj_cls.modify_fields_to_db(fields) @classmethod - def generate_object_keys(cls, obj_cls): + def generate_object_keys(cls, obj_cls, field_names=None): + if field_names is None: + field_names = obj_cls.primary_keys keys = {} - for field, field_obj in obj_cls.fields.items(): - if field in obj_cls.primary_keys: - generator = FIELD_TYPE_VALUE_GENERATOR_MAP[type(field_obj)] - keys[field] = generator() + for field in field_names: + field_obj = obj_cls.fields[field] + generator = FIELD_TYPE_VALUE_GENERATOR_MAP[type(field_obj)] + keys[field] = generator() return keys def get_updatable_fields(self, fields): @@ -334,6 +461,20 @@ def fake_get_objects(self, context, model, **kwargs): return self.model_map[model] + def _get_object_synthetic_fields(self, objclass): + return [field for field in objclass.synthetic_fields + if objclass.is_object_field(field)] + + def _get_ovo_object_class(self, objclass, field): + try: + name = 
objclass.fields[field].objname + return obj_base.VersionedObjectRegistry.obj_classes().get(name)[0] + except TypeError: + # NOTE(korzen) some synthetic fields are not handled by + # this method, for example the ones that have subclasses, see + # QosRule + return + class BaseObjectIfaceTestCase(_BaseObjectTestCase, test_base.BaseTestCase): @@ -341,6 +482,7 @@ super(BaseObjectIfaceTestCase, self).setUp() self.model_map = collections.defaultdict(list) self.model_map[self._test_class.db_model] = self.db_objs + self.pager_map = collections.defaultdict(lambda: None) def test_get_object(self): with mock.patch.object(obj_db_api, 'get_object', @@ -362,25 +504,53 @@ self.assertIsNone(obj) def test_get_object_missing_primary_key(self): - obj_keys = self.generate_object_keys(self._test_class) - obj_keys.popitem() + non_unique_fields = (set(self._test_class.fields.keys()) - + set(self._test_class.primary_keys) - + set(itertools.chain.from_iterable( + self._test_class.unique_keys))) + obj_keys = self.generate_object_keys(self._test_class, + non_unique_fields) self.assertRaises(base.NeutronPrimaryKeyMissing, self._test_class.get_object, self.context, **obj_keys) + def test_get_object_unique_key(self): + if not self._test_class.unique_keys: + self.skipTest('No unique keys found in test class %r' % + self._test_class) + + for unique_keys in self._test_class.unique_keys: + with mock.patch.object(obj_db_api, 'get_object', + return_value=self.db_obj) \ + as get_object_mock: + with mock.patch.object(obj_db_api, 'get_objects', + side_effect=self.fake_get_objects): + obj_keys = self.generate_object_keys(self._test_class, + unique_keys) + obj = self._test_class.get_object(self.context, + **obj_keys) + self.assertTrue(self._is_test_class(obj)) + self.assertEqual(self.obj_fields[0], + get_obj_db_fields(obj)) + get_object_mock.assert_called_once_with( + self.context, self._test_class.db_model, + **self._test_class.modify_fields_to_db(obj_keys)) + def _get_synthetic_fields_get_objects_calls(self, db_objs): mock_calls = [] for db_obj in db_objs: for field in self._test_class.synthetic_fields: if self._test_class.is_object_field(field): - obj_class = obj_base.VersionedObjectRegistry.obj_classes( - ).get(self._test_class.fields[ - field].objname)[0] + obj_class = self._get_ovo_object_class(self._test_class, + field) + foreign_keys = obj_class.foreign_keys.get( + self._test_class.__name__) mock_calls.append( mock.call( - self.context, obj_class.db_model, _pager=None, + self.context, obj_class.db_model, + _pager=self.pager_map[obj_class.obj_name()], **{k: db_obj[v] - for k, v in obj_class.foreign_keys.items()})) + for k, v in foreign_keys.items()})) return mock_calls def test_get_objects(self): @@ -390,7 +560,8 @@ objs = self._test_class.get_objects(self.context) self._validate_objects(self.db_objs, objs) mock_calls = [ - mock.call(self.context, self._test_class.db_model, _pager=None) + mock.call(self.context, self._test_class.db_model, + _pager=self.pager_map[self._test_class.obj_name()]) ] mock_calls.extend(self._get_synthetic_fields_get_objects_calls( self.db_objs)) @@ -404,10 +575,10 @@ objs = self._test_class.get_objects(self.context, **self.valid_field_filter) self._validate_objects(self.db_objs, objs) - mock_calls = [ mock.call( - self.context, self._test_class.db_model, _pager=None, + self.context, self._test_class.db_model, + _pager=self.pager_map[self._test_class.obj_name()], **self._test_class.modify_fields_to_db(self.valid_field_filter) ) ] @@ -416,13 +587,17 @@ get_objects_mock.assert_has_calls(mock_calls) 
def test_get_objects_mixed_fields(self): - synthetic_fields = self._test_class.synthetic_fields + synthetic_fields = ( + set(self._test_class.synthetic_fields) - + self._test_class.extra_filter_names + ) if not synthetic_fields: - self.skipTest('No synthetic fields found in test class %r' % + self.skipTest('No synthetic fields that are not extra filters ' + 'found in test class %r' % self._test_class) filters = copy.copy(self.valid_field_filter) - filters[synthetic_fields[0]] = 'xxx' + filters[synthetic_fields.pop()] = 'xxx' with mock.patch.object(obj_db_api, 'get_objects', return_value=self.db_objs): @@ -430,17 +605,21 @@ self._test_class.get_objects, self.context, **filters) - def test_get_objects_synthetic_fields(self): - synthetic_fields = self._test_class.synthetic_fields + def test_get_objects_synthetic_fields_not_extra_filters(self): + synthetic_fields = ( + set(self._test_class.synthetic_fields) - + self._test_class.extra_filter_names + ) if not synthetic_fields: - self.skipTest('No synthetic fields found in test class %r' % + self.skipTest('No synthetic fields that are not extra filters ' + 'found in test class %r' % self._test_class) with mock.patch.object(obj_db_api, 'get_objects', side_effect=self.fake_get_objects): self.assertRaises(base.exceptions.InvalidInput, self._test_class.get_objects, self.context, - **{synthetic_fields[0]: 'xxx'}) + **{synthetic_fields.pop(): 'xxx'}) def test_get_objects_invalid_fields(self): with mock.patch.object(obj_db_api, 'get_objects', @@ -449,6 +628,19 @@ self._test_class.get_objects, self.context, fake_field='xxx') + def test_count(self): + if not isinstance(self._test_class, base.NeutronDbObject): + self.skipTest('Class %s does not inherit from NeutronDbObject' % + self._test_class) + expected = 10 + with mock.patch.object(obj_db_api, 'count', return_value=expected): + self.assertEqual(expected, self._test_class.count(self.context)) + + def test_count_invalid_fields(self): + self.assertRaises(base.exceptions.InvalidInput, + self._test_class.count, self.context, + fake_field='xxx') + def _validate_objects(self, expected, observed): self.assertTrue(all(self._is_test_class(obj) for obj in observed)) self.assertEqual( @@ -491,7 +683,7 @@ obj = self._test_class(self.context, **self.obj_fields[0]) self.assertRaises(base.NeutronDbObjectDuplicateEntry, obj.create) - def test_update_nonidentifying_fields(self): + def test_update_fields(self): if not self._test_class.primary_keys: self.skipTest( 'Test class %r has no primary keys' % self._test_class) @@ -499,19 +691,42 @@ with mock.patch.object(obj_base.VersionedObject, 'obj_reset_changes'): expected = self._test_class(self.context, **self.obj_fields[0]) for key, val in self.obj_fields[1].items(): - if key not in expected.primary_keys: + if key not in expected.fields_no_update: setattr(expected, key, val) observed = self._test_class(self.context, **self.obj_fields[0]) - observed.update_nonidentifying_fields(self.obj_fields[1], - reset_changes=True) + observed.update_fields(self.obj_fields[1], reset_changes=True) self.assertEqual(expected, observed) self.assertTrue(observed.obj_reset_changes.called) with mock.patch.object(obj_base.VersionedObject, 'obj_reset_changes'): obj = self._test_class(self.context, **self.obj_fields[0]) - obj.update_nonidentifying_fields(self.obj_fields[1]) + obj.update_fields(self.obj_fields[1]) self.assertFalse(obj.obj_reset_changes.called) + def test_extra_fields(self): + if not len(self._test_class.obj_extra_fields): + self.skipTest( + 'Test class %r has no obj_extra_fields' % 
self._test_class) + obj = self._test_class(self.context, **self.obj_fields[0]) + for field in self._test_class.obj_extra_fields: + # field is accessible and cannot be set by any value + getattr(obj, field) + self.assertTrue(field in obj.to_dict().keys()) + self.assertRaises(AttributeError, setattr, obj, field, "1") + + def test_fields_no_update(self): + obj = self._test_class(self.context, **self.obj_fields[0]) + for field in self._test_class.fields_no_update: + self.assertTrue(hasattr(obj, field)) + + def test_get_tenant_id(self): + if not hasattr(self._test_class, 'project_id'): + self.skipTest( + 'Test class %r has no project_id field' % self._test_class) + obj = self._test_class(self.context, **self.obj_fields[0]) + project_id = self.obj_fields[0]['project_id'] + self.assertEqual(project_id, obj.tenant_id) + @mock.patch.object(obj_db_api, 'update_object') def test_update_no_changes(self, update_mock): with mock.patch.object(base.NeutronDbObject, @@ -614,26 +829,20 @@ def test_to_dict_synthetic_fields(self): cls_ = self._test_class - object_fields = [ - field - for field in cls_.synthetic_fields - if cls_.is_object_field(field) - ] + object_fields = self._get_object_synthetic_fields(cls_) if not object_fields: self.skipTest( 'No object fields found in test class %r' % cls_) for field in object_fields: obj = cls_(self.context, **self.obj_fields[0]) - objclasses = obj_base.VersionedObjectRegistry.obj_classes( - ).get(cls_.fields[field].objname) - if not objclasses: - # NOTE(ihrachys): this test does not handle fields of types - # that are not registered (for example, QosRule) + objclass = self._get_ovo_object_class(cls_, field) + if not objclass: continue - objclass = objclasses[0] + child = objclass( - self.context, **self.get_random_fields(obj_cls=objclass) + self.context, **objclass.modify_fields_from_db( + self.get_random_fields(obj_cls=objclass)) ) child_dict = child.to_dict() if isinstance(cls_.fields[field], obj_fields.ListOfObjectsField): @@ -663,6 +872,50 @@ _test_class = FakeNeutronObjectCompositePrimaryKey +class BaseDbObjectUniqueKeysTestCase(BaseObjectIfaceTestCase): + + _test_class = FakeNeutronObjectUniqueKey + + +class UniqueKeysTestCase(test_base.BaseTestCase): + + def test_class_creation(self): + m_get_unique_keys = mock.patch.object(model_base, 'get_unique_keys') + with m_get_unique_keys as get_unique_keys: + get_unique_keys.return_value = [['field1'], + ['field2', 'db_field3']] + + @obj_base.VersionedObjectRegistry.register_if(False) + class UniqueKeysTestObject(base.NeutronDbObject): + # Version 1.0: Initial version + VERSION = '1.0' + + db_model = FakeModel + + primary_keys = ['id'] + + fields = { + 'id': obj_fields.UUIDField(), + 'field1': obj_fields.UUIDField(), + 'field2': obj_fields.UUIDField(), + 'field3': obj_fields.UUIDField(), + } + + fields_need_translation = {'field3': 'db_field3'} + expected = {('field1',), ('field2', 'field3')} + observed = {tuple(sorted(key)) + for key in UniqueKeysTestObject.unique_keys} + self.assertEqual(expected, observed) + + +class NeutronObjectCountTestCase(test_base.BaseTestCase): + + def test_count(self): + expected = 10 + self.assertEqual( + expected, FakeNeutronObject.count(None, count=expected)) + + class BaseDbObjectCompositePrimaryKeyWithIdTestCase(BaseObjectIfaceTestCase): _test_class = FakeNeutronObjectCompositePrimaryKeyWithId @@ -673,6 +926,24 @@ _test_class = FakeNeutronObjectRenamedField +class BaseObjectIfaceWithProjectIdTestCase(BaseObjectIfaceTestCase): + + _test_class = FakeNeutronObjectWithProjectId + + def 
test_update_fields_using_tenant_id(self): + obj = self._test_class(self.context, **self.obj_fields[0]) + obj.obj_reset_changes() + + tenant_id = obj['tenant_id'] + new_obj_fields = dict() + new_obj_fields['tenant_id'] = uuidutils.generate_uuid() + new_obj_fields['field2'] = uuidutils.generate_uuid() + + obj.update_fields(new_obj_fields) + self.assertEqual(set(['field2']), obj.obj_what_changed()) + self.assertEqual(tenant_id, obj.project_id) + + class BaseDbObjectMultipleForeignKeysTestCase(_BaseObjectTestCase, test_base.BaseTestCase): @@ -684,10 +955,45 @@ obj.load_synthetic_db_fields) -class BaseDbObjectTestCase(_BaseObjectTestCase): +class BaseDbObjectMultipleParentsForForeignKeysTestCase( + _BaseObjectTestCase, + test_base.BaseTestCase): + + _test_class = FakeParent + + def test_load_synthetic_db_fields_with_multiple_parents(self): + child_cls = FakeSmallNeutronObjectWithMultipleParents + self.obj_registry.register(child_cls) + self.obj_registry.register(FakeParent) + obj = self._test_class(self.context, **self.obj_fields[0]) + fake_children = [ + child_cls( + self.context, **child_cls.modify_fields_from_db( + self.get_random_fields(obj_cls=child_cls)) + ) + for _ in range(5) + ] + with mock.patch.object(child_cls, 'get_objects', + return_value=fake_children) as get_objects: + obj.load_synthetic_db_fields() + get_objects.assert_called_once_with(self.context, field1=obj.id) + self.assertEqual(fake_children, obj.children) + + +class BaseDbObjectTestCase(_BaseObjectTestCase, + test_db_base_plugin_v2.DbOperationBoundMixin): def setUp(self): super(BaseDbObjectTestCase, self).setUp() self.useFixture(tools.CommonDbMixinHooksFixture()) + synthetic_fields = self._get_object_synthetic_fields(self._test_class) + for synth_field in synthetic_fields: + objclass = self._get_ovo_object_class(self._test_class, + synth_field) + if not objclass: + continue + for db_obj in self.db_objs: + objclass_fields = self.get_random_fields(objclass) + db_obj[synth_field] = [objclass_fields] def _create_test_network(self): # TODO(ihrachys): replace with network.create() once we get an object @@ -696,13 +1002,19 @@ models_v2.Network, {'name': 'test-network1'}) + def _create_network(self): + name = "test-network-%s" % tools.get_random_string(4) + return obj_db_api.create_object(self.context, + models_v2.Network, + {'name': name}) + def _create_test_subnet(self, network): test_subnet = { - 'tenant_id': uuidutils.generate_uuid(), + 'project_id': uuidutils.generate_uuid(), 'name': 'test-subnet1', 'network_id': network['id'], 'ip_version': 4, - 'cidr': '10.0.0.0/24', + 'cidr': netaddr.IPNetwork('10.0.0.0/24'), 'gateway_ip': '10.0.0.1', 'enable_dhcp': 1, 'ipv6_ra_mode': None, @@ -736,10 +1048,22 @@ # implementation for ports return obj_db_api.create_object(self.context, models_v2.Port, attrs) + def _create_test_segment(self, network): + test_segment = { + 'network_id': network['id'], + 'network_type': 'vxlan', + } + # TODO(korzen): replace with segment.create() once we get an object + # implementation for segments + self._segment = obj_db_api.create_object(self.context, + segments_db.NetworkSegment, + test_segment) + def _create_test_port(self, network): self._port = self._create_port(network_id=network['id']) def _make_object(self, fields): + fields = get_non_synthetic_fields(self._test_class, fields) return self._test_class( self.context, **remove_timestamps_from_fields(fields)) @@ -849,6 +1173,91 @@ self._test_class.get_objects(self.context, foo=42) self.assertEqual({'foo': [42]}, self.filtered_args) + def 
test_filtering_by_fields(self): + obj = self._make_object(self.obj_fields[0]) + obj.create() + + for field in remove_timestamps_from_fields(self.obj_fields[0]): + filters = {field: [self.obj_fields[0][field]]} + new = self._test_class.get_objects(self.context, **filters) + self.assertEqual([obj], new, 'Filtering by %s failed.' % field) + + def _get_non_synth_fields(self, objclass, db_attrs): + fields = objclass.modify_fields_from_db(db_attrs) + fields = remove_timestamps_from_fields(fields) + fields = get_non_synthetic_fields(objclass, fields) + return fields + + def _create_object_with_synthetic_fields(self, db_obj): + cls_ = self._test_class + object_fields = self._get_object_synthetic_fields(cls_) + + # create base object + obj = cls_(self.context, **self._get_non_synth_fields(cls_, db_obj)) + obj.create() + + # create objects that are going to be loaded into the base object + # through synthetic fields + for field in object_fields: + objclass = self._get_ovo_object_class(cls_, field) + if not objclass: + continue + objclass_fields = self._get_non_synth_fields(objclass, + db_obj[field][0]) + + # make sure children point to the base object + foreign_keys = objclass.foreign_keys.get(obj.__class__.__name__) + for local_field, foreign_key in foreign_keys.items(): + objclass_fields[local_field] = obj.get(foreign_key) + + synth_field_obj = objclass(self.context, **objclass_fields) + synth_field_obj.create() + + # populate the base object synthetic fields with created children + if isinstance(cls_.fields[field], obj_fields.ObjectField): + setattr(obj, field, synth_field_obj) + else: + setattr(obj, field, [synth_field_obj]) + + # reset the object so that we can compare it to other clean objects + obj.obj_reset_changes([field]) + return obj + + def test_get_object_with_synthetic_fields(self): + object_fields = self._get_object_synthetic_fields(self._test_class) + if not object_fields: + self.skipTest( + 'No synthetic object fields found ' + 'in test class %r' % self._test_class + ) + obj = self._create_object_with_synthetic_fields(self.db_objs[0]) + listed_obj = self._test_class.get_object( + self.context, **obj._get_composite_keys()) + self.assertTrue(listed_obj) + self.assertEqual(obj, listed_obj) + + # NOTE(korzen) the _list method is used in neutron.tests.unit.db. + # test_db_base_plugin_v2.DbOperationBoundMixin in _list_and_count_queries() + # This is used in test_subnet for asserting that the number of queries is + # constant. It can also be used for port and network objects when ready.
+ def _list(self, resource, neutron_context): + cls_ = resource + return cls_.get_objects(neutron_context) + + def test_get_objects_queries_constant(self): + iter_db_obj = iter(self.db_objs) + + def _create(): + self._create_object_with_synthetic_fields(next(iter_db_obj)) + + self._assert_object_list_queries_constant(_create, self._test_class) + + def test_count(self): + for fields in self.obj_fields: + self._make_object(fields).create() + self.assertEqual( + len(self.obj_fields), self._test_class.count(self.context)) + class UniqueObjectBase(test_base.BaseTestCase): def setUp(self): @@ -885,5 +1294,15 @@ self.assertNotIn( filter_name, self.registered_object.extra_filter_names) base.register_filter_hook_on_model( - FakeNeutronObject.db_model, filter_name) + FakeNeutronDbObject.db_model, filter_name) self.assertIn(filter_name, self.registered_object.extra_filter_names) + + +class PagerTestCase(test_base.BaseTestCase): + def test_comparison(self): + pager = base.Pager(sorts=[('order', True)]) + pager2 = base.Pager(sorts=[('order', True)]) + self.assertEqual(pager, pager2) + + pager3 = base.Pager() + self.assertNotEqual(pager, pager3) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_common_types.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_common_types.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_common_types.py 2016-05-25 11:54:23.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_common_types.py 2016-08-29 20:05:49.000000000 +0000 @@ -12,6 +12,8 @@ # under the License. import abc +import itertools +import random from neutron_lib import constants as const @@ -55,7 +57,7 @@ super(IPV6ModeEnumFieldTest, self).setUp() self.field = common_types.IPV6ModeEnumField() self.coerce_good_values = [(mode, mode) - for mode in constants.IPV6_MODES] + for mode in const.IPV6_MODES] self.coerce_bad_values = ['6', 4, 'type', 'slaacc'] self.to_primitive_values = self.coerce_good_values self.from_primitive_values = self.coerce_good_values @@ -187,10 +189,31 @@ def setUp(self): super(IpProtocolEnumFieldTest, self).setUp() self.field = common_types.IpProtocolEnumField() - self.coerce_good_values = [(val, val) - for val in - list(const.IP_PROTOCOL_MAP.keys())] - self.coerce_bad_values = ['test', '8', 10, 'Udp'] + self.coerce_good_values = [ + (val, val) + for val in itertools.chain( + const.IP_PROTOCOL_MAP.keys(), + [str(v) for v in const.IP_PROTOCOL_MAP.values()] + ) + ] + self.coerce_bad_values = ['test', 'Udp', 256] + try: + # pick a random protocol number that is not in the map of supported + # protocols + self.coerce_bad_values.append( + str( + random.choice( + list( + set(range(256)) - + set(const.IP_PROTOCOL_MAP.values()) + ) + ) + ) + ) + except IndexError: + # stay paranoid and guard against the impossible future when all + # protocols are in the map + pass self.to_primitive_values = self.coerce_good_values self.from_primitive_values = self.coerce_good_values diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_objects.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_objects.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_objects.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_objects.py 2016-08-29 20:05:49.000000000 +0000 @@ -18,30 +18,35 @@ from oslo_versionedobjects import base as obj_base from oslo_versionedobjects import fixture +from neutron.common import utils from neutron import objects from neutron.tests import base as test_base -from 
neutron.tests import tools # NOTE: The hashes in this list should only be changed if they come with a # corresponding version bump in the affected objects. object_data = { + '_DefaultSecurityGroup': '1.0-971520cb2e0ec06d747885a0cf78347f', 'AddressScope': '1.0-25560799db384acfe1549634959a82b4', 'DNSNameServer': '1.0-bf87a85327e2d812d1666ede99d9918b', 'ExtraDhcpOpt': '1.0-632f689cbeb36328995a7aed1d0a78d3', 'IPAllocationPool': '1.0-371016a6480ed0b4299319cb46d9215d', - 'PortSecurity': '1.0-cf5b382a0112080ec4e0f23f697c7ab2', + 'NetworkPortSecurity': '1.0-b30802391a87945ee9c07582b4ff95e3', + 'NetworkSegment': '1.0-865567a6f70eb85cf33fb7a5575a4eab', + 'PortSecurity': '1.0-b30802391a87945ee9c07582b4ff95e3', 'AllowedAddressPair': '1.0-9f9186b6f952fbf31d257b0458b852c0', 'QosBandwidthLimitRule': '1.1-4e44a8f5c2895ab1278399f87b40a13d', 'QosDscpMarkingRule': '1.1-0313c6554b34fd10c753cb63d638256c', 'QosRuleType': '1.1-8a53fef4c6a43839d477a85b787d22ce', 'QosPolicy': '1.1-7c5659e1c1f64395223592d3d3293e22', 'Route': '1.0-a9883a63b416126f9e345523ec09483b', - 'Subnet': '1.0-ebb5120b90ecd4eb9207d3ae5c209f19', - 'SubnetPool': '1.0-cc182c15eca5ece10c74f923d066163a', + 'SecurityGroup': '1.0-e26b90c409b31fd2e3c6fcec402ac0b9', + 'SecurityGroupRule': '1.0-e9b8dace9d48b936c62ad40fe1f339d5', + 'Subnet': '1.0-b71e720f45fff2a39759940e010be7d1', + 'SubnetPool': '1.0-e8300bfbc4762cc88a7f6205b52da2f8', 'SubnetPoolPrefix': '1.0-13c15144135eb869faa4a76dc3ee3b6c', 'SubPort': '1.0-72c8471068db1f0491b5480fe49b52bb', - 'Trunk': '1.0-4963426d21a268170b7e69015e004fc5', + 'Trunk': '1.0-80ebebb57f2b0dbb510f84d91421ed10', } @@ -51,7 +56,7 @@ super(TestObjectVersions, self).setUp() # NOTE(ihrachys): seed registry with all objects under neutron.objects # before validating the hashes - tools.import_modules_recursively(os.path.dirname(objects.__file__)) + utils.import_modules_recursively(os.path.dirname(objects.__file__)) def test_versions(self): checker = fixture.ObjectVersionChecker( @@ -59,8 +64,8 @@ fingerprints = checker.get_hashes() if os.getenv('GENERATE_HASHES'): - file('object_hashes.txt', 'w').write( - pprint.pformat(fingerprints)) + with open('object_hashes.txt', 'w') as hashes_file: + hashes_file.write(pprint.pformat(fingerprints)) expected, actual = checker.test_hashes(object_data) self.assertEqual(expected, actual, diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_securitygroup.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_securitygroup.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_securitygroup.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_securitygroup.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,145 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import itertools + +from neutron.objects import securitygroup +from neutron.tests.unit.objects import test_base +from neutron.tests.unit import testlib_api + + +class SecurityGroupIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): + + _test_class = securitygroup.SecurityGroup + + +class SecurityGroupDbObjTestCase(test_base.BaseDbObjectTestCase, + testlib_api.SqlTestCase): + + _test_class = securitygroup.SecurityGroup + + def setUp(self): + super(SecurityGroupDbObjTestCase, self).setUp() + # TODO(ihrachys): consider refactoring base test class to set None for + # all nullable fields + for db_obj in self.db_objs: + for rule in db_obj['rules']: + # we either make it null, or create remote groups for each rule + # generated; we picked the former here + rule['remote_group_id'] = None + + def test_is_default_True(self): + fields = self.obj_fields[0].copy() + sg_obj = self._make_object(fields) + sg_obj.is_default = True + sg_obj.create() + + default_sg_obj = securitygroup._DefaultSecurityGroup.get_object( + self.context, + project_id=sg_obj.project_id, + security_group_id=sg_obj.id) + self.assertIsNotNone(default_sg_obj) + + sg_obj = securitygroup.SecurityGroup.get_object( + self.context, + id=sg_obj.id, + project_id=sg_obj.project_id + ) + self.assertTrue(sg_obj.is_default) + + def test_is_default_False(self): + fields = self.obj_fields[0].copy() + sg_obj = self._make_object(fields) + sg_obj.is_default = False + sg_obj.create() + + default_sg_obj = securitygroup._DefaultSecurityGroup.get_object( + self.context, + project_id=sg_obj.project_id, + security_group_id=sg_obj.id) + self.assertIsNone(default_sg_obj) + + sg_obj = securitygroup.SecurityGroup.get_object( + self.context, + id=sg_obj.id, + project_id=sg_obj.project_id + ) + self.assertFalse(sg_obj.is_default) + + def test_get_object_filter_by_is_default(self): + fields = self.obj_fields[0].copy() + sg_obj = self._make_object(fields) + sg_obj.is_default = True + sg_obj.create() + + listed_obj = securitygroup.SecurityGroup.get_object( + self.context, + id=sg_obj.id, + project_id=sg_obj.project_id, + is_default=True + ) + self.assertIsNotNone(listed_obj) + self.assertEqual(sg_obj, listed_obj) + + def test_get_objects_queries_constant(self): + # TODO(electrocucaracha) SecurityGroup is using the SecurityGroupRule + # object to reload rules, which costs an extra SQL query each time + # _load_is_default is called in get_object(s). SecurityGroup has a + # defined relationship for SecurityGroupRules, so it should be possible + # to reuse the side-loaded values for this. To be reworked in a + # follow-up patch.
+ pass + + +class DefaultSecurityGroupIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): + + _test_class = securitygroup._DefaultSecurityGroup + + +class DefaultSecurityGroupDbObjTestCase(test_base.BaseDbObjectTestCase, + testlib_api.SqlTestCase): + + _test_class = securitygroup._DefaultSecurityGroup + + def setUp(self): + super(DefaultSecurityGroupDbObjTestCase, self).setUp() + sg_db_obj = self.get_random_fields(securitygroup.SecurityGroup) + sg_fields = securitygroup.SecurityGroup.modify_fields_from_db( + sg_db_obj) + self.sg_obj = securitygroup.SecurityGroup( + self.context, **test_base.remove_timestamps_from_fields(sg_fields)) + self.sg_obj.create() + for obj in itertools.chain(self.db_objs, self.obj_fields): + obj['security_group_id'] = self.sg_obj['id'] + + +class SecurityGroupRuleIfaceObjTestCase(test_base.BaseObjectIfaceTestCase): + + _test_class = securitygroup.SecurityGroupRule + + +class SecurityGroupRuleDbObjTestCase(test_base.BaseDbObjectTestCase, + testlib_api.SqlTestCase): + + _test_class = securitygroup.SecurityGroupRule + + def setUp(self): + super(SecurityGroupRuleDbObjTestCase, self).setUp() + sg_db_obj = self.get_random_fields(securitygroup.SecurityGroup) + sg_fields = securitygroup.SecurityGroup.modify_fields_from_db( + sg_db_obj) + self.sg_obj = securitygroup.SecurityGroup( + self.context, **test_base.remove_timestamps_from_fields(sg_fields)) + self.sg_obj.create() + for obj in itertools.chain(self.db_objs, self.obj_fields): + obj['security_group_id'] = self.sg_obj['id'] + obj['remote_group_id'] = self.sg_obj['id'] diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_subnetpool.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_subnetpool.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_subnetpool.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_subnetpool.py 2016-08-03 20:10:34.000000000 +0000 @@ -62,6 +62,14 @@ pool = self._test_class.get_object(self.context, id=self._pool.id) self.assertItemsEqual(prefixes, pool.prefixes) + def test_get_objects_queries_constant(self): + # TODO(korzen) SubnetPool is using the SubnetPoolPrefix object to + # reload prefixes, which costs an extra SQL query each time + # reload_prefixes is called in get_object(s). SubnetPool has a defined + # relationship for SubnetPoolPrefixes, so it should be possible to + # reuse the side-loaded values for this. To be reworked in a follow-up + # patch. + pass + + class SubnetPoolPrefixIfaceObjectTestCase( obj_test_base.BaseObjectIfaceTestCase): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_subnet.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_subnet.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_subnet.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_subnet.py 2016-08-03 20:10:34.000000000 +0000 @@ -11,7 +11,14 @@ # under the License.
import itertools +from operator import itemgetter + +from oslo_utils import uuidutils + +from neutron import context +from neutron.db import rbac_db_models +from neutron.objects import base as obj_base +from neutron.objects.db import api as obj_db_api from neutron.objects import subnet from neutron.tests.unit.objects import test_base as obj_test_base from neutron.tests.unit import testlib_api @@ -40,6 +47,11 @@ _test_class = subnet.DNSNameServer + def setUp(self): + super(DNSNameServerObjectIfaceTestCase, self).setUp() + self.pager_map[self._test_class.obj_name()] = ( + obj_base.Pager(sorts=[('order', True)])) + class DNSNameServerDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): @@ -48,11 +60,59 @@ def setUp(self): super(DNSNameServerDbObjectTestCase, self).setUp() + # (NOTE) If two objects have the same value for a field and + # they are sorted using that field, the order is not deterministic. + # To avoid breaking the tests, we ensure unique values for every field + while not self._is_objects_unique(): + self.db_objs = list(self.get_random_fields() for _ in range(3)) + self.obj_fields = [self._test_class.modify_fields_from_db(db_obj) + for db_obj in self.db_objs] self._create_test_network() self._create_test_subnet(self._network) for obj in itertools.chain(self.db_objs, self.obj_fields): obj['subnet_id'] = self._subnet['id'] + + def _is_objects_unique(self): + order_set = set([x['order'] for x in self.db_objs]) + subnet_id_set = set([x['subnet_id'] for x in self.db_objs]) + address_set = set([x['address'] for x in self.db_objs]) + return 3 == len(order_set) == len(subnet_id_set) == len(address_set) + + def _create_dnsnameservers(self): + for obj in self.obj_fields: + dns = self._make_object(obj) + dns.create() + + def test_get_objects_sort_by_order_asc(self): + self._create_dnsnameservers() + objs = self._test_class.get_objects(self.context) + fields_sorted = sorted([dict(obj) for obj in self.obj_fields], + key=itemgetter('order')) + self.assertEqual( + fields_sorted, + [obj_test_base.get_obj_db_fields(obj) for obj in objs]) + + def test_get_objects_sort_by_order_desc(self): + self._create_dnsnameservers() + pager = obj_base.Pager(sorts=[('order', False)]) + objs = self._test_class.get_objects(self.context, _pager=pager, + subnet_id=self._subnet.id) + fields_sorted = sorted([dict(obj) for obj in self.obj_fields], + key=itemgetter('order'), reverse=True) + self.assertEqual( + fields_sorted, + [obj_test_base.get_obj_db_fields(obj) for obj in objs]) + + def test_get_objects_sort_by_address_asc_using_pager(self): + self._create_dnsnameservers() + pager = obj_base.Pager(sorts=[('address', True)]) + objs = self._test_class.get_objects(self.context, _pager=pager) + fields_sorted = sorted([dict(obj) for obj in self.obj_fields], + key=itemgetter('address')) + self.assertEqual( + fields_sorted, + [obj_test_base.get_obj_db_fields(obj) for obj in objs]) + class RouteObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase): @@ -76,6 +136,11 @@ _test_class = subnet.Subnet + def setUp(self): + super(SubnetObjectIfaceTestCase, self).setUp() + self.pager_map[subnet.DNSNameServer.obj_name()] = ( + obj_base.Pager(sorts=[('order', True)])) + class SubnetDbObjectTestCase(obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): @@ -85,5 +150,97 @@ def setUp(self): super(SubnetDbObjectTestCase, self).setUp() self._create_test_network() + self._create_test_segment(self._network) for obj in itertools.chain(self.db_objs, self.obj_fields): obj['network_id'] = self._network['id'] +
obj['segment_id'] = self._segment['id'] + + def test_get_dns_nameservers_in_order(self): + obj = self._make_object(self.obj_fields[0]) + obj.create() + dns_nameservers = [(2, '1.2.3.4'), (1, '5.6.7.8'), (4, '7.7.7.7')] + for order, address in dns_nameservers: + dns = subnet.DNSNameServer(self.context, order=order, + address=address, + subnet_id=obj.id) + dns.create() + + new = self._test_class.get_object(self.context, id=obj.id) + self.assertEqual(1, new.dns_nameservers[0].order) + self.assertEqual(2, new.dns_nameservers[1].order) + self.assertEqual(4, new.dns_nameservers[-1].order) + + def _create_shared_network_rbac_entry(self, network): + attrs = { + 'object_id': network['id'], + 'target_tenant': '*', + 'action': rbac_db_models.ACCESS_SHARED + } + obj_db_api.create_object(self.context, rbac_db_models.NetworkRBAC, + attrs) + + def test_get_subnet_shared_true(self): + network = self._create_network() + self._create_shared_network_rbac_entry(network) + subnet_data = dict(self.obj_fields[0]) + subnet_data['network_id'] = network['id'] + + obj = self._make_object(subnet_data) + # check that shared will be loaded by 'obj_load_attr', using an extra + # query by RbacNeutronDbObjectMixin get_shared_with_tenant + self.assertTrue(obj.shared) + obj.create() + # here shared should be loaded by is_network_shared + self.assertTrue(obj.shared) + + new = self._test_class.get_object(self.context, + **obj._get_composite_keys()) + # again, shared should be loaded by is_network_shared + self.assertTrue(new.shared) + + def test_filter_by_shared(self): + network = self._create_network() + self._create_shared_network_rbac_entry(network) + + subnet_data = dict(self.obj_fields[0]) + subnet_data['network_id'] = network['id'] + obj = self._make_object(subnet_data) + obj.create() + + result = self._test_class.get_objects(self.context, shared=True) + + self.assertEqual(obj, result[0]) + + def test_get_shared_subnet_with_another_tenant(self): + network_shared = self._create_network() + self._create_shared_network_rbac_entry(network_shared) + + subnet_data = dict(self.obj_fields[0]) + subnet_data['network_id'] = network_shared['id'] + shared_subnet = self._make_object(subnet_data) + shared_subnet.create() + + priv_subnet = self._make_object(self.obj_fields[1]) + priv_subnet.create() + + # Situation here: + # - we have one network with a subnet that is private + # - a shared network with its subnet + # creating a new context, the user should have access to one shared + # network + + all_subnets = self._test_class.get_objects(self.context) + self.assertEqual(2, len(all_subnets)) + + # access with a new tenant_id; should be able to access one subnet + new_ctx = context.Context('', uuidutils.generate_uuid()) + public_subnets = self._test_class.get_objects(new_ctx) + self.assertEqual([shared_subnet], public_subnets) + + # test get_object to fetch the private and then the shared subnet + fetched_private_subnet = self._test_class.get_object(new_ctx, + id=priv_subnet.id) + self.assertIsNone(fetched_private_subnet) + + fetched_public_subnet = ( + self._test_class.get_object(new_ctx, id=shared_subnet.id)) + self.assertEqual(shared_subnet, fetched_public_subnet) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_trunk.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_trunk.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_trunk.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_trunk.py 2016-08-03 20:10:34.000000000 +0000 @@ -13,9 +13,14 @@ # License for
the specific language governing permissions and limitations # under the License. +import itertools + +import mock from neutron_lib import exceptions as n_exc +from oslo_db import exception as obj_exc from oslo_utils import uuidutils +from neutron.objects.db import api as obj_db_api from neutron.objects import trunk as t_obj from neutron.services.trunk import exceptions as t_exc from neutron.tests.unit.objects import test_base @@ -26,6 +31,12 @@ _test_class = t_obj.SubPort + def test_create_duplicates(self): + with mock.patch.object(obj_db_api, 'create_object', + side_effect=obj_exc.DBDuplicateEntry): + obj = self._test_class(self.context, **self.obj_fields[0]) + self.assertRaises(t_exc.DuplicateSubPort, obj.create) + class SubPortDbObjectTestCase(test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase): @@ -75,8 +86,11 @@ super(TrunkDbObjectTestCase, self).setUp() self._create_test_network() + sub_ports = [] + for obj in self.db_objs: + sub_ports.extend(obj['sub_ports']) - for obj in self.obj_fields: + for obj in itertools.chain(self.obj_fields, sub_ports): self._create_port(id=obj['port_id'], network_id=self._network['id']) @@ -87,27 +101,52 @@ trunk = self._make_object(obj) self.assertRaises(n_exc.PortNotFound, trunk.create) - def test_create_with_sub_ports(self): + def _test_create_trunk_with_subports(self, port_id, vids): tenant_id = uuidutils.generate_uuid() - def _as_tuple(sub_port): - return (sub_port['port_id'], - sub_port['segmentation_type'], - sub_port['segmentation_id']) - sub_ports = [] - for vid in range(1, 3): + for vid in vids: port = self._create_port(network_id=self._network['id']) sub_ports.append(t_obj.SubPort(self.context, port_id=port['id'], segmentation_type='vlan', segmentation_id=vid)) - expected = set(map(_as_tuple, sub_ports)) - - trunk = t_obj.Trunk(self.context, port_id=self.db_obj['port_id'], + trunk = t_obj.Trunk(self.context, port_id=port_id, sub_ports=sub_ports, tenant_id=tenant_id) trunk.create() + self.assertEqual(sub_ports, trunk.sub_ports) + return trunk + + def test_create_with_sub_ports(self): + trunk = self._test_create_trunk_with_subports(self.db_obj['port_id'], + [1, 2]) + + def _as_tuple(sub_port): + return (sub_port['port_id'], + sub_port['segmentation_type'], + sub_port['segmentation_id']) + + expected = {_as_tuple(port) for port in trunk.sub_ports} sub_ports = t_obj.SubPort.get_objects(self.context, trunk_id=trunk.id) + self.assertEqual(expected, {_as_tuple(port) for port in sub_ports}) - self.assertEqual(expected, set(map(_as_tuple, trunk.sub_ports))) - self.assertEqual(expected, set(map(_as_tuple, sub_ports))) + def test_get_object_includes_correct_subports(self): + trunk1_vids = [1, 2, 3] + trunk2_vids = [4, 5, 6] + port_id1 = self.db_obj['port_id'] + trunk1 = self._test_create_trunk_with_subports(port_id1, trunk1_vids) + + port_id2 = uuidutils.generate_uuid() + self._create_port(id=port_id2, + network_id=self._network['id']) + self._test_create_trunk_with_subports(port_id2, trunk2_vids) + + listed_trunk1 = t_obj.Trunk.get_object( + self.context, + id=trunk1.id, + port_id=port_id1 + ) + self.assertEqual( + set(trunk1_vids), + {sp.segmentation_id for sp in listed_trunk1.sub_ports} + ) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_utils.py neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_utils.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/objects/test_utils.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/objects/test_utils.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,52 
@@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import exceptions +from neutron.objects import utils +from neutron.tests import base as test_base + + +class TestConvertFilters(test_base.BaseTestCase): + + def test_convert_filters_no_tenant_id(self): + kwargs = { + 'filter%d' % i: 'value%d' % i + for i in range(0, 10) + } + self.assertEqual(kwargs, utils.convert_filters(**kwargs)) + + def test_convert_filters_tenant_id(self): + expected_project_id = 'fake-tenant-id' + kwargs = { + 'filter%d' % i: 'value%d' % i + for i in range(0, 10) + } + expected = kwargs.copy() + expected['project_id'] = expected_project_id + + self.assertEqual( + expected, + utils.convert_filters(tenant_id=expected_project_id, **kwargs) + ) + + def test_convert_filters_tenant_id_and_project_id_raises(self): + kwargs = { + 'filter%d' % i: 'value%d' % i + for i in range(0, 10) + } + kwargs['tenant_id'] = 'fake-tenant-id' + kwargs['project_id'] = 'fake-tenant-id' + + self.assertRaises( + exceptions.TenantIdProjectIdFilterConflict, + utils.convert_filters, **kwargs + ) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/common/test_utils.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/common/test_utils.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/common/test_utils.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/common/test_utils.py 2016-08-03 20:10:34.000000000 +0000 @@ -16,6 +16,7 @@ import mock from neutron_lib import constants +import testtools from neutron.plugins.common import utils from neutron.tests import base @@ -70,3 +71,30 @@ self.assertEqual(12, len(utils.get_interface_name(LONG_NAME1, prefix="pre-", max_len=12))) + + def test_delete_port_on_error(self): + core_plugin, context = mock.Mock(), mock.Mock() + port_id = 'pid' + with testtools.ExpectedException(ValueError): + with utils.delete_port_on_error(core_plugin, context, port_id): + raise ValueError() + core_plugin.delete_port.assert_called_once_with(context, port_id, + l3_port_check=False) + + def test_delete_port_on_error_no_delete(self): + core_plugin, context = mock.Mock(), mock.Mock() + with testtools.ExpectedException(ValueError): + with utils.delete_port_on_error(core_plugin, context, 1) as cm: + cm.delete_on_error = False + raise ValueError() + self.assertFalse(core_plugin.delete_port.called) + + def test_delete_port_on_error_fail_port_delete(self): + core_plugin, context = mock.Mock(), mock.Mock() + core_plugin.delete_port.side_effect = TypeError() + port_id = 'pid' + with testtools.ExpectedException(ValueError): + with utils.delete_port_on_error(core_plugin, context, port_id): + raise ValueError() + core_plugin.delete_port.assert_called_once_with(context, port_id, + l3_port_check=False) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/base.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/base.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/base.py 2016-05-12 19:51:41.000000000 +0000 +++ 
neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/base.py 2016-08-03 20:10:34.000000000 +0000 @@ -22,6 +22,12 @@ 'L3RouterPlugin') _mechanism_drivers = ['openvswitch'] + def get_additional_service_plugins(self): + p = super(ML2TestFramework, self).get_additional_service_plugins() + p.update({'flavors_plugin_name': 'neutron.services.flavors.' + 'flavors_plugin.FlavorsPlugin'}) + return p + def setUp(self): super(ML2TestFramework, self).setUp() self.core_plugin = manager.NeutronManager.get_instance().get_plugin() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/agent/test_capabilities.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/agent/test_capabilities.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/agent/test_capabilities.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/agent/test_capabilities.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,40 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.callbacks import events +from neutron.plugins.ml2.drivers.agent import capabilities +from neutron.tests import base + + +class CapabilitiesTest(base.BaseTestCase): + + @mock.patch("neutron.callbacks.manager.CallbacksManager.notify") + def test_notify_init_event(self, mocked_manager): + mock_agent_type = mock.Mock() + mock_agent = mock.Mock() + capabilities.notify_init_event(mock_agent_type, mock_agent) + mocked_manager.assert_called_with(mock_agent_type, + events.AFTER_INIT, + mock_agent, + agent=mock_agent) + + @mock.patch("neutron.callbacks.manager.CallbacksManager.subscribe") + def test_register(self, mocked_subscribe): + mock_callback = mock.Mock() + mock_agent_type = mock.Mock() + capabilities.register(mock_callback, mock_agent_type) + mocked_subscribe.assert_called_with(mock_callback, + mock_agent_type, + events.AFTER_INIT) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/agent/test__common_agent.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/agent/test__common_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/agent/test__common_agent.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/agent/test__common_agent.py 2016-08-03 20:10:34.000000000 +0000 @@ -17,8 +17,12 @@ import mock from neutron_lib import constants from oslo_config import cfg +import testtools from neutron.agent.linux import bridge_lib +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.callbacks import resources from neutron.common import constants as n_const from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb from neutron.plugins.ml2.drivers.agent import _common_agent as ca @@ -67,6 +71,36 @@ with mock.patch.object(self.agent, "daemon_loop"): self.agent.start() + def test_treat_devices_removed_notify(self): + handler = mock.Mock() + 
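# NOTE(editor): treat_devices_removed is expected to publish an + # AFTER_DELETE event for the PORT_DEVICE resource through the callbacks + # registry; the mock handler subscribed below captures that notification. +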
registry.subscribe(handler, resources.PORT_DEVICE, events.AFTER_DELETE) + devices = [DEVICE_1] + self.agent.treat_devices_removed(devices) + handler.assert_called_once_with(mock.ANY, mock.ANY, self.agent, + context=mock.ANY, device=DEVICE_1, + port_id=mock.ANY) + + def test_treat_devices_added_updated_notify(self): + handler = mock.Mock() + registry.subscribe(handler, resources.PORT_DEVICE, events.AFTER_UPDATE) + agent = self.agent + mock_details = {'device': 'dev123', + 'port_id': 'port123', + 'network_id': 'net123', + 'admin_state_up': True, + 'network_type': 'vlan', + 'segmentation_id': 100, + 'physical_network': 'physnet1', + 'device_owner': 'horse'} + agent.plugin_rpc = mock.Mock() + agent.plugin_rpc.get_devices_details_list.return_value = [mock_details] + agent.mgr = mock.Mock() + agent.mgr.plug_interface.return_value = True + agent.treat_devices_added_updated(set(['dev123'])) + handler.assert_called_once_with(mock.ANY, mock.ANY, self.agent, + context=mock.ANY, + device_details=mock_details) + def test_treat_devices_removed_with_existed_device(self): agent = self.agent agent.mgr.ensure_port_admin_state = mock.Mock() @@ -448,6 +482,36 @@ agent.treat_devices_added_updated(set(['tap1'])) set_arp.assert_called_with(mock_details['device'], mock_details) + def test__process_device_if_exists_missing_intf(self): + mock_details = {'device': 'dev123', + 'port_id': 'port123', + 'network_id': 'net123', + 'admin_state_up': True, + 'network_type': 'vlan', + 'segmentation_id': 100, + 'physical_network': 'physnet1', + 'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX} + self.agent.mgr = mock.Mock() + self.agent.mgr.get_all_devices.return_value = [] + self.agent.mgr.plug_interface.side_effect = RuntimeError() + self.agent._process_device_if_exists(mock_details) + + def test__process_device_if_exists_error(self): + mock_details = {'device': 'dev123', + 'port_id': 'port123', + 'network_id': 'net123', + 'admin_state_up': True, + 'network_type': 'vlan', + 'segmentation_id': 100, + 'physical_network': 'physnet1', + 'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX} + self.agent.mgr = mock.Mock() + self.agent.mgr.get_all_devices.return_value = ['dev123'] + self.agent.mgr.plug_interface.side_effect = RuntimeError() + with testtools.ExpectedException(RuntimeError): + # device exists so it should raise + self.agent._process_device_if_exists(mock_details) + def test_set_rpc_timeout(self): self.agent.stop() for rpc_client in (self.agent.plugin_rpc.client, diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py 2016-08-03 20:10:34.000000000 +0000 @@ -20,11 +20,14 @@ from testtools import matchers from neutron.db import api as db +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2 import config from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2.drivers import type_tunnel TUNNEL_IP_ONE = "10.10.10.10" TUNNEL_IP_TWO = "10.10.10.20" +TUNNEL_IPV6_ONE = "2001:db8:1::10" HOST_ONE = 'fake_host_one' HOST_TWO = 'fake_host_two' TUN_MIN = 100 @@ -355,6 +358,12 @@ 'host': HOST_ONE} self._test_tunnel_sync(kwargs) + def test_tunnel_sync_called_with_host_passed_ipv6(self): + config.cfg.CONF.set_override('overlay_ip_version', 6, group='ml2') + 
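# NOTE(editor): with overlay_ip_version forced to 6, tunnel_sync should + # accept an IPv6 endpoint address; the overlay mismatch cases further + # down expect the version check to fail instead. +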
kwargs = {'tunnel_ip': TUNNEL_IPV6_ONE, 'tunnel_type': self.TYPE, + 'host': HOST_ONE} + self._test_tunnel_sync(kwargs) + def test_tunnel_sync_called_for_existing_endpoint(self): self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) @@ -391,3 +400,60 @@ def test_tunnel_sync_called_without_tunnel_type(self): kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'host': None} self._test_tunnel_sync_raises(kwargs) + + def test_tunnel_sync_called_with_tunnel_overlay_mismatch(self): + config.cfg.CONF.set_override('overlay_ip_version', 6, group='ml2') + kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE, + 'host': HOST_ONE} + self._test_tunnel_sync_raises(kwargs) + + def test_tunnel_sync_called_with_tunnel_overlay_mismatch_ipv6(self): + config.cfg.CONF.set_override('overlay_ip_version', 4, group='ml2') + kwargs = {'tunnel_ip': TUNNEL_IPV6_ONE, 'tunnel_type': self.TYPE, + 'host': HOST_ONE} + self._test_tunnel_sync_raises(kwargs) + + +class TunnelTypeMTUTestMixin(object): + + DRIVER_CLASS = None + TYPE = None + ENCAP_OVERHEAD = 0 + + def setUp(self): + super(TunnelTypeMTUTestMixin, self).setUp() + self.driver = self.DRIVER_CLASS() + + def _test_get_mtu(self, ip_version): + config.cfg.CONF.set_override('overlay_ip_version', ip_version, + group='ml2') + ip_header_length = p_const.IP_HEADER_LENGTH[ip_version] + + config.cfg.CONF.set_override('global_physnet_mtu', 1500) + config.cfg.CONF.set_override('path_mtu', 1475, group='ml2') + self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} + self.assertEqual(1475 - self.ENCAP_OVERHEAD - ip_header_length, + self.driver.get_mtu('physnet1')) + + config.cfg.CONF.set_override('global_physnet_mtu', 1450) + config.cfg.CONF.set_override('path_mtu', 1475, group='ml2') + self.driver.physnet_mtus = {'physnet1': 1400, 'physnet2': 1425} + self.assertEqual(1450 - self.ENCAP_OVERHEAD - ip_header_length, + self.driver.get_mtu('physnet1')) + + config.cfg.CONF.set_override('global_physnet_mtu', 0) + config.cfg.CONF.set_override('path_mtu', 1450, group='ml2') + self.driver.physnet_mtus = {'physnet1': 1425, 'physnet2': 1400} + self.assertEqual(1450 - self.ENCAP_OVERHEAD - ip_header_length, + self.driver.get_mtu('physnet1')) + + config.cfg.CONF.set_override('global_physnet_mtu', 0) + config.cfg.CONF.set_override('path_mtu', 0, group='ml2') + self.driver.physnet_mtus = {} + self.assertEqual(0, self.driver.get_mtu('physnet1')) + + def test_get_mtu_ipv4(self): + self._test_get_mtu(4) + + def test_get_mtu_ipv6(self): + self._test_get_mtu(6) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc_base.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc_base.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc_base.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc_base.py 2016-08-29 20:05:49.000000000 +0000 @@ -20,8 +20,9 @@ from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc -from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent from neutron.tests import base +from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent import \ + test_vlanmanager class FakeNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin): @@ -53,6 +54,8 @@ def setUp(self): super(TestL2populationRpcCallBackTunnelMixinBase, self).setUp() + 
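# NOTE(editor): the fixture below appears to install a fresh + # LocalVlanManager for each test, so the mappings registered in this + # setUp replace the LocalVLANMapping dictionaries the old tests passed + # around. +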
self.vlan_manager = self.useFixture( + test_vlanmanager.LocalVlanManagerFixture()).manager self.fakeagent = FakeNeutronAgent() self.fakebr = mock.Mock() Port = collections.namedtuple('Port', 'ip, ofport') @@ -112,21 +115,13 @@ }, } - self.lvm1 = ovs_neutron_agent.LocalVLANMapping( - self.lvms[0].vlan, self.type_gre, self.lvms[0].phys, - self.lvms[0].segid, {self.lvms[0].vif: self.lvms[0].port}) - self.lvm2 = ovs_neutron_agent.LocalVLANMapping( - self.lvms[1].vlan, self.type_gre, self.lvms[1].phys, - self.lvms[1].segid, {self.lvms[1].vif: self.lvms[1].port}) - self.lvm3 = ovs_neutron_agent.LocalVLANMapping( - self.lvms[2].vlan, self.type_gre, self.lvms[2].phys, - self.lvms[2].segid, {self.lvms[2].vif: self.lvms[2].port}) - - self.local_vlan_map1 = { - self.lvms[0].net: self.lvm1, - self.lvms[1].net: self.lvm2, - self.lvms[2].net: self.lvm3, - } + for i in range(3): + self.vlan_manager.add( + self.lvms[i].net, + self.lvms[i].vlan, self.type_gre, self.lvms[i].phys, + self.lvms[i].segid, {self.lvms[i].vif: self.lvms[i].port}) + setattr(self, 'lvm%d' % i, + self.vlan_manager.get(self.lvms[i].net)) self.upd_fdb_entry1_val = { self.lvms[0].net: { diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population_rpc.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population_rpc.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population_rpc.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/l2pop/rpc_manager/test_l2population_rpc.py 2016-08-29 20:05:49.000000000 +0000 @@ -25,20 +25,22 @@ l2population_rpc_base.TestL2populationRpcCallBackTunnelMixinBase): def test_get_agent_ports_no_data(self): + # Make sure vlan manager has no mappings that were added in setUp() + self.vlan_manager.mapping = {} self.assertFalse( - list(self.fakeagent.get_agent_ports(self.fdb_entries1, {}))) + list(self.fakeagent.get_agent_ports(self.fdb_entries1))) def test_get_agent_ports_non_existence_key_in_lvm(self): results = {} - del self.local_vlan_map1[self.lvms[1].net] + self.vlan_manager.pop(self.lvms[1].net) for lvm, agent_ports in self.fakeagent.get_agent_ports( - self.fdb_entries1, self.local_vlan_map1): + self.fdb_entries1): results[lvm] = agent_ports expected = { - self.lvm1: { + self.lvm0: { self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)], self.local_ip: []}, - self.lvm3: { + self.lvm2: { self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)], self.local_ip: []}, } @@ -48,14 +50,14 @@ results = {} self.fdb_entries1[self.lvms[1].net]['ports'] = {} for lvm, agent_ports in self.fakeagent.get_agent_ports( - self.fdb_entries1, self.local_vlan_map1): + self.fdb_entries1): results[lvm] = agent_ports expected = { - self.lvm1: { + self.lvm0: { self.ports[0].ip: [(self.lvms[0].mac, self.lvms[0].ip)], self.local_ip: []}, - self.lvm2: {}, - self.lvm3: { + self.lvm1: {}, + self.lvm2: { self.ports[2].ip: [(self.lvms[2].mac, self.lvms[2].ip)], self.local_ip: []}, } @@ -65,40 +67,40 @@ with mock.patch.object(self.fakeagent, 'setup_tunnel_port'),\ mock.patch.object(self.fakeagent, 'add_fdb_flow' ) as mock_add_fdb_flow: - self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm1, + self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm0, self.agent_ports, self._tunnel_port_lookup) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), - self.ports[0].ip, self.lvm1, self.ports[0].ofport), + self.ports[0].ip, 
self.lvm0, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip), - self.ports[1].ip, self.lvm1, self.ports[1].ofport), + self.ports[1].ip, self.lvm0, self.ports[1].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), - self.ports[2].ip, self.lvm1, self.ports[2].ofport), + self.ports[2].ip, self.lvm0, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_add_fdb_flow.call_args_list)) def test_fdb_add_tun_non_existence_key_in_ofports(self): - ofport = self.lvm1.network_type + '0a0a0a0a' + ofport = self.lvm0.network_type + '0a0a0a0a' del self.ofports[self.type_gre][self.ports[1].ip] with mock.patch.object(self.fakeagent, 'setup_tunnel_port', return_value=ofport ) as mock_setup_tunnel_port,\ mock.patch.object(self.fakeagent, 'add_fdb_flow' ) as mock_add_fdb_flow: - self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm1, + self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm0, self.agent_ports, self._tunnel_port_lookup) mock_setup_tunnel_port.assert_called_once_with( - self.fakebr, self.ports[1].ip, self.lvm1.network_type) + self.fakebr, self.ports[1].ip, self.lvm0.network_type) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), - self.ports[0].ip, self.lvm1, self.ports[0].ofport), + self.ports[0].ip, self.lvm0, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip), - self.ports[1].ip, self.lvm1, ofport), + self.ports[1].ip, self.lvm0, ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), - self.ports[2].ip, self.lvm1, self.ports[2].ofport), + self.ports[2].ip, self.lvm0, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_add_fdb_flow.call_args_list)) @@ -110,16 +112,16 @@ ) as mock_setup_tunnel_port,\ mock.patch.object(self.fakeagent, 'add_fdb_flow' ) as mock_add_fdb_flow: - self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm1, + self.fakeagent.fdb_add_tun('context', self.fakebr, self.lvm0, self.agent_ports, self._tunnel_port_lookup) mock_setup_tunnel_port.assert_called_once_with( - self.fakebr, self.ports[1].ip, self.lvm1.network_type) + self.fakebr, self.ports[1].ip, self.lvm0.network_type) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), - self.ports[0].ip, self.lvm1, self.ports[0].ofport), + self.ports[0].ip, self.lvm0, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), - self.ports[2].ip, self.lvm1, self.ports[2].ofport), + self.ports[2].ip, self.lvm0, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_add_fdb_flow.call_args_list)) @@ -127,16 +129,16 @@ def test_fdb_remove_tun(self): with mock.patch.object( self.fakeagent, 'del_fdb_flow') as mock_del_fdb_flow: - self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm1, + self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm0, self.agent_ports, self._tunnel_port_lookup) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), - self.ports[0].ip, self.lvm1, self.ports[0].ofport), + self.ports[0].ip, self.lvm0, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[1].mac, self.lvms[1].ip), - self.ports[1].ip, self.lvm1, self.ports[1].ofport), + self.ports[1].ip, self.lvm0, self.ports[1].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), - self.ports[2].ip, self.lvm1, self.ports[2].ofport), + self.ports[2].ip, self.lvm0, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_del_fdb_flow.call_args_list)) @@ -147,35 +149,35 @@ ) as 
mock_del_fdb_flow,\ mock.patch.object(self.fakeagent, 'cleanup_tunnel_port' ) as mock_cleanup_tunnel_port: - self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm1, + self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm0, self.agent_ports, self._tunnel_port_lookup) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), - self.ports[0].ip, self.lvm1, self.ports[0].ofport), + self.ports[0].ip, self.lvm0, self.ports[0].ofport), mock.call(self.fakebr, (n_const.FLOODING_ENTRY[0], n_const.FLOODING_ENTRY[1]), - self.ports[1].ip, self.lvm1, self.ports[1].ofport), + self.ports[1].ip, self.lvm0, self.ports[1].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), - self.ports[2].ip, self.lvm1, self.ports[2].ofport), + self.ports[2].ip, self.lvm0, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_del_fdb_flow.call_args_list)) mock_cleanup_tunnel_port.assert_called_once_with( - self.fakebr, self.ports[1].ofport, self.lvm1.network_type) + self.fakebr, self.ports[1].ofport, self.lvm0.network_type) def test_fdb_remove_tun_non_existence_key_in_ofports(self): del self.ofports[self.type_gre][self.ports[1].ip] with mock.patch.object( self.fakeagent, 'del_fdb_flow') as mock_del_fdb_flow: - self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm1, + self.fakeagent.fdb_remove_tun('context', self.fakebr, self.lvm0, self.agent_ports, self._tunnel_port_lookup) expected = [ mock.call(self.fakebr, (self.lvms[0].mac, self.lvms[0].ip), - self.ports[0].ip, self.lvm1, self.ports[0].ofport), + self.ports[0].ip, self.lvm0, self.ports[0].ofport), mock.call(self.fakebr, (self.lvms[2].mac, self.lvms[2].ip), - self.ports[2].ip, self.lvm1, self.ports[2].ofport), + self.ports[2].ip, self.lvm0, self.ports[2].ofport), ] self.assertEqual(sorted(expected), sorted(mock_del_fdb_flow.call_args_list)) @@ -193,23 +195,24 @@ 'context', self.upd_fdb_entry1) def test__fdb_chg_ip(self): - m_setup_entry_for_arp_reply = mock.Mock() - self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply - self.fakeagent.fdb_chg_ip_tun('context', self.fakebr, - self.upd_fdb_entry1_val, self.local_ip, - self.local_vlan_map1) + with mock.patch.object( + self.fakeagent, + 'setup_entry_for_arp_reply') as m_setup_entry_for_arp_reply: + self.fakeagent.fdb_chg_ip_tun('context', self.fakebr, + self.upd_fdb_entry1_val, + self.local_ip) expected = [ - mock.call(self.fakebr, 'remove', self.lvm1.vlan, self.lvms[0].mac, + mock.call(self.fakebr, 'remove', self.lvm0.vlan, self.lvms[0].mac, self.lvms[0].ip), - mock.call(self.fakebr, 'add', self.lvm1.vlan, self.lvms[1].mac, + mock.call(self.fakebr, 'add', self.lvm0.vlan, self.lvms[1].mac, self.lvms[1].ip), - mock.call(self.fakebr, 'remove', self.lvm1.vlan, self.lvms[0].mac, + mock.call(self.fakebr, 'remove', self.lvm0.vlan, self.lvms[0].mac, self.lvms[0].ip), - mock.call(self.fakebr, 'add', self.lvm1.vlan, self.lvms[1].mac, + mock.call(self.fakebr, 'add', self.lvm0.vlan, self.lvms[1].mac, self.lvms[1].ip), - mock.call(self.fakebr, 'remove', self.lvm2.vlan, self.lvms[0].mac, + mock.call(self.fakebr, 'remove', self.lvm1.vlan, self.lvms[0].mac, self.lvms[0].ip), - mock.call(self.fakebr, 'add', self.lvm2.vlan, self.lvms[2].mac, + mock.call(self.fakebr, 'add', self.lvm1.vlan, self.lvms[2].mac, self.lvms[2].ip), ] m_setup_entry_for_arp_reply.assert_has_calls(expected, any_order=True) @@ -233,8 +236,7 @@ m_setup_entry_for_arp_reply = mock.Mock() self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply 
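+ # NOTE(editor): fdb_chg_ip_tun now resolves VLAN mappings through the + # agent's vlan manager internally, so the local_vlan_map argument is + # dropped from the calls below.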
self.fakeagent.fdb_chg_ip_tun('context', self.fakebr, - upd_fdb_entry_val, self.local_ip, - self.local_vlan_map1) + upd_fdb_entry_val, self.local_ip) self.assertFalse(m_setup_entry_for_arp_reply.call_count) def test_fdb_chg_ip_tun_empty_before_after(self): @@ -247,6 +249,5 @@ self.fakeagent.setup_entry_for_arp_reply = m_setup_entry_for_arp_reply # passing non-local ip self.fakeagent.fdb_chg_ip_tun('context', self.fakebr, - upd_fdb_entry_val, "8.8.8.8", - self.local_vlan_map1) + upd_fdb_entry_val, "8.8.8.8") self.assertFalse(m_setup_entry_for_arp_reply.call_count) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py 2016-08-03 20:10:34.000000000 +0000 @@ -71,44 +71,44 @@ self.ctx.session.add(port_binding_cls(**binding_kwarg)) - def test_get_dvr_active_network_ports(self): + def test_get_distributed_active_network_ports(self): self._setup_port_binding() # Register a L2 agent + A bunch of other agents on the same host helpers.register_l3_agent() helpers.register_dhcp_agent() helpers.register_ovs_agent() - tunnel_network_ports = l2pop_db.get_dvr_active_network_ports( + tunnel_network_ports = l2pop_db.get_distributed_active_network_ports( self.ctx.session, 'network_id') self.assertEqual(1, len(tunnel_network_ports)) _, agent = tunnel_network_ports[0] self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type) - def test_get_dvr_active_network_ports_no_candidate(self): + def test_get_distributed_active_network_ports_no_candidate(self): self._setup_port_binding() # Register a bunch of non-L2 agents on the same host helpers.register_l3_agent() helpers.register_dhcp_agent() - tunnel_network_ports = l2pop_db.get_dvr_active_network_ports( + tunnel_network_ports = l2pop_db.get_distributed_active_network_ports( self.ctx.session, 'network_id') self.assertEqual(0, len(tunnel_network_ports)) - def test_get_nondvr_active_network_ports(self): + def test_get_nondistributed_active_network_ports(self): self._setup_port_binding(dvr=False) # Register a L2 agent + A bunch of other agents on the same host helpers.register_l3_agent() helpers.register_dhcp_agent() helpers.register_ovs_agent() - fdb_network_ports = l2pop_db.get_nondvr_active_network_ports( + fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports( self.ctx.session, 'network_id') self.assertEqual(1, len(fdb_network_ports)) _, agent = fdb_network_ports[0] self.assertEqual(constants.AGENT_TYPE_OVS, agent.agent_type) - def test_get_nondvr_active_network_ports_no_candidate(self): + def test_get_nondistributed_active_network_ports_no_candidate(self): self._setup_port_binding(dvr=False) # Register a bunch of non-L2 agents on the same host helpers.register_l3_agent() helpers.register_dhcp_agent() - fdb_network_ports = l2pop_db.get_nondvr_active_network_ports( + fdb_network_ports = l2pop_db.get_nondistributed_active_network_ports( self.ctx.session, 'network_id') self.assertEqual(0, len(fdb_network_ports)) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py 2016-06-17 15:30:29.000000000 +0000 +++ 
neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,16 +15,15 @@ import mock from neutron_lib import constants +from neutron_lib import exceptions from oslo_serialization import jsonutils import testtools -from neutron.common import constants as n_const from neutron.common import topics from neutron import context from neutron.extensions import portbindings from neutron.extensions import providernet as pnet from neutron import manager -from neutron.plugins.ml2.common import exceptions as ml2_exc from neutron.plugins.ml2 import driver_context from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db from neutron.plugins.ml2.drivers.l2pop import mech_driver as l2pop_mech_driver @@ -435,7 +434,7 @@ cidr='2001:db8::/64', ip_version=6, gateway_ip='fe80::1', - ipv6_address_mode=n_const.IPV6_SLAAC) as subnet2: + ipv6_address_mode=constants.IPV6_SLAAC) as subnet2: with self.port( subnet, fixed_ips=[{'subnet_id': subnet['subnet']['id']}, @@ -912,7 +911,7 @@ def _test_create_agent_fdb(self, fdb_network_ports, agent_ips): mech_driver = l2pop_mech_driver.L2populationMechanismDriver() tunnel_network_ports, tunnel_agent = ( - self._mock_network_ports(HOST + '1', None)) + self._mock_network_ports(HOST + '1', [None])) agent_ips[tunnel_agent] = '10.0.0.1' def agent_ip_side_effect(agent): @@ -920,9 +919,11 @@ with mock.patch.object(l2pop_db, 'get_agent_ip', side_effect=agent_ip_side_effect),\ - mock.patch.object(l2pop_db, 'get_nondvr_active_network_ports', + mock.patch.object(l2pop_db, + 'get_nondistributed_active_network_ports', return_value=fdb_network_ports),\ - mock.patch.object(l2pop_db, 'get_dvr_active_network_ports', + mock.patch.object(l2pop_db, + 'get_distributed_active_network_ports', return_value=tunnel_network_ports): session = mock.Mock() agent = mock.Mock() @@ -933,17 +934,17 @@ segment, 'network_id') - def _mock_network_ports(self, host_name, binding): + def _mock_network_ports(self, host_name, bindings): agent = mock.Mock() agent.host = host_name - return [(binding, agent)], agent + return [(binding, agent) for binding in bindings], agent def test_create_agent_fdb(self): binding = mock.Mock() binding.port = {'mac_address': '00:00:DE:AD:BE:EF', 'fixed_ips': [{'ip_address': '1.1.1.1'}]} fdb_network_ports, fdb_agent = ( - self._mock_network_ports(HOST + '2', binding)) + self._mock_network_ports(HOST + '2', [binding])) agent_ips = {fdb_agent: '20.0.0.1'} agent_fdb = self._test_create_agent_fdb(fdb_network_ports, @@ -973,6 +974,33 @@ [constants.FLOODING_ENTRY]}} self.assertEqual(expected_result, result) + def test_create_agent_fdb_concurrent_port_deletion(self): + binding = mock.Mock() + binding.port = {'mac_address': '00:00:DE:AD:BE:EF', + 'fixed_ips': [{'ip_address': '1.1.1.1'}]} + binding2 = mock.Mock() + # the port was deleted + binding2.port = None + fdb_network_ports, fdb_agent = ( + self._mock_network_ports(HOST + '2', [binding, binding2])) + agent_ips = {fdb_agent: '20.0.0.1'} + + agent_fdb = self._test_create_agent_fdb(fdb_network_ports, + agent_ips) + result = agent_fdb['network_id'] + + expected_result = {'segment_id': 1, + 'network_type': 'vxlan', + 'ports': + {'10.0.0.1': + [constants.FLOODING_ENTRY], + '20.0.0.1': + [constants.FLOODING_ENTRY, + l2pop_rpc.PortInfo( + mac_address='00:00:DE:AD:BE:EF', + ip_address='1.1.1.1')]}} + self.assertEqual(expected_result, result) + def test_update_port_precommit_mac_address_changed_raises(self): port = {'status': u'ACTIVE', 'device_owner': DEVICE_OWNER_COMPUTE, 
@@ -993,5 +1021,5 @@ original_port=original_port) mech_driver = l2pop_mech_driver.L2populationMechanismDriver() - with testtools.ExpectedException(ml2_exc.MechanismDriverError): + with testtools.ExpectedException(exceptions.InvalidInput): mech_driver.update_port_precommit(ctx) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -706,25 +706,36 @@ def test_remove_interface(self): with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\ - mock.patch.object(bridge_lib, - 'is_bridged_interface') as isdev_fn,\ + mock.patch.object(bridge_lib.BridgeDevice, + 'owns_interface') as owns_fn,\ mock.patch.object(bridge_lib.BridgeDevice, "delif") as delif_fn: de_fn.return_value = False self.assertFalse(self.lbm.remove_interface("br0", "eth0")) - self.assertFalse(isdev_fn.called) + self.assertFalse(owns_fn.called) de_fn.return_value = True - isdev_fn.return_value = False + owns_fn.return_value = False self.assertTrue(self.lbm.remove_interface("br0", "eth0")) - isdev_fn.return_value = True - delif_fn.return_value = True - self.assertFalse(self.lbm.remove_interface("br0", "eth0")) - delif_fn.return_value = False self.assertTrue(self.lbm.remove_interface("br0", "eth0")) + def test_remove_interface_not_on_bridge(self): + bridge_device = mock.Mock() + with mock.patch.object(bridge_lib, "BridgeDevice", + return_value=bridge_device): + bridge_device.exists.return_value = True + bridge_device.delif.side_effect = RuntimeError + + bridge_device.owns_interface.side_effect = [True, False] + self.lbm.remove_interface("br0", 'tap0') + self.assertEqual(2, bridge_device.owns_interface.call_count) + + bridge_device.owns_interface.side_effect = [True, True] + self.assertRaises(RuntimeError, + self.lbm.remove_interface, "br0", 'tap0') + def test_delete_interface(self): with mock.patch.object(ip_lib.IPDevice, "exists") as de_fn,\ mock.patch.object(ip_lib.IpLinkCommand, "set_down") as down_fn,\ @@ -940,7 +951,7 @@ self.assertEqual(0, del_fn.call_count) self.assertEqual(1, log.call_count) - def test_fdb_add(self): + def _test_fdb_add(self, proxy_enabled=False): fdb_entries = {'net_id': {'ports': {'agent_ip': [constants.FLOODING_ENTRY, @@ -968,7 +979,17 @@ check_exit_code=False), ] execute_fn.assert_has_calls(expected) - add_fn.assert_called_with('port_ip', 'port_mac') + if proxy_enabled: + add_fn.assert_called_with('port_ip', 'port_mac') + else: + add_fn.assert_not_called() + + def test_fdb_add(self): + self._test_fdb_add(proxy_enabled=False) + + def test_fdb_add_with_arp_responder(self): + cfg.CONF.set_override('arp_responder', True, 'VXLAN') + self._test_fdb_add(proxy_enabled=True) def test_fdb_ignore(self): fdb_entries = {'net_id': @@ -999,7 +1020,7 @@ self.assertFalse(execute_fn.called) - def test_fdb_remove(self): + def _test_fdb_remove(self, proxy_enabled=False): fdb_entries = {'net_id': {'ports': {'agent_ip': [constants.FLOODING_ENTRY, @@ -1025,9 +1046,19 @@ check_exit_code=False), ] execute_fn.assert_has_calls(expected) - del_fn.assert_called_with('port_ip', 'port_mac') + if proxy_enabled: + 
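# NOTE(editor): with the VXLAN arp_responder option enabled the agent + # maintains proxy ARP entries for remote ports, so the delete call must + # be asserted; with it disabled there is no proxy entry to remove. +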
del_fn.assert_called_with('port_ip', 'port_mac') + else: + del_fn.assert_not_called() - def test_fdb_update_chg_ip(self): + def test_fdb_remove(self): + self._test_fdb_remove(proxy_enabled=False) + + def test_fdb_remove_with_arp_responder(self): + cfg.CONF.set_override('arp_responder', True, 'VXLAN') + self._test_fdb_remove(proxy_enabled=True) + + def _test_fdb_update_chg_ip(self, proxy_enabled=False): fdb_entries = {'chg_ip': {'net_id': {'agent_ip': @@ -1040,8 +1071,19 @@ return_value='') as del_fn: self.lb_rpc.fdb_update(None, fdb_entries) - del_fn.assert_called_with('port_ip_1', 'port_mac') - add_fn.assert_called_with('port_ip_2', 'port_mac') + if proxy_enabled: + del_fn.assert_called_with('port_ip_1', 'port_mac') + add_fn.assert_called_with('port_ip_2', 'port_mac') + else: + del_fn.assert_not_called() + add_fn.assert_not_called() + + def test_fdb_update_chg_ip(self): + self._test_fdb_update_chg_ip(proxy_enabled=False) + + def test_fdb_update_chg_ip_with_arp_responder(self): + cfg.CONF.set_override('arp_responder', True, 'VXLAN') + self._test_fdb_update_chg_ip(proxy_enabled=True) def test_fdb_update_chg_ip_empty_lists(self): fdb_entries = {'chg_ip': {'net_id': {'agent_ip': {}}}} diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py 2016-08-29 20:05:49.000000000 +0000 @@ -34,7 +34,6 @@ assert(context.current['id'] == context.original['id']) else: assert(not context.original) - assert(context.network_segments) def create_network_precommit(self, context): self._check_network_context(context, False) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_pci_lib.py 2016-08-03 20:10:34.000000000 +0000 @@ -41,8 +41,13 @@ '1500 qdisc noop state DOWN mode DEFAULT group ' 'default qlen 500 link/ether 4a:9b:6d:de:65:b5 brd ' 'ff:ff:ff:ff:ff:ff') + MACVTAP_LINK_SHOW2 = ('64: macvtap2@p1p2_1: mtu ' + '1500 qdisc noop state DOWN mode DEFAULT group ' + 'default qlen 500 link/ether 4a:9b:6d:de:65:b5 brd ' + 'ff:ff:ff:ff:ff:ff') IP_LINK_SHOW_WITH_MACVTAP = '\n'.join((VF_LINK_SHOW, MACVTAP_LINK_SHOW)) + IP_LINK_SHOW_WITH_MACVTAP2 = '\n'.join((VF_LINK_SHOW, MACVTAP_LINK_SHOW2)) MAC_MAPPING = { 0: "fa:16:3e:b4:81:ac", @@ -156,6 +161,13 @@ self.assertTrue( pci_lib.PciDeviceIPWrapper.is_macvtap_assigned('enp129s0f1')) + def test_is_macvtap_assigned_interface_with_underscore(self): + with mock.patch.object(pci_lib.PciDeviceIPWrapper, + "_execute") as mock_exec: + mock_exec.return_value = self.IP_LINK_SHOW_WITH_MACVTAP2 + self.assertTrue( + pci_lib.PciDeviceIPWrapper.is_macvtap_assigned('p1p2_1')) + def test_is_macvtap_assigned_not_assigned(self): with mock.patch.object(pci_lib.PciDeviceIPWrapper, "_execute") as mock_exec: diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py 
neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py 2016-08-03 20:10:34.000000000 +0000 @@ -18,7 +18,7 @@ from oslo_config import cfg from oslo_utils import uuidutils -from neutron.agent.l2.extensions import manager as l2_ext_manager +from neutron.agent.l2 import l2_agent_extensions_manager as l2_ext_manager from neutron.agent import rpc as agent_rpc from neutron.extensions import portbindings from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config # noqa diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/mech_driver/test_mech_sriov_nic_switch.py 2016-08-29 20:05:49.000000000 +0000 @@ -28,7 +28,6 @@ from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base MELLANOX_CONNECTX3_PCI_INFO = '15b3:1004' -DEFAULT_PCI_INFO = ['15b3:1004', '8086:10ca'] class TestFakePortContext(base.FakePortContext): @@ -77,9 +76,6 @@ 'configurations': BAD_CONFIGS}] def setUp(self): - cfg.CONF.set_override('supported_pci_vendor_devs', - DEFAULT_PCI_INFO, - 'ml2_sriov') super(SriovNicSwitchMechanismBaseTestCase, self).setUp() self.driver = mech_driver.SriovNicSwitchMechanismDriver() self.driver.initialize() @@ -168,6 +164,9 @@ mech_driver.VIF_TYPE_HW_VEB) def test_profile_unsupported_pci_info(self): + cfg.CONF.set_override('supported_pci_vendor_devs', ['aa:bb'], + 'ml2_sriov') + self.driver.initialize() with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.' 'mech_driver.mech_driver.LOG') as log_mock: self._check_vif_for_pci_info('xxxx:yyyy', None) @@ -176,15 +175,21 @@ class SriovSwitchMechProfileFailTestCase(SriovNicSwitchMechanismBaseTestCase): - def _check_for_pci_vendor_info(self, pci_vendor_info): + def _check_for_pci_vendor_info( + self, pci_vendor_info, expected_result=False): context = TestFakePortContext(self.AGENT_TYPE, self.AGENTS, self.VLAN_SEGMENTS, portbindings.VNIC_DIRECT, pci_vendor_info) - self.driver._check_supported_pci_vendor_device(context) + self.assertEqual( + expected_result, + self.driver._check_supported_pci_vendor_device(context)) def test_profile_missing_profile(self): + cfg.CONF.set_override('supported_pci_vendor_devs', ['aa:bb'], + 'ml2_sriov') + self.driver.initialize() with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.' 'mech_driver.mech_driver.LOG') as log_mock: self._check_for_pci_vendor_info({}) @@ -192,12 +197,30 @@ " binding") def test_profile_missing_pci_vendor_info(self): + cfg.CONF.set_override('supported_pci_vendor_devs', ['aa:bb'], + 'ml2_sriov') + self.driver.initialize() with mock.patch('neutron.plugins.ml2.drivers.mech_sriov.' 
'mech_driver.mech_driver.LOG') as log_mock: self._check_for_pci_vendor_info({'aa': 'bb'}) log_mock.debug.assert_called_with("Missing pci vendor" " info in profile") + def test_pci_vendor_info_with_none(self): + self.driver.initialize() + self._check_for_pci_vendor_info( + {'aa': 'bb'}, expected_result=True) + + def test_pci_vendor_info(self): + cfg.CONF.set_override( + 'supported_pci_vendor_devs', + [MELLANOX_CONNECTX3_PCI_INFO], + 'ml2_sriov') + self.driver.initialize() + self._check_for_pci_vendor_info( + {'pci_vendor_info': MELLANOX_CONNECTX3_PCI_INFO}, + expected_result=True) + class SriovSwitchMechVifDetailsTestCase(SriovNicSwitchMechanismBaseTestCase): VLAN_SEGMENTS = [{api.ID: 'vlan_segment_id', @@ -251,8 +274,9 @@ def _set_config(self, pci_devs=['aa:bb']): cfg.CONF.set_override('mechanism_drivers', ['logger', 'sriovnicswitch'], 'ml2') - cfg.CONF.set_override('supported_pci_vendor_devs', pci_devs, - 'ml2_sriov') + if pci_devs: + cfg.CONF.set_override('supported_pci_vendor_devs', pci_devs, + 'ml2_sriov') def test_pci_vendor_config_single_entry(self): self._set_config() @@ -264,11 +288,6 @@ self.driver.initialize() self.assertEqual(['x:y', 'a:b'], self.driver.pci_vendor_info) - def test_pci_vendor_config_default_entry(self): - self.driver.initialize() - self.assertEqual(DEFAULT_PCI_INFO, - self.driver.pci_vendor_info) - def test_pci_vendor_config_wrong_entry(self): self._set_config(['wrong_entry']) self.assertRaises(cfg.Error, self.driver.initialize) @@ -288,3 +307,8 @@ def test_initialize_empty_string(self): self._set_config(['']) self.assertRaises(cfg.Error, self.driver.initialize) + + def test_initialize_pci_devs_none(self): + self._set_config(pci_devs=None) + self.driver.initialize() + self.assertIsNone(self.driver.pci_vendor_info) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py 2016-08-03 20:10:34.000000000 +0000 @@ -32,6 +32,10 @@ def setUp(self): super(QosOVSAgentDriverTestCase, self).setUp() + conn_patcher = mock.patch( + 'neutron.agent.ovsdb.native.connection.Connection.start') + conn_patcher.start() + self.addCleanup(conn_patcher.stop) self.context = context.get_admin_context() self.qos_driver = qos_driver.QosOVSAgentDriver() self.agent_api = ovs_ext_api.OVSAgentExtensionAPI( diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_phys.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_phys.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_phys.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_phys.py 2016-08-03 20:10:34.000000000 +0000 @@ -32,6 +32,10 @@ def setUp(self): super(OVSPhysicalBridgeTest, self).setUp() + conn_patcher = mock.patch( + 'neutron.agent.ovsdb.native.connection.Connection.start') + conn_patcher.start() + self.addCleanup(conn_patcher.stop) self.setup_bridge_mock('br-phys', self.br_phys_cls) self.stamp = 
self.br.default_cookie diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/native/test_br_tun.py 2016-08-03 20:10:34.000000000 +0000 @@ -31,7 +31,20 @@ dvr_process_next_table_id = ovs_const.PATCH_LV_TO_TUN def setUp(self): + conn_patcher = mock.patch( + 'neutron.agent.ovsdb.native.connection.Connection.start') + conn_patcher.start() super(OVSTunnelBridgeTest, self).setUp() + # NOTE(ivasilevskaya) The behaviour of oslotest.base.addCleanup(), + # per https://review.openstack.org/#/c/119201/4, guarantees that all + # started mocks will be stopped even without a direct call to + # patcher.stop(). + # If an individual mock should be stopped by a mechanism other than + # the default one, its cleanup has to be added after + # oslotest.BaseTestCase.setUp() so that it is not included in the + # stopall set cleaned up by mock.patch.stopall; this way the mock + # won't be stopped twice. + self.addCleanup(conn_patcher.stop) self.setup_bridge_mock('br-tun', self.br_tun_cls) self.stamp = self.br.default_cookie diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/test_br_cookie.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/test_br_cookie.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/test_br_cookie.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/test_br_cookie.py 2016-08-03 20:10:34.000000000 +0000 @@ -13,6 +13,8 @@ # License for the specific language governing permissions and limitations # under the License.
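+# NOTE(editor): constructing an OVSAgentBridge appears to start the native +# OVSDB connection; patching Connection.start in setUp (here, as in the +# sibling bridge test modules above) keeps unit tests from opening it.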
+import mock + from neutron.agent.common import ovs_lib from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ import ovs_bridge @@ -23,6 +25,10 @@ def setUp(self): super(TestBRCookieOpenflow, self).setUp() + conn_patcher = mock.patch( + 'neutron.agent.ovsdb.native.connection.Connection.start') + conn_patcher.start() + self.addCleanup(conn_patcher.stop) self.br = ovs_bridge.OVSAgentBridge('br-int') def test_reserved_cookies(self): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py 2016-08-03 20:10:34.000000000 +0000 @@ -41,6 +41,10 @@ class OVSAgentTestBase(OVSAgentConfigTestBase): def setUp(self): super(OVSAgentTestBase, self).setUp() + conn_patcher = mock.patch( + 'neutron.agent.ovsdb.native.connection.Connection.start') + conn_patcher.start() + self.addCleanup(conn_patcher.stop) self.br_int_cls = importutils.import_class(self._BR_INT_CLASS) self.br_phys_cls = importutils.import_class(self._BR_PHYS_CLASS) self.br_tun_cls = importutils.import_class(self._BR_TUN_CLASS) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_agent_extension_api.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_agent_extension_api.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_agent_extension_api.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_agent_extension_api.py 2016-08-03 20:10:34.000000000 +0000 @@ -28,6 +28,10 @@ def setUp(self): super(base.BaseTestCase, self).setUp() + conn_patcher = mock.patch( + 'neutron.agent.ovsdb.native.connection.Connection.start') + conn_patcher.start() + self.addCleanup(conn_patcher.stop) self.br_int = ovs_bridge.OVSAgentBridge("br-int") self.br_tun = ovs_bridge.OVSAgentBridge("br-tun") @@ -61,6 +65,10 @@ def setUp(self): super(TestOVSCookieBridge, self).setUp() + conn_patcher = mock.patch( + 'neutron.agent.ovsdb.native.connection.Connection.start') + conn_patcher.start() + self.addCleanup(conn_patcher.stop) self.bridge = ovs_bridge.OVSAgentBridge("br-foo") self.bridge.do_action_flows = mock.Mock() self.tested_bridge = ovs_ext_agt.OVSCookieBridge(self.bridge) @@ -146,6 +154,10 @@ def setUp(self): super(TestOVSDeferredCookieBridge, self).setUp() + conn_patcher = mock.patch( + 'neutron.agent.ovsdb.native.connection.Connection.start') + conn_patcher.start() + self.addCleanup(conn_patcher.stop) self.bridge = ovs_bridge.OVSAgentBridge("br-foo") self.bridge.do_action_flows = mock.Mock() self.cookie_bridge = ovs_ext_agt.OVSCookieBridge(self.bridge) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_capabilities.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_capabilities.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_capabilities.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_capabilities.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 
+1,30 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.callbacks import events +from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_capabilities +from neutron.services.trunk.drivers.openvswitch.agent import driver +from neutron.tests import base +from neutron_lib import constants + + +class CapabilitiesTest(base.BaseTestCase): + + @mock.patch("neutron.callbacks.manager.CallbacksManager.subscribe") + def test_register(self, mocked_subscribe): + ovs_capabilities.register() + mocked_subscribe.assert_called_with(driver.init_handler, + constants.AGENT_TYPE_OVS, + events.AFTER_INIT) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -36,6 +36,8 @@ from neutron.tests import base from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base +from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ + import test_vlanmanager NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi' @@ -97,6 +99,7 @@ def setUp(self): super(TestOvsNeutronAgent, self).setUp() + self.useFixture(test_vlanmanager.LocalVlanManagerFixture()) notifier_p = mock.patch(NOTIFIER) notifier_cls = notifier_p.start() self.notifier = mock.Mock() @@ -148,9 +151,8 @@ fixed_ips = [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.1'}] if old_local_vlan is not None: - self.agent.local_vlan_map[net_uuid] = ( - self.mod_agent.LocalVLANMapping( - old_local_vlan, None, None, None)) + self.agent.vlan_manager.add( + net_uuid, old_local_vlan, None, None, None) with mock.patch.object(self.agent, 'int_br', autospec=True) as int_br: int_br.db_get_val.return_value = db_get_val int_br.set_db_attribute.return_value = True @@ -241,17 +243,41 @@ 'physical_network': 'fake_network'} if segmentation_id is not None: local_vlan_map['segmentation_id'] = segmentation_id - with mock.patch.object(self.agent, 'int_br') as int_br: - int_br.get_vif_ports.return_value = [port] - int_br.get_ports_attributes.return_value = [{ - 'name': port.port_name, 'other_config': local_vlan_map, - 'tag': tag - }] + + # this is for the call inside get_vif_ports() + get_interfaces = [{'name': port.port_name, + 'ofport': '1', + 'external_ids': { + 'iface-id': '1', + 'attached-mac': 'mac1'}}, + {'name': 'invalid', + 'ofport': ovs_lib.INVALID_OFPORT, + 'external_ids': { + 'iface-id': '2', + 'attached-mac': 'mac2'}}, + {'name': 'unassigned', + 'ofport': ovs_lib.UNASSIGNED_OFPORT, + 'external_ids': { + 'iface-id': '3', + 'attached-mac': 'mac3'}}] + # this is for the call inside _restore_local_vlan_map() 
+ get_ports = [{'name': port.port_name, + 'other_config': local_vlan_map, + 'tag': tag}] + + with mock.patch.object(self.agent.int_br, + 'get_ports_attributes', + side_effect=[get_interfaces, get_ports]) as gpa: self.agent._restore_local_vlan_map() expected_hints = {} if tag: expected_hints[net_uuid] = tag self.assertEqual(expected_hints, self.agent._local_vlan_hints) + # make sure invalid and unassigned ports were skipped + gpa.assert_has_calls([ + mock.call('Interface', columns=mock.ANY, if_exists=True), + mock.call('Port', columns=mock.ANY, ports=['fake_port']) + ]) def test_restore_local_vlan_map_with_device_has_tag(self): self._test_restore_local_vlan_maps(2) @@ -579,9 +605,8 @@ br = self.br_int_cls('br-int') mac = "ca:fe:de:ad:be:ef" port = ovs_lib.VifPort(1, 1, 1, mac, br) - lvm = self.mod_agent.LocalVLANMapping( - 1, '1', None, 1, {port.vif_id: port}) - local_vlan_map = {'1': lvm} + self.agent.vlan_manager.add( + '1', 1, '1', None, 1, {port.vif_id: port}) vif_port_set = set([1, 3]) registered_ports = set([1, 2]) port_tags_dict = {1: []} @@ -589,8 +614,7 @@ added=set([3]), current=vif_port_set, removed=set([2]), updated=set([1]) ) - with mock.patch.dict(self.agent.local_vlan_map, local_vlan_map),\ - mock.patch.object(self.agent, 'tun_br', autospec=True): + with mock.patch.object(self.agent, 'tun_br', autospec=True): actual = self.mock_scan_ports( vif_port_set, registered_ports, port_tags_dict=port_tags_dict) self.assertEqual(expected, actual) @@ -622,8 +646,9 @@ new_failed_devices_retries_map) def test_add_port_tag_info(self): - self.agent.local_vlan_map["net1"] = mock.Mock() - self.agent.local_vlan_map["net1"].vlan = "1" + lvm = mock.Mock() + lvm.vlan = "1" + self.agent.vlan_manager.mapping["net1"] = lvm ovs_db_list = [{'name': 'tap1', 'tag': [], 'other_config': {'segmentation_id': '1'}}, @@ -658,7 +683,7 @@ def test_bind_devices(self): devices_up = ['tap1'] devices_down = ['tap2'] - self.agent.local_vlan_map["net1"] = mock.Mock() + self.agent.vlan_manager.mapping["net1"] = mock.Mock() ovs_db_list = [{'name': 'tap1', 'tag': []}, {'name': 'tap2', 'tag': []}] vif_port1 = mock.Mock() @@ -690,8 +715,7 @@ self.agent.prevent_arp_spoofing = enable_prevent_arp_spoofing ovs_db_list = [{'name': 'fake_device', 'tag': []}] - self.agent.local_vlan_map = { - 'fake_network': ovs_agent.LocalVLANMapping(1, None, None, 1)} + self.agent.vlan_manager.add('fake_network', 1, None, None, 1) vif_port = mock.Mock() vif_port.port_name = 'fake_device' vif_port.ofport = 1 @@ -1145,9 +1169,9 @@ def _test_setup_physical_bridges(self, port_exists=False): with mock.patch.object(ip_lib.IPDevice, "exists") as devex_fn,\ mock.patch.object(sys, "exit"),\ - mock.patch.object(utils, "execute"),\ mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\ - mock.patch.object(self.agent, 'int_br') as int_br: + mock.patch.object(self.agent, 'int_br') as int_br,\ + mock.patch.object(ovs_lib.BaseOVS, 'get_bridges'): devex_fn.return_value = True parent = mock.MagicMock() phys_br = phys_br_cls() @@ -1166,6 +1190,7 @@ expected_calls = [ mock.call.phys_br_cls('br-eth'), mock.call.phys_br.create(), + mock.call.phys_br.set_secure_mode(), mock.call.phys_br.setup_controllers(mock.ANY), mock.call.phys_br.setup_default_table(), mock.call.int_br.db_get_val('Interface', 'int-br-eth', @@ -1258,11 +1283,11 @@ def _test_setup_physical_bridges_change_from_veth_to_patch_conf( self, port_exists=False): with mock.patch.object(sys, "exit"),\ - mock.patch.object(utils, "execute"),\ mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\ 
mock.patch.object(self.agent, 'int_br') as int_br,\ mock.patch.object(self.agent.int_br, 'db_get_val', - return_value='veth'): + return_value='veth'),\ + mock.patch.object(ovs_lib.BaseOVS, 'get_bridges'): phys_br = phys_br_cls() parent = mock.MagicMock() parent.attach_mock(phys_br_cls, 'phys_br_cls') @@ -1280,6 +1305,7 @@ expected_calls = [ mock.call.phys_br_cls('br-eth'), mock.call.phys_br.create(), + mock.call.phys_br.set_secure_mode(), mock.call.phys_br.setup_controllers(mock.ANY), mock.call.phys_br.setup_default_table(), mock.call.int_br.delete_port('int-br-eth'), @@ -1399,7 +1425,7 @@ lvm = mock.Mock() lvm.network_type = "gre" lvm.vif_ports = {"vif1": mock.Mock()} - self.agent.local_vlan_map["netuid12345"] = lvm + self.agent.vlan_manager.mapping["netuid12345"] = lvm self.agent.port_unbound("vif1", "netuid12345") self.assertTrue(reclvl_fn.called) @@ -1422,7 +1448,7 @@ lvm2.vlan = 'vlan2' lvm2.segmentation_id = 'seg2' lvm2.tun_ofports = set(['1', '2']) - self.agent.local_vlan_map = {'net1': lvm1, 'net2': lvm2} + self.agent.vlan_manager.mapping = {'net1': lvm1, 'net2': lvm2} self.agent.tun_br_ofports = {'gre': {'1.1.1.1': '1', '2.2.2.2': '2'}} self.agent.arp_responder_enabled = True @@ -1769,6 +1795,8 @@ with mock.patch.object(async_process.AsyncProcess, "_spawn"),\ mock.patch.object(async_process.AsyncProcess, "start"),\ + mock.patch.object(async_process.AsyncProcess, + "is_active", return_value=True),\ mock.patch.object(async_process.AsyncProcess, "stop"),\ mock.patch.object(log.KeywordArgumentAdapter, 'exception') as log_exception,\ @@ -1851,6 +1879,8 @@ def test_rpc_loop_fail_to_process_network_ports_keep_flows(self): with mock.patch.object(async_process.AsyncProcess, "_spawn"),\ mock.patch.object(async_process.AsyncProcess, "start"),\ + mock.patch.object(async_process.AsyncProcess, + "is_active", return_value=True),\ mock.patch.object(async_process.AsyncProcess, "stop"),\ mock.patch.object( self.mod_agent.OVSNeutronAgent, @@ -2026,19 +2056,17 @@ """ def add_new_vlan_mapping(*args, **kwargs): - self.agent.local_vlan_map['bar'] = ( - self.mod_agent.LocalVLANMapping(1, 2, 3, 4)) + self.agent.vlan_manager.add('bar', 1, 2, 3, 4) bridge = mock.Mock() tunnel_type = 'vxlan' self.agent.tun_br_ofports = {tunnel_type: dict()} self.agent.l2_pop = False - self.agent.local_vlan_map = { - 'foo': self.mod_agent.LocalVLANMapping(4, tunnel_type, 2, 1)} + self.agent.vlan_manager.add('foo', 4, tunnel_type, 2, 1) self.agent.local_ip = '2.3.4.5' bridge.install_flood_to_tun.side_effect = add_new_vlan_mapping self.agent._setup_tunnel_port(bridge, 1, '1.2.3.4', tunnel_type=tunnel_type) - self.assertIn('bar', self.agent.local_vlan_map) + self.assertIn('bar', self.agent.vlan_manager) def test_setup_entry_for_arp_reply_ignores_ipv6_addresses(self): self.agent.arp_responder_enabled = True @@ -2102,6 +2130,10 @@ def setUp(self): super(AncillaryBridgesTest, self).setUp() + conn_patcher = mock.patch( + 'neutron.agent.ovsdb.native.connection.Connection.start') + conn_patcher.start() + self.addCleanup(conn_patcher.stop) notifier_p = mock.patch(NOTIFIER) notifier_cls = notifier_p.start() self.notifier = mock.Mock() @@ -2268,7 +2300,8 @@ self.agent.patch_tun_ofport = 1 self.agent.patch_int_ofport = 2 self.agent.dvr_agent.local_ports = {} - self.agent.local_vlan_map = {} + self.agent.vlan_manager = self.useFixture( + test_vlanmanager.LocalVlanManagerFixture()).manager self.agent.dvr_agent.enable_distributed_routing = True self.agent.dvr_agent.enable_tunneling = True self.agent.dvr_agent.patch_tun_ofport = 1 @@ -2369,7 
+2402,7 @@ n_const.DEVICE_OWNER_DVR_INTERFACE, False) phy_ofp = self.agent.dvr_agent.phys_ofports[physical_network] int_ofp = self.agent.dvr_agent.int_ofports[physical_network] - lvid = self.agent.local_vlan_map[self._net_uuid].vlan + lvid = self.agent.vlan_manager.get(self._net_uuid).vlan expected_on_phys_br = [ mock.call.provision_local_vlan( port=phy_ofp, @@ -2457,7 +2490,7 @@ self._port, self._net_uuid, network_type, physical_network, segmentation_id, self._fixed_ips, n_const.DEVICE_OWNER_DVR_INTERFACE, False) - lvid = self.agent.local_vlan_map[self._net_uuid].vlan + lvid = self.agent.vlan_manager.get(self._net_uuid).vlan expected_on_int_br = self._expected_port_bound( self._port, lvid) expected_on_tun_br = [ @@ -2539,7 +2572,7 @@ def test_port_bound_for_dvr_with_csnat_ports(self): self._setup_for_dvr_test() int_br, tun_br = self._port_bound_for_dvr_with_csnat_ports() - lvid = self.agent.local_vlan_map[self._net_uuid].vlan + lvid = self.agent.vlan_manager.get(self._net_uuid).vlan expected_on_int_br = [ mock.call.install_dvr_to_src_mac( network_type='vxlan', @@ -2566,7 +2599,7 @@ # simulate a replug self._port.ofport = 12 int_br, tun_br = self._port_bound_for_dvr_with_csnat_ports() - lvid = self.agent.local_vlan_map[self._net_uuid].vlan + lvid = self.agent.vlan_manager.get(self._net_uuid).vlan expected_on_int_br = [ mock.call.delete_dvr_to_src_mac( network_type='vxlan', @@ -2687,7 +2720,7 @@ None, None, self._fixed_ips, n_const.DEVICE_OWNER_DVR_INTERFACE, False) - lvid = self.agent.local_vlan_map[self._net_uuid].vlan + lvid = self.agent.vlan_manager.get(self._net_uuid).vlan self.assertEqual(self._expected_port_bound(self._port, lvid), int_br.mock_calls) expected_on_tun_br = [ @@ -2722,7 +2755,7 @@ failed_devices = {'added': set(), 'removed': set()} failed_devices['removed'] = self.agent.treat_devices_removed( [self._port.vif_id]) - lvid = self.agent.local_vlan_map[self._net_uuid].vlan + lvid = self.agent.vlan_manager.get(self._net_uuid).vlan if ip_version == 4: expected = [ mock.call.delete_dvr_process_ipv4( @@ -2784,7 +2817,7 @@ None, None, self._fixed_ips, n_const.DEVICE_OWNER_DVR_INTERFACE, False) - lvid = self.agent.local_vlan_map[self._net_uuid].vlan + lvid = self.agent.vlan_manager.get(self._net_uuid).vlan self.assertEqual( self._expected_port_bound(self._port, lvid), int_br.mock_calls) @@ -2899,7 +2932,7 @@ None, None, self._fixed_ips, n_const.DEVICE_OWNER_ROUTER_SNAT, False) - lvid = self.agent.local_vlan_map[self._net_uuid].vlan + lvid = self.agent.vlan_manager.get(self._net_uuid).vlan expected_on_int_br = [ mock.call.install_dvr_to_src_mac( network_type='vxlan', diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py 2016-08-29 20:05:49.000000000 +0000 @@ -28,6 +28,8 @@ from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ import ovs_test_base +from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \ + import test_vlanmanager def nonzero(f): @@ -74,6 +76,11 @@ def setUp(self): super(TunnelTest, self).setUp() + self.useFixture(test_vlanmanager.LocalVlanManagerFixture()) + conn_patcher = 
mock.patch( + 'neutron.agent.ovsdb.native.connection.Connection.start') + conn_patcher.start() + self.addCleanup(conn_patcher.stop) cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') @@ -89,11 +96,11 @@ self.MAP_TUN_INT_OFPORT = 33333 self.MAP_TUN_PHY_OFPORT = 44444 - self.LVM = self.mod_agent.LocalVLANMapping( + self.LVM_DATA = ( LV_ID, 'gre', None, LS_ID, VIF_PORTS) - self.LVM_FLAT = self.mod_agent.LocalVLANMapping( + self.LVM_FLAT_DATA = ( LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS) - self.LVM_VLAN = self.mod_agent.LocalVLANMapping( + self.LVM_VLAN_DATA = ( LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS) self.inta = mock.Mock() @@ -202,6 +209,7 @@ self.mock_map_tun_bridge_expected = [ mock.call.create(), + mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), mock.call.setup_default_table(), mock.call.port_exists('phy-%s' % self.MAP_TUN_BRIDGE), @@ -245,7 +253,8 @@ mock.call.add_patch_port('patch-tun', 'patch-int'), ] self.mock_int_bridge_expected += [ - mock.call.get_vif_ports(), + mock.call.get_vif_ports((ovs_lib.INVALID_OFPORT, + ovs_lib.UNASSIGNED_OFPORT)), mock.call.get_ports_attributes( 'Port', columns=['name', 'other_config', 'tag'], ports=[]) ] @@ -415,16 +424,16 @@ a = self._build_agent() a.available_local_vlans = set() - a.local_vlan_map[NET_UUID] = self.LVM + a.vlan_manager.add(NET_UUID, *self.LVM_DATA) a.reclaim_local_vlan(NET_UUID) - self.assertIn(self.LVM.vlan, a.available_local_vlans) + self.assertIn(self.LVM_DATA[0], a.available_local_vlans) self._verify_mock_calls() def test_reclaim_local_vlan_flat(self): self.mock_map_tun_bridge_expected.append( mock.call.reclaim_local_vlan( port=self.MAP_TUN_PHY_OFPORT, - lvid=self.LVM_FLAT.vlan)) + lvid=self.LVM_FLAT_DATA[0])) self.mock_int_bridge_expected.append( mock.call.reclaim_local_vlan( port=self.INT_OFPORT, @@ -435,16 +444,16 @@ a.int_ofports['net1'] = self.INT_OFPORT a.available_local_vlans = set() - a.local_vlan_map[NET_UUID] = self.LVM_FLAT + a.vlan_manager.add(NET_UUID, *self.LVM_FLAT_DATA) a.reclaim_local_vlan(NET_UUID) - self.assertIn(self.LVM_FLAT.vlan, a.available_local_vlans) + self.assertIn(self.LVM_FLAT_DATA[0], a.available_local_vlans) self._verify_mock_calls() def test_reclaim_local_vlan_vlan(self): self.mock_map_tun_bridge_expected.append( mock.call.reclaim_local_vlan( port=self.MAP_TUN_PHY_OFPORT, - lvid=self.LVM_VLAN.vlan)) + lvid=self.LVM_VLAN_DATA[0])) self.mock_int_bridge_expected.append( mock.call.reclaim_local_vlan( port=self.INT_OFPORT, @@ -455,9 +464,9 @@ a.int_ofports['net1'] = self.INT_OFPORT a.available_local_vlans = set() - a.local_vlan_map[NET_UUID] = self.LVM_VLAN + a.vlan_manager.add(NET_UUID, *self.LVM_VLAN_DATA) a.reclaim_local_vlan(NET_UUID) - self.assertIn(self.LVM_VLAN.vlan, a.available_local_vlans) + self.assertIn(self.LVM_VLAN_DATA[0], a.available_local_vlans) self._verify_mock_calls() def test_port_bound(self): @@ -472,7 +481,7 @@ vlan_mapping)] a = self._build_agent() - a.local_vlan_map[NET_UUID] = self.LVM + a.vlan_manager.add(NET_UUID, *self.LVM_DATA) a.local_dvr_map = {} self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = {} a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID, @@ -483,7 +492,7 @@ with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'reclaim_local_vlan') as reclaim_local_vlan: a = self._build_agent() - a.local_vlan_map[NET_UUID] = self.LVM + a.vlan_manager.add(NET_UUID, *self.LVM_DATA) a.port_unbound(VIF_ID, NET_UUID) reclaim_local_vlan.assert_called_once_with(NET_UUID) @@ -502,7 +511,7 @@ a = 
self._build_agent() a.available_local_vlans = set([LV_ID]) - a.local_vlan_map[NET_UUID] = self.LVM + a.vlan_manager.add(NET_UUID, *self.LVM_DATA) self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = mock.Mock() a.port_dead(VIF_PORT) self._verify_mock_calls() @@ -659,6 +668,7 @@ self.mock_map_tun_bridge_expected = [ mock.call.create(), + mock.call.set_secure_mode(), mock.call.setup_controllers(mock.ANY), mock.call.setup_default_table(), mock.call.add_port(self.intb), @@ -692,7 +702,8 @@ mock.call.add_patch_port('patch-tun', 'patch-int') ] self.mock_int_bridge_expected += [ - mock.call.get_vif_ports(), + mock.call.get_vif_ports((ovs_lib.INVALID_OFPORT, + ovs_lib.UNASSIGNED_OFPORT)), mock.call.get_ports_attributes( 'Port', columns=['name', 'other_config', 'tag'], ports=[]) ] diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_vlanmanager.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_vlanmanager.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_vlanmanager.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_vlanmanager.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,126 @@ +# Copyright 2016 Red Hat, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
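The new test_vlanmanager.py that follows has to defeat a process-wide singleton: its fixture forgets the cached LocalVlanManager instance so that each test gets a private manager, then restores the original instance on cleanup. A minimal sketch of that reset pattern, assuming a __new__-based singleton (the Manager class here is invented; the real LocalVlanManager may cache its instance differently):

import fixtures


class Manager(object):
    """Toy process-wide singleton (illustrative only)."""
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super(Manager, cls).__new__(cls)
            cls._instance.mapping = {}
        return cls._instance


class FreshManagerFixture(fixtures.Fixture):
    """Hand each test a fresh Manager; restore the old one on cleanup."""

    def _setUp(self):
        self._saved = Manager._instance
        Manager._instance = None   # forget the cached singleton
        self.manager = Manager()   # the test's private instance
        self.addCleanup(self._restore)

    def _restore(self):
        Manager._instance = self._saved

In a test case, self.useFixture(FreshManagerFixture()).manager then yields an isolated instance, which is exactly how the hunks above swap direct local_vlan_map access for the vlan_manager.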
+ +import fixtures +import testtools + +from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager +from neutron.tests import base + + +class LocalVlanManagerFixture(fixtures.Fixture): + def _setUp(self): + super(LocalVlanManagerFixture, self)._setUp() + self.vlan_manager = vlanmanager.LocalVlanManager() + self.addCleanup(self.restore_manager) + # Remove _instance attribute from VlanManager in order to not obtain a + # singleton + del vlanmanager.LocalVlanManager._instance + self.manager = vlanmanager.LocalVlanManager() + + def restore_manager(self): + vlanmanager.LocalVlanManager._instance = self.vlan_manager + + +class TestLocalVLANMapping(base.BaseTestCase): + def test___eq___equal(self): + mapping1 = vlanmanager.LocalVLANMapping(1, 2, 3, 4, 5) + mapping2 = vlanmanager.LocalVLANMapping(1, 2, 3, 4, 5) + self.assertEqual(mapping1, mapping2) + + def test___eq___different(self): + mapping1 = vlanmanager.LocalVLANMapping(1, 2, 3, 4, 5) + mapping2 = vlanmanager.LocalVLANMapping(1, 2, 4, 4, 5) + self.assertNotEqual(mapping1, mapping2) + + def test___eq___different_type(self): + mapping = vlanmanager.LocalVLANMapping(1, 2, 3, 4, 5) + self.assertNotEqual(mapping, "foo") + + +class TestLocalVlanManager(base.BaseTestCase): + + def setUp(self): + super(TestLocalVlanManager, self).setUp() + self.vlan_manager = self.useFixture(LocalVlanManagerFixture()).manager + + def test_is_singleton(self): + self.vlan_manager.add(1, None, None, None, None) + new_vlan_manager = vlanmanager.LocalVlanManager() + self.assertIs(new_vlan_manager, self.vlan_manager) + self.assertItemsEqual(new_vlan_manager.mapping, + self.vlan_manager.mapping) + + def test_in_operator_on_key(self): + self.vlan_manager.add(1, None, None, None, None) + self.assertIn(1, self.vlan_manager) + self.assertNotIn(2, self.vlan_manager) + + def test_iterator_returns_vlan_mappings(self): + created_vlans = [] + for val in range(3): + self.vlan_manager.add(val, val, val, val, val) + created_vlans.append(self.vlan_manager.get(val)) + + self.assertItemsEqual(created_vlans, list(self.vlan_manager)) + + def test_get_net_uuid_existing(self): + port_id = 'port-id' + vlan_data = (2, 3, 4, 5, {port_id: 'port'}) + net_id = 1 + self.vlan_manager.add(net_id, *vlan_data) + obtained_net_id = self.vlan_manager.get_net_uuid(port_id) + self.assertEqual(net_id, obtained_net_id) + + def test_get_net_uuid_non_existing_raises_exception(self): + vlan_data = (1, 2, 3, 4, 5, {'port_id': 'port'}) + self.vlan_manager.add(*vlan_data) + with testtools.ExpectedException(vlanmanager.VifIdNotFound): + self.vlan_manager.get_net_uuid('non-existing-port') + + def test_add_and_get(self): + vlan_data = (2, 3, 4, 5, 6) + expected_vlan_mapping = vlanmanager.LocalVLANMapping(*vlan_data) + self.vlan_manager.add(1, *vlan_data) + vlan_mapping = self.vlan_manager.get(1) + self.assertEqual(expected_vlan_mapping, vlan_mapping) + + def test_add_existing_raises_exception(self): + vlan_data = (2, 3, 4, 5, 6) + self.vlan_manager.add(1, *vlan_data) + with testtools.ExpectedException(vlanmanager.MappingAlreadyExists): + self.vlan_manager.add(1, *vlan_data) + + def test_get_non_existing_raises_keyerror(self): + with testtools.ExpectedException(vlanmanager.MappingNotFound): + self.vlan_manager.get(1) + + def test_pop(self): + vlan_data = (2, 3, 4, 5, 6) + expected_vlan_mapping = vlanmanager.LocalVLANMapping(*vlan_data) + self.vlan_manager.add(1, *vlan_data) + vlan_mapping = self.vlan_manager.pop(1) + self.assertEqual(expected_vlan_mapping, vlan_mapping) + 
self.assertFalse(self.vlan_manager.mapping) + + def test_pop_non_existing_raises_exception(self): + with testtools.ExpectedException(vlanmanager.MappingNotFound): + self.vlan_manager.pop(1) + + +class TestDeprecationMessage(base.BaseTestCase): + def test_deprecation_message(self): + """Test that calling function doesn't crash""" + vlanmanager.deprecate_local_vlan_map_in_object("foo") diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/openvswitch/mech_driver/test_mech_openvswitch.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,10 +16,14 @@ from neutron_lib import constants from oslo_config import cfg +from neutron.callbacks import events +from neutron.callbacks import registry from neutron.extensions import portbindings from neutron.plugins.ml2 import driver_api as api -from neutron.plugins.ml2.drivers.openvswitch.mech_driver \ - import mech_openvswitch +from neutron.plugins.ml2.drivers.openvswitch.agent.common import ( + constants as a_const) +from neutron.plugins.ml2.drivers.openvswitch.mech_driver import ( + mech_openvswitch) from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base @@ -59,6 +63,19 @@ self.driver = mech_openvswitch.OpenvswitchMechanismDriver() self.driver.initialize() + def test__set_bridge_name_notify(self): + + def fake_callback(resource, event, trigger, **kwargs): + trigger('fake-br-name') + + registry.subscribe(fake_callback, a_const.OVS_BRIDGE_NAME, + events.BEFORE_READ) + fake_vif_details = {} + self.driver._set_bridge_name('foo', fake_vif_details) + self.assertEqual( + 'fake-br-name', + fake_vif_details.get(portbindings.VIF_DETAILS_BRIDGE_NAME, '')) + class OpenvswitchMechanismSGDisabledBaseTestCase( OpenvswitchMechanismBaseTestCase): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/test_type_flat.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/test_type_flat.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/test_type_flat.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/test_type_flat.py 2016-08-29 20:05:49.000000000 +0000 @@ -17,6 +17,7 @@ from neutron.common import exceptions as n_exc import neutron.db.api as db +from neutron.db.models.plugins.ml2 import flatallocation as type_flat_model from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import config from neutron.plugins.ml2 import driver_api as api @@ -39,7 +40,7 @@ self.driver.physnet_mtus = [] def _get_allocation(self, session, segment): - return session.query(type_flat.FlatAllocation).filter_by( + return session.query(type_flat_model.FlatAllocation).filter_by( physical_network=segment[api.PHYSICAL_NETWORK]).first() def test_is_partial_segment(self): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/test_type_geneve.py 2016-08-03 20:10:34.000000000 
+0000 @@ -53,3 +53,10 @@ testlib_api.SqlTestCase): DRIVER_CLASS = type_geneve.GeneveTypeDriver TYPE = p_const.TYPE_GENEVE + + +class GeneveTypeTunnelMTUTest(base_type_tunnel.TunnelTypeMTUTestMixin, + testlib_api.SqlTestCase): + DRIVER_CLASS = type_geneve.GeneveTypeDriver + TYPE = p_const.TYPE_GENEVE + ENCAP_OVERHEAD = p_const.GENEVE_ENCAP_MIN_OVERHEAD diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,8 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. +from neutron.db.models.plugins.ml2 import gre_allocation_endpoints as gre_model from neutron.plugins.common import constants as p_const -from neutron.plugins.ml2 import config from neutron.plugins.ml2.drivers import type_gre from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel from neutron.tests.unit.plugins.ml2 import test_rpc @@ -28,12 +28,12 @@ def _add_allocation(session, gre_id, allocated=False): - allocation = type_gre.GreAllocation(gre_id=gre_id, allocated=allocated) + allocation = gre_model.GreAllocation(gre_id=gre_id, allocated=allocated) allocation.save(session) def _get_allocation(session, gre_id): - return session.query(type_gre.GreAllocation).filter_by( + return session.query(gre_model.GreAllocation).filter_by( gre_id=gre_id).one() @@ -55,30 +55,6 @@ elif endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_TWO: self.assertEqual(base_type_tunnel.HOST_TWO, endpoint['host']) - def test_get_mtu(self): - config.cfg.CONF.set_override('global_physnet_mtu', 1500) - config.cfg.CONF.set_override('path_mtu', 1475, group='ml2') - self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} - self.assertEqual(1475 - p_const.GRE_ENCAP_OVERHEAD, - self.driver.get_mtu('physnet1')) - - config.cfg.CONF.set_override('global_physnet_mtu', 1425) - config.cfg.CONF.set_override('path_mtu', 1475, group='ml2') - self.driver.physnet_mtus = {'physnet1': 1400, 'physnet2': 1400} - self.assertEqual(1425 - p_const.GRE_ENCAP_OVERHEAD, - self.driver.get_mtu('physnet1')) - - config.cfg.CONF.set_override('global_physnet_mtu', 0) - config.cfg.CONF.set_override('path_mtu', 1475, group='ml2') - self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1425} - self.assertEqual(1475 - p_const.GRE_ENCAP_OVERHEAD, - self.driver.get_mtu('physnet2')) - - config.cfg.CONF.set_override('global_physnet_mtu', 0) - config.cfg.CONF.set_override('path_mtu', 0, group='ml2') - self.driver.physnet_mtus = {} - self.assertEqual(0, self.driver.get_mtu('physnet1')) - class GreTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin, testlib_api.SqlTestCase): @@ -90,3 +66,10 @@ testlib_api.SqlTestCase): DRIVER_CLASS = type_gre.GreTypeDriver TYPE = p_const.TYPE_GRE + + +class GreTypeTunnelMTUTest(base_type_tunnel.TunnelTypeMTUTestMixin, + testlib_api.SqlTestCase): + DRIVER_CLASS = type_gre.GreTypeDriver + TYPE = p_const.TYPE_GRE + ENCAP_OVERHEAD = p_const.GRE_ENCAP_OVERHEAD diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py 2016-05-12 
19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py 2016-08-03 20:10:34.000000000 +0000 @@ -14,7 +14,6 @@ # under the License. from neutron.plugins.common import constants as p_const -from neutron.plugins.ml2 import config from neutron.plugins.ml2.drivers import type_vxlan from neutron.tests.unit.plugins.ml2.drivers import base_type_tunnel from neutron.tests.unit.plugins.ml2 import test_rpc @@ -65,30 +64,6 @@ self.assertEqual(VXLAN_UDP_PORT_TWO, endpoint['udp_port']) self.assertEqual(base_type_tunnel.HOST_TWO, endpoint['host']) - def test_get_mtu(self): - config.cfg.CONF.set_override('global_physnet_mtu', 1500) - config.cfg.CONF.set_override('path_mtu', 1475, group='ml2') - self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400} - self.assertEqual(1475 - p_const.VXLAN_ENCAP_OVERHEAD, - self.driver.get_mtu('physnet1')) - - config.cfg.CONF.set_override('global_physnet_mtu', 1450) - config.cfg.CONF.set_override('path_mtu', 1475, group='ml2') - self.driver.physnet_mtus = {'physnet1': 1400, 'physnet2': 1425} - self.assertEqual(1450 - p_const.VXLAN_ENCAP_OVERHEAD, - self.driver.get_mtu('physnet1')) - - config.cfg.CONF.set_override('global_physnet_mtu', 0) - config.cfg.CONF.set_override('path_mtu', 1450, group='ml2') - self.driver.physnet_mtus = {'physnet1': 1425, 'physnet2': 1400} - self.assertEqual(1450 - p_const.VXLAN_ENCAP_OVERHEAD, - self.driver.get_mtu('physnet1')) - - config.cfg.CONF.set_override('global_physnet_mtu', 0) - config.cfg.CONF.set_override('path_mtu', 0, group='ml2') - self.driver.physnet_mtus = {} - self.assertEqual(0, self.driver.get_mtu('physnet1')) - class VxlanTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin, testlib_api.SqlTestCase): @@ -100,3 +75,10 @@ testlib_api.SqlTestCase): DRIVER_CLASS = type_vxlan.VxlanTypeDriver TYPE = p_const.TYPE_VXLAN + + +class VxlanTypeTunnelMTUTest(base_type_tunnel.TunnelTypeMTUTestMixin, + testlib_api.SqlTestCase): + DRIVER_CLASS = type_vxlan.VxlanTypeDriver + TYPE = p_const.TYPE_VXLAN + ENCAP_OVERHEAD = p_const.VXLAN_ENCAP_OVERHEAD diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/extensions/test_dns_integration.py 2016-08-29 20:05:49.000000000 +0000 @@ -34,7 +34,6 @@ mock_client = mock.Mock() mock_admin_client = mock.Mock() mock_config = {'return_value': (mock_client, mock_admin_client)} -DNSDOMAIN = 'domain.com.' DNSNAME = 'port-dns-name' NEWDNSNAME = 'new-port-dns-name' V4UUID = 'v4_uuid' @@ -46,6 +45,7 @@ **mock_config) class DNSIntegrationTestCase(test_plugin.Ml2PluginV2TestCase): _extension_drivers = ['dns'] + _domain = 'domain.com.' 
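The DNS integration change here hoists the module-level DNSDOMAIN constant into a _domain class attribute, so a subclass such as DNSIntegrationTestCaseDefaultDomain further down can rerun the whole suite against another domain by overriding a single attribute. A self-contained sketch of the pattern, with invented class and test names:

import unittest


class DomainTestBase(unittest.TestCase):
    # Counterpart of the old module-level DNSDOMAIN constant.
    _domain = 'domain.com.'

    def test_fqdn_uses_domain(self):
        fqdn = 'host-10-0-0-2.%s' % self._domain
        self.assertTrue(fqdn.endswith(self._domain))


class DefaultDomainTest(DomainTestBase):
    # Same test bodies, different domain -- nothing is duplicated.
    _domain = 'openstacklocal.'


if __name__ == '__main__':
    unittest.main()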
def setUp(self): config.cfg.CONF.set_override('extension_drivers', @@ -58,6 +58,7 @@ dns_integration.DNS_DRIVER = None dns_integration.subscribe() self.plugin = manager.NeutronManager.get_plugin() + config.cfg.CONF.set_override('dns_domain', self._domain) def _create_port_for_test(self, provider_net=True, dns_domain=True, dns_name=True, ipv4=True, ipv6=True): @@ -69,7 +70,7 @@ pnet.SEGMENTATION_ID: '2016', } if dns_domain: - net_kwargs[dns.DNSDOMAIN] = DNSDOMAIN + net_kwargs[dns.DNSDOMAIN] = self._domain net_kwargs['arg_list'] = \ net_kwargs.get('arg_list', ()) + (dns.DNSDOMAIN,) res = self._create_network(self.fmt, 'test_network', True, @@ -225,7 +226,9 @@ len(mock_admin_client.recordsets.delete.call_args_list) == len(expected_delete)) else: - self.assertTrue(dns_data_db is None) + if not dns_name: + self.assertEqual('', port[dns.DNSNAME]) + self.assertTrue(dns_data_db is None) self.assertFalse(mock_client.recordsets.create.call_args_list) self.assertFalse( mock_admin_client.recordsets.create.call_args_list) @@ -246,43 +249,35 @@ config.cfg.CONF.designate.ipv6_ptr_zone_prefix_size) / 4 def test_create_port(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() self._verify_port_dns(net, port, dns_data_db) def test_create_port_tenant_network(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(provider_net=False) self._verify_port_dns(net, port, dns_data_db, provider_net=False) def test_create_port_no_dns_name(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(dns_name=False) self._verify_port_dns(net, port, dns_data_db, dns_name=False) def test_create_port_no_dns_domain(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(dns_domain=False) self._verify_port_dns(net, port, dns_data_db, dns_domain=False) def test_create_port_no_dns_driver(self, *mocks): config.cfg.CONF.set_override('external_dns_driver', '') - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() self._verify_port_dns(net, port, dns_data_db, dns_driver=False) def test_create_port_no_ipv6(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(ipv6=False) self._verify_port_dns(net, port, dns_data_db) def test_create_port_no_ipv4(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(ipv4=False) self._verify_port_dns(net, port, dns_data_db) def test_create_port_no_ptr_zones(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) config.cfg.CONF.set_override('allow_reverse_dns_lookup', False, group='designate') net, port, dns_data_db = self._create_port_for_test() @@ -291,7 +286,6 @@ group='designate') def test_update_port(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(net, port, dns_data_db, delete_records=True, @@ -299,12 +293,11 @@ previous_dns_name=DNSNAME) def test_update_port_with_current_dns_name(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() port, dns_data_db = self._update_port_for_test(port, new_dns_name=DNSNAME) 
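# The assertFalse(...call_args_list) checks used below and throughout these
# DNS tests assert that a mock was never invoked: an untouched Mock has an
# empty call_args_list. A sketch of the same check with an invented client
# object (assert_not_called() exists on Mock in the mock library used here):
#
#     client = mock.Mock()
#     # ... exercise code that must not create recordsets ...
#     self.assertEqual([], client.recordsets.create.call_args_list)
#     client.recordsets.create.assert_not_called()  # equivalent, clearer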
self.assertEqual(DNSNAME, dns_data_db['current_dns_name']) - self.assertEqual(DNSDOMAIN, dns_data_db['current_dns_domain']) + self.assertEqual(self._domain, dns_data_db['current_dns_domain']) self.assertEqual('', dns_data_db['previous_dns_name']) self.assertEqual('', dns_data_db['previous_dns_domain']) self.assertFalse(mock_client.recordsets.create.call_args_list) @@ -315,7 +308,6 @@ mock_admin_client.recordsets.delete.call_args_list) def test_update_port_tenant_network(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(provider_net=False) port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(net, port, dns_data_db, delete_records=True, @@ -323,7 +315,6 @@ previous_dns_name=DNSNAME, provider_net=False) def test_update_port_no_dns_domain(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(dns_domain=False) port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(net, port, dns_data_db, delete_records=True, @@ -331,7 +322,6 @@ previous_dns_name=DNSNAME, dns_domain=False) def test_update_port_add_dns_name(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test(dns_name=False) port, dns_data_db = self._update_port_for_test(port) self._verify_port_dns(net, port, dns_data_db, delete_records=False, @@ -339,14 +329,12 @@ previous_dns_name='') def test_update_port_clear_dns_name(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() port, dns_data_db = self._update_port_for_test(port, new_dns_name='') self._verify_port_dns(net, port, dns_data_db, delete_records=True, current_dns_name='', previous_dns_name=DNSNAME) def test_update_port_non_dns_name_attribute(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() port_name = 'port_name' kwargs = {'name': port_name} @@ -354,7 +342,7 @@ new_dns_name=None, **kwargs) self.assertEqual(DNSNAME, dns_data_db['current_dns_name']) - self.assertEqual(DNSDOMAIN, dns_data_db['current_dns_domain']) + self.assertEqual(self._domain, dns_data_db['current_dns_domain']) self.assertEqual('', dns_data_db['previous_dns_name']) self.assertEqual('', dns_data_db['previous_dns_domain']) self.assertFalse(mock_client.recordsets.create.call_args_list) @@ -366,7 +354,6 @@ self.assertEqual(port_name, port['name']) def test_update_port_fixed_ips(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] kwargs = {'fixed_ips': []} @@ -383,7 +370,6 @@ original_ips=original_ips) def test_update_port_fixed_ips_with_subnet_ids(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] ctx = context.get_admin_context() @@ -415,7 +401,6 @@ original_ips=original_ips) def test_update_port_fixed_ips_with_new_dns_name(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] kwargs = {'fixed_ips': []} @@ -432,7 +417,6 @@ original_ips=original_ips) def test_update_port_fixed_ips_with_current_dns_name(self, *mocks): - 
config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] kwargs = {'fixed_ips': []} @@ -449,7 +433,6 @@ original_ips=original_ips) def test_update_port_fixed_ips_clearing_dns_name(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() original_ips = [ip['ip_address'] for ip in port['fixed_ips']] kwargs = {'fixed_ips': []} @@ -465,7 +448,6 @@ original_ips=original_ips) def test_update_fixed_ips_no_effect_after_clearing_dns_name(self, *mocks): - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() port, dns_data_db_1 = self._update_port_for_test(port, new_dns_name='') @@ -496,13 +478,90 @@ self.assertFalse( mock_admin_client.recordsets.delete.call_args_list) + def test_create_port_dns_name_field_missing(self, *mocks): + res = self._create_network(self.fmt, 'test_network', True) + net = self.deserialize(self.fmt, res)['network'] + cidr = '10.0.0.0/24' + self._create_subnet_for_test(net['id'], cidr) + port_request = { + 'port': { + 'network_id': net['id'], + 'tenant_id': net['tenant_id'], + 'name': 'mugsie', + 'admin_state_up': True, + 'device_id': '', + 'device_owner': '', + 'fixed_ips': '' + } + } + self.plugin.create_port(self.context, port_request) + def test_dns_driver_loaded_after_server_restart(self, *mocks): dns_integration.DNS_DRIVER = None - config.cfg.CONF.set_override('dns_domain', DNSDOMAIN) net, port, dns_data_db = self._create_port_for_test() self._verify_port_dns(net, port, dns_data_db) +class DNSIntegrationTestCaseDefaultDomain(DNSIntegrationTestCase): + _domain = 'openstacklocal.' + + def _generate_dns_assignment(self, port): + fqdn = [] + for ip in port['fixed_ips']: + hostname = 'host-%s' % ip['ip_address'].replace( + '.', '-').replace(':', '-') + fqdn.append('%s.%s' % (hostname, self._domain)) + return set(fqdn) + + def _verify_port_dns(self, net, port, dns_data_db, dns_name=True, + dns_domain=True, ptr_zones=True, delete_records=False, + provider_net=True, dns_driver=True, original_ips=None, + current_dns_name=DNSNAME, previous_dns_name=''): + self.assertEqual('', port[dns.DNSNAME]) + fqdn_set = self._generate_dns_assignment(port) + port_fqdn_set = set([each['fqdn'] for each in port['dns_assignment']]) + self.assertEqual(fqdn_set, port_fqdn_set) + self.assertIsNone(dns_data_db, "dns data should be none") + self.assertFalse(mock_client.recordsets.create.call_args_list) + self.assertFalse( + mock_admin_client.recordsets.create.call_args_list) + self.assertFalse(mock_client.recordsets.delete.call_args_list) + self.assertFalse( + mock_admin_client.recordsets.delete.call_args_list) + + def test_update_fixed_ips_no_effect_after_clearing_dns_name(self, *mocks): + net, port, dns_data_db = self._create_port_for_test() + port, dns_data_db_1 = self._update_port_for_test(port, + new_dns_name='') + kwargs = {'fixed_ips': []} + for ip in port['fixed_ips']: + kwargs['fixed_ips'].append( + {'subnet_id': ip['subnet_id'], + 'ip_address': + str(netaddr.IPAddress(ip['ip_address']) + 1)}) + mock_client.reset_mock() + mock_admin_client.reset_mock() + port, dns_data_db_2 = self._update_port_for_test(port, + new_dns_name='', + **kwargs) + self._verify_port_dns(net, port, dns_data_db_2) + + def test_update_port_non_dns_name_attribute(self, *mocks): + net, port, dns_data_db = self._create_port_for_test() + port_name = 'port_name' + kwargs = {'name': port_name} + 
port, dns_data_db = self._update_port_for_test(port, + new_dns_name=None, + **kwargs) + self._verify_port_dns(net, port, dns_data_db) + + def test_update_port_with_current_dns_name(self, *mocks): + net, port, dns_data_db = self._create_port_for_test() + port, dns_data_db = self._update_port_for_test(port, + new_dns_name=DNSNAME) + self._verify_port_dns(net, port, dns_data_db) + + class TestDesignateClient(testtools.TestCase): """Test case for designate clients """ diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_db.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_db.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_db.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_db.py 2016-08-03 20:10:34.000000000 +0000 @@ -66,7 +66,7 @@ self._setup_neutron_network(network_id) for segment in segments: segments_db.add_network_segment( - self.ctx.session, network_id, segment, + self.ctx, network_id, segment, is_dynamic=is_seg_dynamic) net_segments = segments_db.get_network_segments( @@ -292,7 +292,8 @@ self.ctx.session.add(router) return router - def _setup_dvr_binding(self, network_id, port_id, router_id, host_id): + def _setup_distributed_binding(self, network_id, + port_id, router_id, host_id): with self.ctx.session.begin(subtransactions=True): record = models.DistributedPortBinding( port_id=port_id, @@ -304,81 +305,85 @@ self.ctx.session.add(record) return record - def test_ensure_dvr_port_binding_deals_with_db_duplicate(self): + def test_ensure_distributed_port_binding_deals_with_db_duplicate(self): network_id = 'foo_network_id' port_id = 'foo_port_id' router_id = 'foo_router_id' host_id = 'foo_host_id' self._setup_neutron_network(network_id, [port_id]) - self._setup_dvr_binding(network_id, port_id, router_id, host_id) + self._setup_distributed_binding(network_id, port_id, + router_id, host_id) with mock.patch.object(query.Query, 'first') as query_first: query_first.return_value = [] with mock.patch.object(ml2_db.LOG, 'debug') as log_trace: - binding = ml2_db.ensure_dvr_port_binding( + binding = ml2_db.ensure_distributed_port_binding( self.ctx.session, port_id, host_id, router_id) self.assertTrue(query_first.called) self.assertTrue(log_trace.called) self.assertEqual(port_id, binding.port_id) - def test_ensure_dvr_port_binding(self): + def test_ensure_distributed_port_binding(self): network_id = 'foo_network_id' port_id = 'foo_port_id' self._setup_neutron_network(network_id, [port_id]) router = self._setup_neutron_router() - ml2_db.ensure_dvr_port_binding( + ml2_db.ensure_distributed_port_binding( self.ctx.session, port_id, 'foo_host', router.id) expected = (self.ctx.session.query(models.DistributedPortBinding). filter_by(port_id=port_id).one()) self.assertEqual(port_id, expected.port_id) - def test_ensure_dvr_port_binding_multiple_bindings(self): + def test_ensure_distributed_port_binding_multiple_bindings(self): network_id = 'foo_network_id' port_id = 'foo_port_id' self._setup_neutron_network(network_id, [port_id]) router = self._setup_neutron_router() - ml2_db.ensure_dvr_port_binding( + ml2_db.ensure_distributed_port_binding( self.ctx.session, port_id, 'foo_host_1', router.id) - ml2_db.ensure_dvr_port_binding( + ml2_db.ensure_distributed_port_binding( self.ctx.session, port_id, 'foo_host_2', router.id) bindings = (self.ctx.session.query(models.DistributedPortBinding). 
filter_by(port_id=port_id).all()) self.assertEqual(2, len(bindings)) - def test_delete_dvr_port_binding_if_stale(self): + def test_delete_distributed_port_binding_if_stale(self): network_id = 'foo_network_id' port_id = 'foo_port_id' self._setup_neutron_network(network_id, [port_id]) - binding = self._setup_dvr_binding( + binding = self._setup_distributed_binding( network_id, port_id, None, 'foo_host_id') - ml2_db.delete_dvr_port_binding_if_stale(self.ctx.session, binding) + ml2_db.delete_distributed_port_binding_if_stale(self.ctx.session, + binding) count = (self.ctx.session.query(models.DistributedPortBinding). filter_by(port_id=binding.port_id).count()) self.assertFalse(count) - def test_get_dvr_port_binding_by_host_not_found(self): - port = ml2_db.get_dvr_port_binding_by_host( + def test_get_distributed_port_binding_by_host_not_found(self): + port = ml2_db.get_distributed_port_binding_by_host( self.ctx.session, 'foo_port_id', 'foo_host_id') self.assertIsNone(port) - def test_get_dvr_port_bindings_not_found(self): - port = ml2_db.get_dvr_port_bindings(self.ctx.session, 'foo_port_id') + def test_get_distributed_port_bindings_not_found(self): + port = ml2_db.get_distributed_port_bindings(self.ctx.session, + 'foo_port_id') self.assertFalse(len(port)) - def test_get_dvr_port_bindings(self): + def test_get_distributed_port_bindings(self): network_id = 'foo_network_id' port_id_1 = 'foo_port_id_1' port_id_2 = 'foo_port_id_2' self._setup_neutron_network(network_id, [port_id_1, port_id_2]) router = self._setup_neutron_router() - self._setup_dvr_binding( + self._setup_distributed_binding( network_id, port_id_1, router.id, 'foo_host_id_1') - self._setup_dvr_binding( + self._setup_distributed_binding( network_id, port_id_1, router.id, 'foo_host_id_2') - ports = ml2_db.get_dvr_port_bindings(self.ctx.session, 'foo_port_id') + ports = ml2_db.get_distributed_port_bindings(self.ctx.session, + 'foo_port_id') self.assertEqual(2, len(ports)) - def test_dvr_port_binding_deleted_by_port_deletion(self): + def test_distributed_port_binding_deleted_by_port_deletion(self): with self.ctx.session.begin(subtransactions=True): self.ctx.session.add(models_v2.Network(id='network_id')) device_owner = constants.DEVICE_OWNER_DVR_INTERFACE @@ -408,5 +413,6 @@ with self.ctx.session.begin(subtransactions=True): self.ctx.session.delete(port) self.assertEqual([], warning_list) - ports = ml2_db.get_dvr_port_bindings(self.ctx.session, 'port_id') + ports = ml2_db.get_distributed_port_bindings(self.ctx.session, + 'port_id') self.assertEqual(0, len(ports)) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_driver_context.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_driver_context.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_driver_context.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_driver_context.py 2016-08-03 20:10:34.000000000 +0000 @@ -53,7 +53,7 @@ network = mock.MagicMock() binding = mock.Mock() - port = {'device_owner': 'compute', + port = {'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX, portbindings.HOST_ID: 'host'} binding.host = 'foohost' @@ -92,7 +92,7 @@ network = mock.MagicMock() binding = mock.Mock() - port = {'device_owner': 'compute', + port = {'device_owner': constants.DEVICE_OWNER_COMPUTE_PREFIX, 'status': 'status'} binding.status = 'foostatus' diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_managers.py 
neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_managers.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_managers.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_managers.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,9 +16,14 @@ import mock +from oslo_db import exception as db_exc + +from neutron.plugins.ml2.common import exceptions as ml2_exc +from neutron.plugins.ml2 import config as config from neutron.plugins.ml2 import driver_api as api from neutron.plugins.ml2 import managers from neutron.tests import base +from neutron.tests.unit.plugins.ml2.drivers import mechanism_test class TestManagers(base.BaseTestCase): @@ -36,3 +41,39 @@ bindinglevel.segment_id = 'fake_seg_id1' self.assertTrue(manager._check_driver_to_bind( 'fake_driver', segments_to_bind, binding_levels)) + + +class TestMechManager(base.BaseTestCase): + def setUp(self): + config.cfg.CONF.set_override('mechanism_drivers', ['test'], + group='ml2') + super(TestMechManager, self).setUp() + self._manager = managers.MechanismManager() + + def _check_precommit(self, resource, operation): + meth_name = "%s_%s_precommit" % (operation, resource) + method = getattr(self._manager, meth_name) + fake_ctxt = mock.Mock() + fake_ctxt.current = {} + + with mock.patch.object(mechanism_test.TestMechanismDriver, meth_name, + side_effect=db_exc.DBDeadlock()): + self.assertRaises(db_exc.DBDeadlock, method, fake_ctxt) + + with mock.patch.object(mechanism_test.TestMechanismDriver, meth_name, + side_effect=RuntimeError()): + self.assertRaises(ml2_exc.MechanismDriverError, method, fake_ctxt) + + def _check_resource(self, resource): + self._check_precommit(resource, 'create') + self._check_precommit(resource, 'update') + self._check_precommit(resource, 'delete') + + def test_network_precommit(self): + self._check_resource('network') + + def test_subnet_precommit(self): + self._check_resource('subnet') + + def test_port_precommit(self): + self._check_resource('port') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_plugin.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_plugin.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_plugin.py 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_plugin.py 2016-08-29 20:05:49.000000000 +0000 @@ -30,6 +30,7 @@ from neutron._i18n import _ from neutron.callbacks import events +from neutron.callbacks import exceptions as c_exc from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import utils @@ -58,6 +59,8 @@ from neutron.plugins.ml2 import models from neutron.plugins.ml2 import plugin as ml2_plugin from neutron.services.qos import qos_consts +from neutron.services.segments import db as segments_plugin_db +from neutron.services.segments import plugin as segments_plugin from neutron.tests import base from neutron.tests.common import helpers from neutron.tests.unit import _test_extension_portbindings as test_bindings @@ -76,7 +79,7 @@ group='ml2_type_vlan') -PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' +PLUGIN_NAME = 'ml2' DEVICE_OWNER_COMPUTE = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' HOST = 'fake_host' @@ -108,9 +111,14 @@ l3_plugin = ('neutron.tests.unit.extensions.test_l3.' 
'TestL3NatServicePlugin') + def get_additional_service_plugins(self): + """Subclasses can return a dictionary of service plugins to load.""" + return {} + def setup_parent(self): """Perform parent setup with the common plugin configuration class.""" service_plugins = {'l3_plugin_name': self.l3_plugin} + service_plugins.update(self.get_additional_service_plugins()) # Ensure that the parent setup can be called without arguments # by the common configuration setUp. parent_setup = functools.partial( @@ -138,7 +146,7 @@ [self.phys_vrange, self.phys2_vrange], group='ml2_type_vlan') self.setup_parent() - self.driver = ml2_plugin.Ml2Plugin() + self.driver = manager.NeutronManager.get_plugin() self.context = context.get_admin_context() @@ -242,6 +250,48 @@ ] self.nets = self.mp_nets + self.pnets + def test_network_after_create_callback(self): + after_create = mock.Mock() + registry.subscribe(after_create, resources.NETWORK, + events.AFTER_CREATE) + with self.network() as n: + after_create.assert_called_once_with( + resources.NETWORK, events.AFTER_CREATE, mock.ANY, + context=mock.ANY, network=mock.ANY) + kwargs = after_create.mock_calls[0][2] + self.assertEqual(n['network']['id'], + kwargs['network']['id']) + + def test_network_after_update_callback(self): + after_update = mock.Mock() + registry.subscribe(after_update, resources.NETWORK, + events.AFTER_UPDATE) + with self.network() as n: + data = {'network': {'name': 'updated'}} + req = self.new_update_request('networks', data, n['network']['id']) + self.deserialize(self.fmt, req.get_response(self.api)) + after_update.assert_called_once_with( + resources.NETWORK, events.AFTER_UPDATE, mock.ANY, + context=mock.ANY, network=mock.ANY, original_network=mock.ANY) + kwargs = after_update.mock_calls[0][2] + self.assertEqual(n['network']['name'], + kwargs['original_network']['name']) + self.assertEqual('updated', kwargs['network']['name']) + + def test_network_after_delete_callback(self): + after_delete = mock.Mock() + registry.subscribe(after_delete, resources.NETWORK, + events.AFTER_DELETE) + with self.network() as n: + req = self.new_delete_request('networks', n['network']['id']) + req.get_response(self.api) + after_delete.assert_called_once_with( + resources.NETWORK, events.AFTER_DELETE, mock.ANY, + context=mock.ANY, network=mock.ANY) + kwargs = after_delete.mock_calls[0][2] + self.assertEqual(n['network']['id'], + kwargs['network']['id']) + def test_port_delete_helper_tolerates_failure(self): plugin = manager.NeutronManager.get_plugin() with mock.patch.object(plugin, "delete_port", @@ -381,26 +431,64 @@ self.assertNotIn(mpnet.SEGMENTS, network['network']) -class TestMl2NetworksWithVlanTransparencyAndMTU(TestMl2NetworksV2): +class TestMl2NetworksWithVlanTransparencyBase(TestMl2NetworksV2): + data = {'network': {'name': 'net1', + mpnet.SEGMENTS: + [{pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1'}], + 'tenant_id': 'tenant_one', + 'vlan_transparent': 'True'}} + def setUp(self, plugin=None): - config.cfg.CONF.set_override('path_mtu', 1000, group='ml2') - config.cfg.CONF.set_override('global_physnet_mtu', 1000) - config.cfg.CONF.set_override('advertise_mtu', True) config.cfg.CONF.set_override('vlan_transparent', True) - super(TestMl2NetworksWithVlanTransparencyAndMTU, self).setUp(plugin) + super(TestMl2NetworksWithVlanTransparencyBase, self).setUp(plugin) + + +class TestMl2NetworksWithVlanTransparency( + TestMl2NetworksWithVlanTransparencyBase): + _mechanism_drivers = ['test'] + + def test_create_network_vlan_transparent_fail(self): + with 
mock.patch.object(mech_test.TestMechanismDriver, + 'check_vlan_transparency', + return_value=False): + network_req = self.new_create_request('networks', self.data) + res = network_req.get_response(self.api) + self.assertEqual(500, res.status_int) + error_result = self.deserialize(self.fmt, res)['NeutronError'] + self.assertEqual("VlanTransparencyDriverError", + error_result['type']) + + def test_create_network_vlan_transparent(self): + with mock.patch.object(mech_test.TestMechanismDriver, + 'check_vlan_transparency', + return_value=True): + network_req = self.new_create_request('networks', self.data) + res = network_req.get_response(self.api) + self.assertEqual(201, res.status_int) + network = self.deserialize(self.fmt, res)['network'] + self.assertIn('vlan_transparent', network) + + +class TestMl2NetworksWithVlanTransparencyAndMTU( + TestMl2NetworksWithVlanTransparencyBase): + _mechanism_drivers = ['test'] def test_create_network_vlan_transparent_and_mtu(self): - data = {'network': {'name': 'net1', - mpnet.SEGMENTS: - [{pnet.NETWORK_TYPE: 'vlan', - pnet.PHYSICAL_NETWORK: 'physnet1'}], - 'tenant_id': 'tenant_one'}} - network_req = self.new_create_request('networks', data) - res = network_req.get_response(self.api) - self.assertEqual(201, res.status_int) - network = self.deserialize(self.fmt, res)['network'] - self.assertEqual(1000, network['mtu']) - self.assertIn('vlan_transparent', network) + with mock.patch.object(mech_test.TestMechanismDriver, + 'check_vlan_transparency', + return_value=True): + config.cfg.CONF.set_override('path_mtu', 1000, group='ml2') + config.cfg.CONF.set_override('global_physnet_mtu', 1000) + config.cfg.CONF.set_override('advertise_mtu', True) + network_req = self.new_create_request('networks', self.data) + res = network_req.get_response(self.api) + self.assertEqual(201, res.status_int) + network = self.deserialize(self.fmt, res)['network'] + self.assertEqual(1000, network['mtu']) + self.assertIn('vlan_transparent', network) + self.assertTrue(network['vlan_transparent']) class TestMl2NetworksWithAvailabilityZone(TestMl2NetworksV2): @@ -420,6 +508,45 @@ class TestMl2SubnetsV2(test_plugin.TestSubnetsV2, Ml2PluginV2TestCase): + + def test_subnet_after_create_callback(self): + after_create = mock.Mock() + registry.subscribe(after_create, resources.SUBNET, events.AFTER_CREATE) + with self.subnet() as s: + after_create.assert_called_once_with( + resources.SUBNET, events.AFTER_CREATE, mock.ANY, + context=mock.ANY, subnet=mock.ANY) + kwargs = after_create.mock_calls[0][2] + self.assertEqual(s['subnet']['id'], kwargs['subnet']['id']) + + def test_subnet_after_update_callback(self): + after_update = mock.Mock() + registry.subscribe(after_update, resources.SUBNET, events.AFTER_UPDATE) + with self.subnet() as s: + data = {'subnet': {'name': 'updated'}} + req = self.new_update_request('subnets', data, s['subnet']['id']) + self.deserialize(self.fmt, req.get_response(self.api)) + after_update.assert_called_once_with( + resources.SUBNET, events.AFTER_UPDATE, mock.ANY, + context=mock.ANY, subnet=mock.ANY, + original_subnet=mock.ANY) + kwargs = after_update.mock_calls[0][2] + self.assertEqual(s['subnet']['name'], + kwargs['original_subnet']['name']) + self.assertEqual('updated', kwargs['subnet']['name']) + + def test_subnet_after_delete_callback(self): + after_delete = mock.Mock() + registry.subscribe(after_delete, resources.SUBNET, events.AFTER_DELETE) + with self.subnet() as s: + req = self.new_delete_request('subnets', 
s['subnet']['id']) + req.get_response(self.api) + after_delete.assert_called_once_with( + resources.SUBNET, events.AFTER_DELETE, mock.ANY, + context=mock.ANY, subnet=mock.ANY) + kwargs = after_delete.mock_calls[0][2] + self.assertEqual(s['subnet']['id'], kwargs['subnet']['id']) + def test_delete_subnet_race_with_dhcp_port_creation(self): with self.network() as network: with self.subnet(network=network) as subnet: @@ -512,6 +639,33 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): + def test__port_provisioned_with_blocks(self): + plugin = manager.NeutronManager.get_plugin() + ups = mock.patch.object(plugin, 'update_port_status').start() + with self.port() as port: + mock.patch('neutron.plugins.ml2.plugin.db.get_port').start() + provisioning_blocks.add_provisioning_component( + self.context, port['port']['id'], 'port', 'DHCP') + plugin._port_provisioned('port', 'evt', 'trigger', + self.context, port['port']['id']) + self.assertFalse(ups.called) + + def test__port_provisioned_no_binding(self): + plugin = manager.NeutronManager.get_plugin() + with self.network() as net: + net_id = net['network']['id'] + port_id = 'fake_id' + port_db = models_v2.Port( + id=port_id, tenant_id='tenant', network_id=net_id, + mac_address='08:00:01:02:03:04', admin_state_up=True, + status='ACTIVE', device_id='vm_id', + device_owner=DEVICE_OWNER_COMPUTE + ) + with self.context.session.begin(): + self.context.session.add(port_db) + self.assertIsNone(plugin._port_provisioned('port', 'evt', 'trigger', + self.context, port_id)) + def test_create_router_port_and_fail_create_postcommit(self): with mock.patch.object(managers.MechanismManager, @@ -579,6 +733,13 @@ with self.port(): self.assertTrue(ap.called) + def test_dhcp_provisioning_blocks_skipped_with_network_port(self): + self._add_fake_dhcp_agent() + with mock.patch.object(provisioning_blocks, + 'add_provisioning_component') as ap: + with self.port(device_owner=constants.DEVICE_OWNER_DHCP): + self.assertFalse(ap.called) + def test_dhcp_provisioning_blocks_skipped_on_create_with_no_dhcp(self): self._add_fake_dhcp_agent() with self.subnet(enable_dhcp=False) as subnet: @@ -913,7 +1074,7 @@ raise db_exc.DBDuplicateEntry() listener = IPAllocationsGrenade() - engine = db_api.get_engine() + engine = db_api.context_manager.get_legacy_facade().get_engine() event.listen(engine, 'before_cursor_execute', listener.execute) event.listen(engine, 'commit', listener.commit) self.addCleanup(event.remove, engine, 'before_cursor_execute', @@ -1000,6 +1161,110 @@ self.context, port_id)) +class Test_GetNetworkMtu(Ml2PluginV2TestCase): + + def test_get_mtu_with_physical_net(self): + plugin = manager.NeutronManager.get_plugin() + mock_type_driver = mock.MagicMock() + plugin.type_manager.drivers['driver1'] = mock.Mock() + plugin.type_manager.drivers['driver1'].obj = mock_type_driver + net = { + 'name': 'net1', + pnet.NETWORK_TYPE: 'driver1', + pnet.PHYSICAL_NETWORK: 'physnet1', + } + plugin._get_network_mtu(net) + mock_type_driver.get_mtu.assert_called_once_with('physnet1') + + def _register_type_driver_with_mtu(self, driver, mtu): + plugin = manager.NeutronManager.get_plugin() + + class FakeDriver(object): + def get_mtu(self, physical_network=None): + return mtu + + driver_mock = mock.Mock() + driver_mock.obj = FakeDriver() + plugin.type_manager.drivers[driver] = driver_mock + + def test_single_segment(self): + plugin = manager.NeutronManager.get_plugin() + self._register_type_driver_with_mtu('driver1', 1400) + + net = { + 'name': 'net1', + mpnet.SEGMENTS: [ + { + 
pnet.NETWORK_TYPE: 'driver1', + pnet.PHYSICAL_NETWORK: 'physnet1' + }, + ] + } + self.assertEqual(1400, plugin._get_network_mtu(net)) + + def test_multiple_segments_returns_minimal_mtu(self): + plugin = manager.NeutronManager.get_plugin() + self._register_type_driver_with_mtu('driver1', 1400) + self._register_type_driver_with_mtu('driver2', 1300) + + net = { + 'name': 'net1', + mpnet.SEGMENTS: [ + { + pnet.NETWORK_TYPE: 'driver1', + pnet.PHYSICAL_NETWORK: 'physnet1' + }, + { + pnet.NETWORK_TYPE: 'driver2', + pnet.PHYSICAL_NETWORK: 'physnet2' + }, + ] + } + self.assertEqual(1300, plugin._get_network_mtu(net)) + + def test_no_segments(self): + plugin = manager.NeutronManager.get_plugin() + self._register_type_driver_with_mtu('driver1', 1400) + + net = { + 'name': 'net1', + pnet.NETWORK_TYPE: 'driver1', + pnet.PHYSICAL_NETWORK: 'physnet1', + } + self.assertEqual(1400, plugin._get_network_mtu(net)) + + def test_get_mtu_None_returns_0(self): + plugin = manager.NeutronManager.get_plugin() + self._register_type_driver_with_mtu('driver1', None) + + net = { + 'name': 'net1', + pnet.NETWORK_TYPE: 'driver1', + pnet.PHYSICAL_NETWORK: 'physnet1', + } + self.assertEqual(0, plugin._get_network_mtu(net)) + + def test_unknown_segment_type_ignored(self): + plugin = manager.NeutronManager.get_plugin() + self._register_type_driver_with_mtu('driver1', None) + self._register_type_driver_with_mtu('driver2', 1300) + + net = { + 'name': 'net1', + mpnet.SEGMENTS: [ + { + pnet.NETWORK_TYPE: 'driver1', + pnet.PHYSICAL_NETWORK: 'physnet1' + }, + { + pnet.NETWORK_TYPE: 'driver2', + pnet.PHYSICAL_NETWORK: 'physnet2' + }, + ] + } + self.assertEqual(1300, plugin._get_network_mtu(net)) + + class TestMl2DvrPortsV2(TestMl2PortsV2): def setUp(self): super(TestMl2DvrPortsV2, self).setUp() @@ -1287,7 +1552,7 @@ self.assertTrue(update_mock.mock_calls) self.assertEqual('test', binding.host) - def test_process_dvr_port_binding_update_router_id(self): + def test_process_distributed_port_binding_update_router_id(self): host_id = 'host' binding = models.DistributedPortBinding( port_id='port_id', @@ -1307,33 +1572,37 @@ return_value=[]): mech_context = driver_context.PortContext( self, context, mock_port, mock_network, binding, None) - plugin._process_dvr_port_binding(mech_context, context, attrs) + plugin._process_distributed_port_binding(mech_context, + context, attrs) self.assertEqual(new_router_id, mech_context._binding.router_id) self.assertEqual(host_id, mech_context._binding.host) - def test_update_dvr_port_binding_on_concurrent_port_delete(self): + def test_update_distributed_port_binding_on_concurrent_port_delete(self): plugin = manager.NeutronManager.get_plugin() with self.port() as port: port = { 'id': port['port']['id'], portbindings.HOST_ID: 'foo_host', } - with mock.patch.object(plugin, 'get_port', new=plugin.delete_port): - res = plugin.update_dvr_port_binding( - self.context, 'foo_port_id', {'port': port}) + exc = db_exc.DBReferenceError('', '', '', '') + with mock.patch.object(ml2_db, 'ensure_distributed_port_binding', + side_effect=exc): + res = plugin.update_distributed_port_binding( + self.context, port['id'], {'port': port}) self.assertIsNone(res) - def test_update_dvr_port_binding_on_non_existent_port(self): + def test_update_distributed_port_binding_on_non_existent_port(self): plugin = manager.NeutronManager.get_plugin() port = { 'id': 'foo_port_id', portbindings.HOST_ID: 'foo_host', } - with mock.patch.object(ml2_db, 'ensure_dvr_port_binding') as mock_dvr: - plugin.update_dvr_port_binding( + with 
mock.patch.object( + ml2_db, 'ensure_distributed_port_binding') as mock_dist: + plugin.update_distributed_port_binding( self.context, 'foo_port_id', {'port': port}) - self.assertFalse(mock_dvr.called) + self.assertFalse(mock_dist.called) class TestMl2PortBindingNoSG(TestMl2PortBinding): @@ -1367,7 +1636,7 @@ driver_api.PHYSICAL_NETWORK: 'physnet1'} network_id = network['network']['id'] self.driver.type_manager.allocate_dynamic_segment( - self.context.session, network_id, segment) + self.context, network_id, segment) dynamic_segment = segments_db.get_dynamic_segment( self.context.session, network_id, 'physnet1') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) @@ -1378,7 +1647,7 @@ driver_api.SEGMENTATION_ID: 1234, driver_api.PHYSICAL_NETWORK: 'physnet3'} self.driver.type_manager.allocate_dynamic_segment( - self.context.session, network_id, segment2) + self.context, network_id, segment2) dynamic_segment = segments_db.get_dynamic_segment( self.context.session, network_id, segmentation_id='1234') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) @@ -1396,7 +1665,7 @@ driver_api.PHYSICAL_NETWORK: 'physnet1'} network_id = network['network']['id'] self.driver.type_manager.allocate_dynamic_segment( - self.context.session, network_id, segment) + self.context, network_id, segment) dynamic_segment = segments_db.get_dynamic_segment( self.context.session, network_id, 'physnet1') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) @@ -1411,7 +1680,7 @@ segment2 = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: 'physnet2'} self.driver.type_manager.allocate_dynamic_segment( - self.context.session, network_id, segment2) + self.context, network_id, segment2) dynamic_segment2 = segments_db.get_dynamic_segment( self.context.session, network_id, 'physnet2') dynamic_segmentation2_id = dynamic_segment2[driver_api.SEGMENTATION_ID] @@ -1427,7 +1696,7 @@ driver_api.PHYSICAL_NETWORK: 'physnet1'} network_id = network['network']['id'] self.driver.type_manager.allocate_dynamic_segment( - self.context.session, network_id, segment) + self.context, network_id, segment) dynamic_segment = segments_db.get_dynamic_segment( self.context.session, network_id, 'physnet1') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) @@ -1564,7 +1833,7 @@ segment = {driver_api.NETWORK_TYPE: 'vlan', driver_api.PHYSICAL_NETWORK: 'physnet2'} self.driver.type_manager.allocate_dynamic_segment( - self.context.session, network_id, segment) + self.context, network_id, segment) dynamic_segment = segments_db.get_dynamic_segment( self.context.session, network_id, 'physnet2') self.assertEqual('vlan', dynamic_segment[driver_api.NETWORK_TYPE]) @@ -1574,6 +1843,7 @@ with mock.patch.object(type_vlan.VlanTypeDriver, 'release_segment') as rs: + segments_plugin_db.subscribe() req = self.new_delete_request('networks', network_id) res = req.get_response(self.api) self.assertEqual(2, rs.call_count) @@ -1737,18 +2007,22 @@ def test_create_network_faulty(self): + err_msg = "Some errors" with mock.patch.object(mech_test.TestMechanismDriver, 'create_network_postcommit', - side_effect=ml2_exc.MechanismDriverError): + side_effect=(exc.InvalidInput( + error_message=err_msg))): tenant_id = str(uuid.uuid4()) data = {'network': {'name': 'net1', 'tenant_id': tenant_id}} req = self.new_create_request('networks', data) res = req.get_response(self.api) - self.assertEqual(500, res.status_int) + self.assertEqual(400, res.status_int) error = self.deserialize(self.fmt, res) - 
self.assertEqual('MechanismDriverError', + self.assertEqual('InvalidInput', error['NeutronError']['type']) + # Check the client can see the root cause of error. + self.assertIn(err_msg, error['NeutronError']['message']) query_params = "tenant_id=%s" % tenant_id nets = self._list('networks', query_params=query_params) self.assertFalse(nets['networks']) @@ -1778,9 +2052,11 @@ def test_update_network_faulty(self): + err_msg = "Some errors" with mock.patch.object(mech_test.TestMechanismDriver, 'update_network_postcommit', - side_effect=ml2_exc.MechanismDriverError): + side_effect=(exc.InvalidInput( + error_message=err_msg))): with mock.patch.object(mech_logger.LoggerMechanismDriver, 'update_network_postcommit') as unp: @@ -1796,10 +2072,12 @@ data = {'network': {'name': new_name}} req = self.new_update_request('networks', data, net_id) res = req.get_response(self.api) - self.assertEqual(500, res.status_int) + self.assertEqual(400, res.status_int) error = self.deserialize(self.fmt, res) - self.assertEqual('MechanismDriverError', + self.assertEqual('InvalidInput', error['NeutronError']['type']) + # Check the client can see the root cause of error. + self.assertIn(err_msg, error['NeutronError']['message']) # Test if other mechanism driver was called self.assertTrue(unp.called) net = self._show('networks', net_id) @@ -1809,9 +2087,11 @@ def test_create_subnet_faulty(self): + err_msg = "Some errors" with mock.patch.object(mech_test.TestMechanismDriver, 'create_subnet_postcommit', - side_effect=ml2_exc.MechanismDriverError): + side_effect=(exc.InvalidInput( + error_message=err_msg))): with self.network() as network: net_id = network['network']['id'] @@ -1824,10 +2104,12 @@ 'gateway_ip': '10.0.20.1'}} req = self.new_create_request('subnets', data) res = req.get_response(self.api) - self.assertEqual(500, res.status_int) + self.assertEqual(400, res.status_int) error = self.deserialize(self.fmt, res) - self.assertEqual('MechanismDriverError', + self.assertEqual('InvalidInput', error['NeutronError']['type']) + # Check the client can see the root cause of error. + self.assertIn(err_msg, error['NeutronError']['message']) query_params = "network_id=%s" % net_id subnets = self._list('subnets', query_params=query_params) self.assertFalse(subnets['subnets']) @@ -1865,9 +2147,11 @@ def test_update_subnet_faulty(self): + err_msg = "Some errors" with mock.patch.object(mech_test.TestMechanismDriver, 'update_subnet_postcommit', - side_effect=ml2_exc.MechanismDriverError): + side_effect=(exc.InvalidInput( + error_message=err_msg))): with mock.patch.object(mech_logger.LoggerMechanismDriver, 'update_subnet_postcommit') as usp: @@ -1889,10 +2173,12 @@ data = {'subnet': {'name': new_name}} req = self.new_update_request('subnets', data, subnet_id) res = req.get_response(self.api) - self.assertEqual(500, res.status_int) + self.assertEqual(400, res.status_int) error = self.deserialize(self.fmt, res) - self.assertEqual('MechanismDriverError', + self.assertEqual('InvalidInput', error['NeutronError']['type']) + # Check the client can see the root cause of error. 
+ self.assertIn(err_msg, error['NeutronError']['message']) # Test if other mechanism driver was called self.assertTrue(usp.called) subnet = self._show('subnets', subnet_id) @@ -1902,9 +2188,11 @@ def test_create_port_faulty(self): + err_msg = "Some errors" with mock.patch.object(mech_test.TestMechanismDriver, 'create_port_postcommit', - side_effect=ml2_exc.MechanismDriverError): + side_effect=(exc.InvalidInput( + error_message=err_msg))): with self.network() as network: net_id = network['network']['id'] @@ -1916,10 +2204,12 @@ 'fixed_ips': []}} req = self.new_create_request('ports', data) res = req.get_response(self.api) - self.assertEqual(500, res.status_int) + self.assertEqual(400, res.status_int) error = self.deserialize(self.fmt, res) - self.assertEqual('MechanismDriverError', + self.assertEqual('InvalidInput', error['NeutronError']['type']) + # Check the client can see the root cause of error. + self.assertIn(err_msg, error['NeutronError']['message']) query_params = "network_id=%s" % net_id ports = self._list('ports', query_params=query_params) self.assertFalse(ports['ports']) @@ -1957,8 +2247,8 @@ self._delete('ports', port['port']['id']) - def test_update_dvr_router_interface_port(self): - """Test validate dvr router interface update succeeds.""" + def test_update_distributed_router_interface_port(self): + """Test validate distributed router interface update succeeds.""" host_id = 'host' binding = models.DistributedPortBinding( port_id='port_id', @@ -1974,9 +2264,9 @@ mock.patch.object( mech_test.TestMechanismDriver, 'update_port_precommit') as port_pre,\ - mock.patch.object(ml2_db, - 'get_dvr_port_bindings') as dvr_bindings: - dvr_bindings.return_value = [binding] + mock.patch.object( + ml2_db, 'get_distributed_port_bindings') as dist_bindings: + dist_bindings.return_value = [binding] port_pre.return_value = True with self.network() as network: with self.subnet(network=network) as subnet: @@ -2001,7 +2291,7 @@ req = self.new_update_request('ports', data, port_id) res = req.get_response(self.api) self.assertEqual(200, res.status_int) - self.assertTrue(dvr_bindings.called) + self.assertTrue(dist_bindings.called) self.assertTrue(port_pre.called) self.assertTrue(port_post.called) port = self._show('ports', port_id) @@ -2030,6 +2320,25 @@ driver_mock().allocate_subnet.assert_called_with(mock.ANY) driver_mock().remove_subnet.assert_called_with(request.subnet_id) + def test_delete_subnet_deallocates_slaac_correctly(self): + driver = 'neutron.ipam.drivers.neutrondb_ipam.driver.NeutronDbPool' + with self.network() as network: + with self.subnet(network=network, + cidr='2001:100::0/64', + ip_version=6, + ipv6_ra_mode=constants.IPV6_SLAAC) as subnet: + with self.port(subnet=subnet) as port: + with mock.patch(driver) as driver_mock: + # Validate that deletion of SLAAC allocation happens + # via IPAM interface, i.e. ipam_subnet.deallocate is + # called prior to subnet deletion from db. 
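The faulty-driver tests above all follow one pattern: patch a single mechanism-driver hook with a side_effect exception, drive a request through the test API, and assert both the 400 status and that the root-cause message survives into the NeutronError body. A minimal self-contained sketch of that pattern, using a hypothetical FakeDriver and make_request dispatcher rather than the real ML2 plumbing:

import unittest

import mock  # assumes the standalone mock library, as used throughout this tree


class FakeDriver(object):
    """Hypothetical stand-in for one mechanism-driver hook."""
    def create_network_postcommit(self, context):
        pass


def make_request(driver):
    # Hypothetical dispatcher: map a driver failure to an API response.
    try:
        driver.create_network_postcommit(None)
        return 201, {}
    except ValueError as e:
        return 400, {'NeutronError': {'type': 'InvalidInput',
                                      'message': str(e)}}


class FaultyDriverPattern(unittest.TestCase):
    def test_root_cause_reaches_client(self):
        err_msg = "Some errors"
        with mock.patch.object(FakeDriver, 'create_network_postcommit',
                               side_effect=ValueError(err_msg)):
            status, body = make_request(FakeDriver())
        self.assertEqual(400, status)
        # The client sees the root cause, not a generic driver error.
        self.assertIn(err_msg, body['NeutronError']['message'])


if __name__ == '__main__':
    unittest.main()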
+ self._delete('subnets', subnet['subnet']['id']) + dealloc = driver_mock().get_subnet().deallocate + dealloc.assert_called_with( + port['port']['fixed_ips'][0]['ip_address']) + driver_mock().remove_subnet.assert_called_with( + subnet['subnet']['id']) + class TestMl2PluginCreateUpdateDeletePort(base.BaseTestCase): @@ -2039,6 +2348,7 @@ # neutron.objects.db.api from core plugin instance self.setup_coreplugin(PLUGIN_NAME) self.context = mock.MagicMock() + self.context.session.is_active = False self.notify_p = mock.patch('neutron.callbacks.registry.notify') self.notify = self.notify_p.start() @@ -2069,7 +2379,9 @@ mock.patch.object(base_plugin.NeutronDbPluginV2, 'update_port'),\ mock.patch.object(base_plugin.NeutronDbPluginV2, - 'create_port_db'): + 'create_port_db'),\ + mock.patch.object(ml2_plugin.Ml2Plugin, + '_get_network_mtu'): init.return_value = None new_port = mock.MagicMock() @@ -2107,7 +2419,9 @@ mock.patch.object(ml2_db, 'get_locked_port_and_binding', return_value=(original_port_db, binding)),\ mock.patch.object(base_plugin.NeutronDbPluginV2, - 'update_port') as db_update_port: + 'update_port') as db_update_port,\ + mock.patch.object(ml2_plugin.Ml2Plugin, + '_get_network_mtu'): init.return_value = None updated_port = mock.MagicMock() db_update_port.return_value = updated_port @@ -2138,7 +2452,9 @@ return_value=None),\ mock.patch.object(manager.NeutronManager, 'get_service_plugins', - return_value={'L3_ROUTER_NAT': l3plugin}): + return_value={'L3_ROUTER_NAT': l3plugin}),\ + mock.patch.object(ml2_plugin.Ml2Plugin, + '_get_network_mtu'): plugin = self._create_plugin_for_create_update_port() # Set backend manually here since __init__ was mocked plugin.set_ipam_backend() @@ -2150,15 +2466,143 @@ class TestTransactionGuard(Ml2PluginV2TestCase): def test_delete_network_guard(self): - plugin = ml2_plugin.Ml2Plugin() + plugin = manager.NeutronManager.get_plugin() ctx = context.get_admin_context() with ctx.session.begin(subtransactions=True): with testtools.ExpectedException(RuntimeError): plugin.delete_network(ctx, 'id') def test_delete_subnet_guard(self): - plugin = ml2_plugin.Ml2Plugin() + plugin = manager.NeutronManager.get_plugin() ctx = context.get_admin_context() with ctx.session.begin(subtransactions=True): with testtools.ExpectedException(RuntimeError): plugin.delete_subnet(ctx, 'id') + + +class TestML2Segments(Ml2PluginV2TestCase): + + def _reserve_segment(self, network, seg_id=None): + segment = {'id': 'fake_id', + 'network_id': network['network']['id'], + 'tenant_id': network['network']['tenant_id'], + driver_api.NETWORK_TYPE: 'vlan', + driver_api.PHYSICAL_NETWORK: self.physnet} + if seg_id: + segment[driver_api.SEGMENTATION_ID] = seg_id + + self.driver._handle_segment_change( + mock.ANY, events.PRECOMMIT_CREATE, segments_plugin.Plugin(), + self.context, segment) + + if seg_id: + # Assert it is not changed + self.assertEqual(seg_id, segment[driver_api.SEGMENTATION_ID]) + else: + self.assertTrue(segment[driver_api.SEGMENTATION_ID] > 0) + + return segment + + def test_reserve_segment_success_with_partial_segment(self): + with self.network() as network: + self._reserve_segment(network) + + def test_reserve_segment_fail_with_duplicate_param(self): + with self.network() as network: + self._reserve_segment(network, 10) + + self.assertRaises( + exc.VlanIdInUse, self._reserve_segment, network, 10) + + def test_reserve_segment_update_network_mtu(self): + with self.network() as network: + network_id = network['network']['id'] + with mock.patch.object( + self.driver, '_get_network_mtu') as 
mtu: + mtu.return_value = 100 + self._reserve_segment(network) + updated_network = self.driver.get_network(self.context, + network_id) + self.assertEqual(100, updated_network[driver_api.MTU]) + + mtu.return_value = 200 + self._reserve_segment(network) + updated_network = self.driver.get_network(self.context, + network_id) + self.assertEqual(200, updated_network[driver_api.MTU]) + + def _test_notify_mechanism_manager(self, event): + seg1 = {driver_api.NETWORK_TYPE: 'vlan', + driver_api.PHYSICAL_NETWORK: self.physnet, + driver_api.SEGMENTATION_ID: 1000} + seg2 = {driver_api.NETWORK_TYPE: 'vlan', + driver_api.PHYSICAL_NETWORK: self.physnet, + driver_api.SEGMENTATION_ID: 1001} + seg3 = {driver_api.NETWORK_TYPE: 'vlan', + driver_api.PHYSICAL_NETWORK: self.physnet, + driver_api.SEGMENTATION_ID: 1002} + with self.network() as network: + network = network['network'] + + for stale_seg in segments_db.get_network_segments(self.context.session, + network['id']): + segments_db.delete_network_segment(self.context.session, + stale_seg['id']) + + for seg in [seg1, seg2, seg3]: + seg['network_id'] = network['id'] + segments_db.add_network_segment(self.context, network['id'], seg) + + self.net_context = None + + def record_network_context(net_context): + self.net_context = net_context + + with mock.patch.object(managers.MechanismManager, + 'update_network_precommit', + side_effect=record_network_context): + self.driver._handle_segment_change( + mock.ANY, event, segments_plugin.Plugin(), self.context, seg1) + # Make sure the mechanism manager can get the right number of + # segments of the network + self.assertEqual(3, len(self.net_context.current[mpnet.SEGMENTS])) + + def test_reserve_segment_notify_mechanism_manager(self): + self._test_notify_mechanism_manager(events.PRECOMMIT_CREATE) + + def test_release_segment(self): + with self.network() as network: + segment = self._reserve_segment(network, 10) + segment['network_id'] = network['network']['id'] + self.driver._handle_segment_change( + mock.ANY, events.PRECOMMIT_DELETE, mock.ANY, + self.context, segment) + # Check that the segment_id is not reserved + segment = self._reserve_segment( + network, segment[driver_api.SEGMENTATION_ID]) + + def test_release_segment_notify_mechanism_manager(self): + self._test_notify_mechanism_manager(events.PRECOMMIT_DELETE) + + def test_prevent_delete_segment_with_tenant_port(self): + fake_owner_compute = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' + ml2_db.subscribe() + plugin = manager.NeutronManager.get_plugin() + with self.port(device_owner=fake_owner_compute) as port: + binding = ml2_db.get_locked_port_and_binding(self.context.session, + port['port']['id'])[1] + binding['host'] = 'host-ovs-no_filter' + mech_context = driver_context.PortContext( + plugin, self.context, port['port'], + plugin.get_network(self.context, port['port']['network_id']), + binding, None) + plugin._bind_port_if_needed(mech_context) + segment = segments_db.get_network_segments( + self.context.session, port['port']['network_id'])[0] + segment['network_id'] = port['port']['network_id'] + self.assertRaises(c_exc.CallbackFailure, registry.notify, + resources.SEGMENT, events.BEFORE_DELETE, + mock.ANY, + context=self.context, segment=segment) + exist_port = self._show('ports', port['port']['id']) + self.assertEqual(port['port']['id'], exist_port['port']['id']) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_port_binding.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_port_binding.py --- 
neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_port_binding.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_port_binding.py 2016-08-03 20:10:34.000000000 +0000 @@ -25,13 +25,8 @@ from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin -PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' - - class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase): - _plugin_name = PLUGIN_NAME - def setUp(self): # Enable the test mechanism driver to ensure that # we can successfully call through to all mechanism @@ -42,7 +37,7 @@ config.cfg.CONF.set_override('network_vlan_ranges', ['physnet1:1000:1099'], group='ml2_type_vlan') - super(PortBindingTestCase, self).setUp(PLUGIN_NAME) + super(PortBindingTestCase, self).setUp('ml2') self.port_create_status = 'DOWN' self.plugin = manager.NeutronManager.get_plugin() self.plugin.start_rpc_listeners() @@ -212,7 +207,7 @@ port_dict = plugin.get_port(ctx, port['port']['id']) self.assertEqual(const.PORT_STATUS_DOWN, port_dict['status']) - def test_dvr_binding(self): + def test_distributed_binding(self): ctx = context.get_admin_context() with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port: port_id = port['port']['id'] @@ -223,9 +218,9 @@ self.assertEqual('DOWN', port['port']['status']) # Update port to bind for a host. - self.plugin.update_dvr_port_binding(ctx, port_id, {'port': { - portbindings.HOST_ID: 'host-ovs-no_filter', - 'device_id': 'router1'}}) + self.plugin.update_distributed_port_binding(ctx, port_id, {'port': + {portbindings.HOST_ID: 'host-ovs-no_filter', + 'device_id': 'router1'}}) # Get port and verify VIF type and status unchanged. port = self._show('ports', port_id) @@ -267,15 +262,15 @@ port['port'][portbindings.VIF_TYPE]) self.assertEqual('DOWN', port['port']['status']) - def test_dvr_binding_multi_host_status(self): + def test_distributed_binding_multi_host_status(self): ctx = context.get_admin_context() with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port: port_id = port['port']['id'] # Update port to bind for 1st host. - self.plugin.update_dvr_port_binding(ctx, port_id, {'port': { - portbindings.HOST_ID: 'host-ovs-no_filter', - 'device_id': 'router1'}}) + self.plugin.update_distributed_port_binding(ctx, port_id, {'port': + {portbindings.HOST_ID: 'host-ovs-no_filter', + 'device_id': 'router1'}}) # Mark 1st device up. self.plugin.endpoints[0].update_device_up( @@ -287,9 +282,9 @@ self.assertEqual('ACTIVE', port['port']['status']) # Update port to bind for a 2nd host. - self.plugin.update_dvr_port_binding(ctx, port_id, {'port': { - portbindings.HOST_ID: 'host-bridge-filter', - 'device_id': 'router1'}}) + self.plugin.update_distributed_port_binding(ctx, port_id, {'port': + {portbindings.HOST_ID: 'host-bridge-filter', + 'device_id': 'router1'}}) # Mark 2nd device up. 
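The multi-host binding test above, which continues just below, encodes the status rule for distributed ports: the port reports ACTIVE as soon as any one host-level binding has its device up. A toy model of that aggregation, under the assumption (mine, not the patch's) that per-host device states live in a plain dict:

# Toy model: one device state per (port, host) binding.
ACTIVE, DOWN = 'ACTIVE', 'DOWN'


def port_status(state_by_host):
    """ACTIVE if any host reports its device up, otherwise DOWN."""
    return ACTIVE if ACTIVE in state_by_host.values() else DOWN


states = {'host-ovs-no_filter': DOWN, 'host-bridge-filter': DOWN}
assert port_status(states) == DOWN
states['host-ovs-no_filter'] = ACTIVE    # mark 1st device up
assert port_status(states) == ACTIVE
states['host-bridge-filter'] = ACTIVE    # mark 2nd device up
assert port_status(states) == ACTIVE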
self.plugin.endpoints[0].update_device_up( @@ -318,7 +313,7 @@ port = self._show('ports', port_id) self.assertEqual('DOWN', port['port']['status']) - def test_dvr_binding_update_unbound_host(self): + def test_distributed_binding_update_unbound_host(self): ctx = context.get_admin_context() with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port: port_id = port['port']['id'] diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_rpc.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_rpc.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_rpc.py 2016-05-23 16:29:20.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_rpc.py 2016-08-29 20:05:49.000000000 +0000 @@ -222,6 +222,9 @@ def test_update_device_up_with_device_not_bound_to_host(self): self.assertIsNone(self._test_update_device_not_bound_to_host( self.callbacks.update_device_up)) + port = self.plugin._get_port.return_value + (self.plugin.nova_notifier.notify_port_active_direct. + assert_called_once_with(port)) def test_update_device_down_with_device_not_bound_to_host(self): self.assertEqual( @@ -309,7 +312,8 @@ class RpcApiTestCase(base.BaseTestCase): def _test_rpc_api(self, rpcapi, topic, method, rpc_method, **kwargs): - ctxt = oslo_context.RequestContext('fake_user', 'fake_project') + ctxt = oslo_context.RequestContext(user='fake_user', + tenant='fake_project') expected_retval = 'foo' if rpc_method == 'call' else None expected_version = kwargs.pop('version', None) fanout = kwargs.pop('fanout', False) @@ -440,7 +444,8 @@ def test_update_device_list_unsupported(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) - ctxt = oslo_context.RequestContext('fake_user', 'fake_project') + ctxt = oslo_context.RequestContext(user='fake_user', + tenant='fake_project') devices_up = ['fake_device1', 'fake_device2'] devices_down = ['fake_device3', 'fake_device4'] expected_ret_val = {'devices_up': ['fake_device2'], @@ -482,7 +487,8 @@ def test_get_devices_details_list_and_failed_devices_unsupported(self): rpcapi = agent_rpc.PluginApi(topics.PLUGIN) - ctxt = oslo_context.RequestContext('fake_user', 'fake_project') + ctxt = oslo_context.RequestContext(user='fake_user', + tenant='fake_project') devices = ['fake_device1', 'fake_device2'] dev2_details = {'device': 'fake_device2', 'network_id': 'net_id', 'port_id': 'port_id', 'admin_state_up': True} diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_security_group.py neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_security_group.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/plugins/ml2/test_security_group.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/plugins/ml2/test_security_group.py 2016-08-03 20:10:34.000000000 +0000 @@ -27,12 +27,10 @@ from neutron.tests.unit.api.v2 import test_base from neutron.tests.unit.extensions import test_securitygroup as test_sg -PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin' NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi' class Ml2SecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase): - _plugin_name = PLUGIN_NAME def setUp(self, plugin=None): test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER) @@ -41,7 +39,7 @@ self.notifier = mock.Mock() notifier_cls.return_value = self.notifier self.useFixture(tools.AttributeMapMemento()) - super(Ml2SecurityGroupsTestCase, self).setUp(PLUGIN_NAME) + super(Ml2SecurityGroupsTestCase, self).setUp('ml2') def tearDown(self): super(Ml2SecurityGroupsTestCase, 
self).tearDown() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/quota/__init__.py neutron-9.0.0~b3~dev557/neutron/tests/unit/quota/__init__.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/quota/__init__.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/quota/__init__.py 2016-08-29 20:05:49.000000000 +0000 @@ -19,9 +19,9 @@ # Model classes for test resources -class MehModel(model_base.BASEV2, model_base.HasTenant): +class MehModel(model_base.BASEV2, model_base.HasProject): meh = sa.Column(sa.String(8), primary_key=True) -class OtherMehModel(model_base.BASEV2, model_base.HasTenant): +class OtherMehModel(model_base.BASEV2, model_base.HasProject): othermeh = sa.Column(sa.String(8), primary_key=True) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py neutron-9.0.0~b3~dev557/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py 2016-08-29 20:05:49.000000000 +0000 @@ -28,6 +28,7 @@ from neutron.db.network_dhcp_agent_binding import models as ndab_model from neutron.extensions import dhcpagentscheduler from neutron.scheduler import dhcp_agent_scheduler +from neutron.services.segments import db as segments_service_db from neutron.tests.common import helpers from neutron.tests.unit.plugins.ml2 import test_plugin from neutron.tests.unit import testlib_api @@ -142,7 +143,8 @@ self._test_get_agents_and_scheduler_for_dead_agent()) plugin = mock.Mock() plugin.get_subnets.return_value = [{"network_id": self.network_id, - "enable_dhcp": True}] + "enable_dhcp": True, + "segment_id": None}] plugin.get_network.return_value = self.network if active_hosts_only: plugin.get_dhcp_agents_hosting_networks.return_value = [] @@ -263,8 +265,8 @@ def test_auto_schedule_network(self): plugin = mock.MagicMock() plugin.get_subnets.return_value = ( - [{"network_id": self.network_id, "enable_dhcp": self.enable_dhcp}] - if self.network_present else []) + [{"network_id": self.network_id, "enable_dhcp": self.enable_dhcp, + "segment_id": None}] if self.network_present else []) plugin.get_network.return_value = {'availability_zone_hints': self.az_hints} scheduler = dhcp_agent_scheduler.ChanceScheduler() @@ -290,6 +292,112 @@ self.assertEqual(expected_hosted_agents, len(hosted_agents)) +class TestAutoScheduleSegments(test_plugin.Ml2PluginV2TestCase, + TestDhcpSchedulerBaseTestCase): + """Unit test scenarios for ChanceScheduler""" + + def setUp(self): + super(TestAutoScheduleSegments, self).setUp() + self.plugin = self.driver + self.segments_plugin = importutils.import_object( + 'neutron.services.segments.plugin.Plugin') + self.ctx = context.get_admin_context() + + def _create_network(self): + net = self.plugin.create_network( + self.ctx, + {'network': {'name': 'name', + 'tenant_id': 'tenant_one', + 'admin_state_up': True, + 'shared': True}}) + return net['id'] + + def _create_segment(self, network_id): + seg = self.segments_plugin.create_segment( + self.ctx, + {'segment': {'network_id': network_id, + 'physical_network': 'physnet1', + 'network_type': 'vlan', + 'segmentation_id': constants.ATTR_NOT_SPECIFIED}}) + return seg['id'] + + def _create_subnet(self, segment_id, network_id, cidr='192.168.10.0/24'): + subnet = self.plugin.create_subnet( + self.ctx, + {'subnet': {'name': 'name', + 'ip_version': 4, + 'network_id': network_id, + 'cidr': 
cidr, + 'gateway_ip': constants.ATTR_NOT_SPECIFIED, + 'allocation_pools': constants.ATTR_NOT_SPECIFIED, + 'dns_nameservers': constants.ATTR_NOT_SPECIFIED, + 'host_routes': constants.ATTR_NOT_SPECIFIED, + 'tenant_id': 'tenant_one', + 'enable_dhcp': True, + 'segment_id': segment_id}}) + return subnet['id'] + + def test_auto_schedule_one_network_one_segment_one_subnet(self): + net_id = self._create_network() + seg_id = self._create_segment(net_id) + self._create_subnet(seg_id, net_id) + helpers.register_dhcp_agent(HOST_C) + segments_service_db.update_segment_host_mapping( + self.ctx, HOST_C, {seg_id}) + scheduler = dhcp_agent_scheduler.ChanceScheduler() + observed_return_val = scheduler.auto_schedule_networks( + self.plugin, self.ctx, HOST_C) + self.assertTrue(observed_return_val) + agent1 = self.plugin.get_dhcp_agents_hosting_networks( + self.ctx, [net_id]) + self.assertEqual(1, len(agent1)) + self.assertEqual('host-c', agent1[0]['host']) + + def test_auto_schedule_one_network_one_segment_two_subnet(self): + net_id = self._create_network() + seg_id = self._create_segment(net_id) + self._create_subnet(seg_id, net_id) + self._create_subnet(seg_id, net_id, '192.168.11.0/24') + helpers.register_dhcp_agent(HOST_C) + segments_service_db.update_segment_host_mapping( + self.ctx, HOST_C, {seg_id}) + scheduler = dhcp_agent_scheduler.ChanceScheduler() + observed_return_val = scheduler.auto_schedule_networks( + self.plugin, self.ctx, HOST_C) + self.assertTrue(observed_return_val) + agent1 = self.plugin.get_dhcp_agents_hosting_networks( + self.ctx, [net_id]) + self.assertEqual(1, len(agent1)) + self.assertEqual('host-c', agent1[0]['host']) + + def test_auto_schedule_one_network_two_segments_with_one_subnet_each(self): + net_id = self._create_network() + seg1_id = self._create_segment(net_id) + self._create_subnet(seg1_id, net_id) + helpers.register_dhcp_agent(HOST_D) + segments_service_db.update_segment_host_mapping( + self.ctx, HOST_D, {seg1_id}) + scheduler = dhcp_agent_scheduler.ChanceScheduler() + observed_val_first_segment = scheduler.auto_schedule_networks( + self.plugin, self.ctx, HOST_D) + self.assertTrue(observed_val_first_segment) + agents = self.plugin.get_dhcp_agents_hosting_networks( + self.ctx, [net_id]) + self.assertEqual(1, len(agents)) + + seg2_id = self._create_segment(net_id) + self._create_subnet(seg2_id, net_id, '192.168.11.0/24') + helpers.register_dhcp_agent(HOST_C) + segments_service_db.update_segment_host_mapping( + self.ctx, HOST_C, {seg2_id}) + observed_val_second_segment = scheduler.auto_schedule_networks( + self.plugin, self.ctx, HOST_C) + self.assertTrue(observed_val_second_segment) + agents = self.plugin.get_dhcp_agents_hosting_networks( + self.ctx, [net_id]) + self.assertEqual(2, len(agents)) + + class TestNetworksFailover(TestDhcpSchedulerBaseTestCase, sched_db.DhcpAgentSchedulerDbMixin, common_db_mixin.CommonDbMixin): @@ -397,14 +505,15 @@ weight_scheduler = ( 'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler') cfg.CONF.set_override('network_scheduler_driver', weight_scheduler) - self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.' 
- 'Ml2Plugin') + self.plugin = self.driver mock.patch.object( self.plugin, 'filter_hosts_with_network_access', side_effect=lambda context, network_id, hosts: hosts).start() self.plugin.network_scheduler = importutils.import_object( weight_scheduler) cfg.CONF.set_override("dhcp_load_type", "networks") + self.segments_plugin = importutils.import_object( + 'neutron.services.segments.plugin.Plugin') self.ctx = context.get_admin_context() def _create_network(self): @@ -416,6 +525,15 @@ 'shared': True}}) return net['id'] + def _create_segment(self, network_id): + seg = self.segments_plugin.create_segment( + self.ctx, + {'segment': {'network_id': network_id, + 'physical_network': 'physnet1', + 'network_type': 'vlan', + 'segmentation_id': constants.ATTR_NOT_SPECIFIED}}) + return seg['id'] + def test_scheduler_one_agents_per_network(self): net_id = self._create_network() helpers.register_dhcp_agent(HOST_C) @@ -468,6 +586,85 @@ self.assertEqual('host-c', agent2[0]['host']) self.assertEqual('host-d', agent3[0]['host']) + def test_schedule_segment_one_hostable_agent(self): + net_id = self._create_network() + seg_id = self._create_segment(net_id) + helpers.register_dhcp_agent(HOST_C) + helpers.register_dhcp_agent(HOST_D) + segments_service_db.update_segment_host_mapping( + self.ctx, HOST_C, {seg_id}) + net = self.plugin.get_network(self.ctx, net_id) + seg = self.segments_plugin.get_segment(self.ctx, seg_id) + net['candidate_hosts'] = seg['hosts'] + agents = self.plugin.network_scheduler.schedule( + self.plugin, self.ctx, net) + self.assertEqual(1, len(agents)) + self.assertEqual(HOST_C, agents[0].host) + + def test_schedule_segment_many_hostable_agents(self): + net_id = self._create_network() + seg_id = self._create_segment(net_id) + helpers.register_dhcp_agent(HOST_C) + helpers.register_dhcp_agent(HOST_D) + segments_service_db.update_segment_host_mapping( + self.ctx, HOST_C, {seg_id}) + segments_service_db.update_segment_host_mapping( + self.ctx, HOST_D, {seg_id}) + net = self.plugin.get_network(self.ctx, net_id) + seg = self.segments_plugin.get_segment(self.ctx, seg_id) + net['candidate_hosts'] = seg['hosts'] + agents = self.plugin.network_scheduler.schedule( + self.plugin, self.ctx, net) + self.assertEqual(1, len(agents)) + self.assertIn(agents[0].host, [HOST_C, HOST_D]) + + def test_schedule_segment_no_host_mapping(self): + net_id = self._create_network() + seg_id = self._create_segment(net_id) + helpers.register_dhcp_agent(HOST_C) + helpers.register_dhcp_agent(HOST_D) + net = self.plugin.get_network(self.ctx, net_id) + seg = self.segments_plugin.get_segment(self.ctx, seg_id) + net['candidate_hosts'] = seg['hosts'] + agents = self.plugin.network_scheduler.schedule( + self.plugin, self.ctx, net) + self.assertEqual(0, len(agents)) + + def test_schedule_segment_two_agents_per_segment(self): + cfg.CONF.set_override('dhcp_agents_per_network', 2) + net_id = self._create_network() + seg_id = self._create_segment(net_id) + helpers.register_dhcp_agent(HOST_C) + helpers.register_dhcp_agent(HOST_D) + segments_service_db.update_segment_host_mapping( + self.ctx, HOST_C, {seg_id}) + segments_service_db.update_segment_host_mapping( + self.ctx, HOST_D, {seg_id}) + net = self.plugin.get_network(self.ctx, net_id) + seg = self.segments_plugin.get_segment(self.ctx, seg_id) + net['candidate_hosts'] = seg['hosts'] + agents = self.plugin.network_scheduler.schedule( + self.plugin, self.ctx, net) + self.assertEqual(2, len(agents)) + self.assertIn(agents[0].host, [HOST_C, HOST_D]) + self.assertIn(agents[1].host, [HOST_C, 
HOST_D]) + + def test_schedule_segment_two_agents_per_segment_one_hostable_agent(self): + cfg.CONF.set_override('dhcp_agents_per_network', 2) + net_id = self._create_network() + seg_id = self._create_segment(net_id) + helpers.register_dhcp_agent(HOST_C) + helpers.register_dhcp_agent(HOST_D) + segments_service_db.update_segment_host_mapping( + self.ctx, HOST_C, {seg_id}) + net = self.plugin.get_network(self.ctx, net_id) + seg = self.segments_plugin.get_segment(self.ctx, seg_id) + net['candidate_hosts'] = seg['hosts'] + agents = self.plugin.network_scheduler.schedule( + self.plugin, self.ctx, net) + self.assertEqual(1, len(agents)) + self.assertEqual(HOST_C, agents[0].host) + class TestDhcpSchedulerFilter(TestDhcpSchedulerBaseTestCase, sched_db.DhcpAgentSchedulerDbMixin): @@ -518,13 +715,16 @@ 'host-c', 'host-d'}, networks=networks) + def test_get_dhcp_agents_host_network_filter_by_hosts(self): + self._test_get_dhcp_agents_hosting_networks({'host-a'}, + hosts=['host-a']) + class DHCPAgentAZAwareWeightSchedulerTestCase(TestDhcpSchedulerBaseTestCase): def setUp(self): super(DHCPAgentAZAwareWeightSchedulerTestCase, self).setUp() - DB_PLUGIN_KLASS = 'neutron.plugins.ml2.plugin.Ml2Plugin' - self.setup_coreplugin(DB_PLUGIN_KLASS) + self.setup_coreplugin('ml2') cfg.CONF.set_override("network_scheduler_driver", 'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler') self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.' diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py neutron-9.0.0~b3~dev557/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py 2016-06-22 13:41:08.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py 2016-08-29 20:05:49.000000000 +0000 @@ -26,7 +26,6 @@ from sqlalchemy import orm import testscenarios -from neutron.common import constants as n_const from neutron import context as n_context from neutron.db import agents_db from neutron.db import db_base_plugin_v2 as db_v2 @@ -286,18 +285,18 @@ def _register_l3_agents(self, plugin=None): self.agent1 = helpers.register_l3_agent( - 'host_1', n_const.L3_AGENT_MODE_LEGACY) + 'host_1', constants.L3_AGENT_MODE_LEGACY) self.agent_id1 = self.agent1.id self.agent2 = helpers.register_l3_agent( - 'host_2', n_const.L3_AGENT_MODE_LEGACY) + 'host_2', constants.L3_AGENT_MODE_LEGACY) self.agent_id2 = self.agent2.id def _register_l3_dvr_agents(self): self.l3_dvr_agent = helpers.register_l3_agent( - HOST_DVR, n_const.L3_AGENT_MODE_DVR) + HOST_DVR, constants.L3_AGENT_MODE_DVR) self.l3_dvr_agent_id = self.l3_dvr_agent.id self.l3_dvr_snat_agent = helpers.register_l3_agent( - HOST_DVR_SNAT, n_const.L3_AGENT_MODE_DVR_SNAT) + HOST_DVR_SNAT, constants.L3_AGENT_MODE_DVR_SNAT) self.l3_dvr_snat_id = self.l3_dvr_snat_agent.id def _set_l3_agent_admin_state(self, context, agent_id, state=True): @@ -863,8 +862,7 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): def setUp(self): - plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin' - self.setup_coreplugin(plugin) + self.setup_coreplugin('ml2') super(L3DvrSchedulerTestCase, self).setUp() self.adminContext = n_context.get_admin_context() self.dut = L3DvrScheduler() @@ -1025,7 +1023,7 @@ 'context': self.adminContext, 'original_port': None, 'port': { - 'device_owner': 'network:None', + 'device_owner': constants.DEVICE_OWNER_NETWORK_PREFIX + 'None', } } l3plugin = mock.Mock() @@ -1374,7 +1372,7 @@ dvr_port = { 'id': 
'dvr_port1', 'device_id': 'r1', - 'device_owner': 'network:router_interface_distributed', + 'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE, 'fixed_ips': [] } r1 = { @@ -1418,7 +1416,7 @@ mock.patch('neutron.common.rpc.get_client').start() self.plugin = L3HAPlugin() - self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin') + self.setup_coreplugin('ml2') cfg.CONF.set_override('service_plugins', ['neutron.services.l3_router.' 'l3_router_plugin.L3RouterPlugin']) @@ -1675,6 +1673,34 @@ def test_auto_schedule_all_routers_when_agent_added(self): self._auto_schedule_when_agent_added(False) + def test_auto_schedule_ha_router_when_incompatible_agent_exist(self): + handle_internal_only_routers_agent = helpers.register_l3_agent( + 'host_3', constants.L3_AGENT_MODE_LEGACY, internal_only=False) + router = self._create_ha_router() + + self.plugin.auto_schedule_routers( + self.adminContext, handle_internal_only_routers_agent.host, []) + agents = self.plugin.get_l3_agents_hosting_routers( + self.adminContext, [router['id']], + admin_state_up=True) + agent_ids = [agent['id'] for agent in agents] + self.assertEqual(2, len(agents)) + self.assertNotIn(handle_internal_only_routers_agent.id, agent_ids) + + def test_auto_schedule_ha_router_when_dvr_agent_exist(self): + dvr_agent = helpers.register_l3_agent( + HOST_DVR, constants.L3_AGENT_MODE_DVR) + router = self._create_ha_router() + + self.plugin.auto_schedule_routers(self.adminContext, dvr_agent.host, + []) + agents = self.plugin.get_l3_agents_hosting_routers( + self.adminContext, [router['id']], + admin_state_up=True) + agent_ids = [agent['id'] for agent in agents] + self.assertEqual(2, len(agents)) + self.assertNotIn(dvr_agent.id, agent_ids) + def _auto_schedule_when_agent_added(self, specific_router): router = self._create_ha_router() agents = self.plugin.get_l3_agents_hosting_routers( @@ -1821,7 +1847,7 @@ def setUp(self): super(TestGetL3AgentsWithAgentModeFilter, self).setUp() self.plugin = L3HAPlugin() - self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin') + self.setup_coreplugin('ml2') self.adminContext = n_context.get_admin_context() hosts = ['host_1', 'host_2', 'host_3', 'host_4', 'host_5'] agent_modes = ['legacy', 'dvr_snat', 'dvr', 'fake_mode', 'legacy'] diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/auto_allocate/test_db.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/auto_allocate/test_db.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/auto_allocate/test_db.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/auto_allocate/test_db.py 2016-08-29 20:05:49.000000000 +0000 @@ -13,7 +13,10 @@ import mock from neutron_lib import exceptions as n_exc +from oslo_db import exception as db_exc +from oslo_utils import uuidutils +from neutron.common import exceptions as c_exc from neutron import context from neutron.services.auto_allocate import db from neutron.services.auto_allocate import exceptions @@ -27,6 +30,7 @@ self.ctx = context.get_admin_context() self.mixin = db.AutoAllocatedTopologyMixin() self.mixin._l3_plugin = mock.Mock() + self.mixin._core_plugin = mock.Mock() def test__provision_external_connectivity_expected_cleanup(self): """Test that the right resources are cleaned up.""" @@ -58,6 +62,37 @@ self.mixin.get_auto_allocated_topology, self.ctx, mock.ANY, fields=['foo']) + def test__provision_tenant_private_network_handles_subnet_errors(self): + network_id = uuidutils.generate_uuid() + self.mixin._core_plugin.create_network.return_value = ( + {'id': 
network_id}) + self.mixin._core_plugin.create_subnet.side_effect = ( + c_exc.SubnetAllocationError(reason='disaster')) + with mock.patch.object(self.mixin, "_get_supported_subnetpools") as f,\ + mock.patch.object(self.mixin, "_cleanup") as g: + f.return_value = ( + [{'ip_version': 4, "id": uuidutils.generate_uuid()}]) + self.assertRaises(exceptions.AutoAllocationFailure, + self.mixin._provision_tenant_private_network, + self.ctx, 'foo_tenant') + g.assert_called_once_with(self.ctx, network_id) + + def _test__build_topology(self, exception): + with mock.patch.object(self.mixin, + '_provision_tenant_private_network', + side_effect=exception), \ + mock.patch.object(self.mixin, '_cleanup') as f: + self.assertRaises(exception, + self.mixin._build_topology, + self.ctx, mock.ANY, 'foo_net') + return f.call_count + + def test__build_topology_retriable_exception(self): + self.assertTrue(self._test__build_topology(db_exc.DBConnectionError)) + + def test__build_topology_non_retriable_exception(self): + self.assertFalse(self._test__build_topology(Exception)) + def test__check_requirements_fail_on_missing_ext_net(self): self.assertRaises(exceptions.AutoAllocationFailure, self.mixin._check_requirements, self.ctx, 'foo_tenant') @@ -79,3 +114,17 @@ result = self.mixin._check_requirements(self.ctx, 'foo_tenant') expected = {'id': 'dry-run=pass', 'tenant_id': 'foo_tenant'} self.assertEqual(expected, result) + + def test__cleanup_handles_failures(self): + retry_then_notfound = ( + [db_exc.RetryRequest(ValueError())] + + [n_exc.NotFound()] * 10 + ) + self.mixin._l3_plugin.remove_router_interface.side_effect = ( + retry_then_notfound) + self.mixin._l3_plugin.delete_router.side_effect = ( + retry_then_notfound) + self.mixin._core_plugin.delete_network.side_effect = ( + retry_then_notfound) + self.mixin._cleanup(self.ctx, network_id=44, router_id=45, + subnets=[{'id': 46}]) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/l3_router/service_providers/test_driver_controller.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/l3_router/service_providers/test_driver_controller.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/l3_router/service_providers/test_driver_controller.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/l3_router/service_providers/test_driver_controller.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,128 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
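The DriverController tests in this new file pivot on how a router body's distributed/ha flags select a provider driver. A sketch of that selection table as the attr-lookup cases below imply it (the pick_driver helper and the sentinel handling are illustrative guesses, not the controller's real code):

ATTR_NOT_SPECIFIED = object()  # stand-in for the neutron_lib sentinel


def pick_driver(distributed, ha):
    """Map router flags to a provider name; unspecified flags default off."""
    dvr = distributed is True
    ha_on = ha is True
    if dvr and ha_on:
        return 'dvrha'
    if dvr:
        return 'dvr'
    if ha_on:
        return 'ha'
    return 'single_node'


assert pick_driver(True, True) == 'dvrha'
assert pick_driver(True, ATTR_NOT_SPECIFIED) == 'dvr'
assert pick_driver(ATTR_NOT_SPECIFIED, True) == 'ha'
assert pick_driver(ATTR_NOT_SPECIFIED, ATTR_NOT_SPECIFIED) == 'single_node'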
+ +import mock +from mock import patch +from neutron_lib import constants +from neutron_lib import exceptions as lib_exc +import testtools + +from neutron import context +from neutron import manager +from neutron.plugins.common import constants as p_cons +from neutron.services.l3_router.service_providers import driver_controller +from neutron.services import provider_configuration +from neutron.tests import base +from neutron.tests.unit import testlib_api + + +class TestDriverController(testlib_api.SqlTestCase): + + def setUp(self): + super(TestDriverController, self).setUp() + self.fake_l3 = mock.Mock() + self.dc = driver_controller.DriverController(self.fake_l3) + self.ctx = context.get_admin_context() + + def _return_provider_for_flavor(self, provider): + self.dc._flavor_plugin_ref = mock.Mock() + self.dc._flavor_plugin_ref.get_flavor.return_value = {'id': 'abc'} + provider = {'provider': provider} + self.dc._flavor_plugin_ref.get_flavor_next_provider.return_value = [ + provider] + + def test__set_router_provider_flavor_specified(self): + self._return_provider_for_flavor('dvrha') + router_db = mock.Mock() + router = dict(id='router_id', flavor_id='abc123') + self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self, + self.ctx, router, router_db) + self.assertEqual('abc123', router_db.flavor_id) + self.assertEqual(self.dc.drivers['dvrha'], + self.dc._get_provider_for_router(self.ctx, + 'router_id')) + + def test__update_router_provider_invalid(self): + test_dc = driver_controller.DriverController(self.fake_l3) + with mock.patch.object(test_dc, "_get_provider_for_router"): + with mock.patch.object( + driver_controller, + "_ensure_driver_supports_request") as _ensure: + _ensure.side_effect = lib_exc.Invalid(message='message') + self.assertRaises( + lib_exc.Invalid, + test_dc._update_router_provider, + None, None, None, None, + None, {'name': 'testname'}, + {'flavor_id': 'old_fid'}, None) + + def test__set_router_provider_attr_lookups(self): + # ensure correct drivers are looked up based on attrs + cases = [ + ('dvrha', dict(id='router_id1', distributed=True, ha=True)), + ('dvr', dict(id='router_id2', distributed=True, ha=False)), + ('ha', dict(id='router_id3', distributed=False, ha=True)), + ('single_node', dict(id='router_id4', distributed=False, + ha=False)), + ('ha', dict(id='router_id5', ha=True, + distributed=constants.ATTR_NOT_SPECIFIED)), + ('dvr', dict(id='router_id6', distributed=True, + ha=constants.ATTR_NOT_SPECIFIED)), + ('single_node', dict(id='router_id7', ha=False, + distributed=constants.ATTR_NOT_SPECIFIED)), + ('single_node', dict(id='router_id8', distributed=False, + ha=constants.ATTR_NOT_SPECIFIED)), + ('single_node', dict(id='router_id9', + distributed=constants.ATTR_NOT_SPECIFIED, + ha=constants.ATTR_NOT_SPECIFIED)), + ] + for driver, body in cases: + self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self, + self.ctx, body, mock.Mock()) + self.assertEqual(self.dc.drivers[driver], + self.dc._get_provider_for_router(self.ctx, + body['id']), + 'Expecting %s for body %s' % (driver, body)) + + def test__clear_router_provider(self): + # associate a provider first, then verify the association is cleared + body = dict(id='router_id1', distributed=True, ha=True) + self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self, + self.ctx, body, mock.Mock()) + self.assertEqual(self.dc.drivers['dvrha'], + self.dc._get_provider_for_router(self.ctx, + body['id'])) + self.dc._clear_router_provider('router', 'PRECOMMIT_DELETE', self, + self.ctx, body['id']) + with
testtools.ExpectedException(ValueError): + # if association was cleared, get_router will be called + self.fake_l3.get_router.side_effect = ValueError + self.dc._get_provider_for_router(self.ctx, body['id']) + + @patch.object(manager.NeutronManager, "get_service_plugins") + def test__flavor_plugin(self, get_service_plugins): + _fake_flavor_plugin = mock.sentinel.fla_plugin + get_service_plugins.return_value = {p_cons.FLAVORS: + _fake_flavor_plugin} + _dc = driver_controller.DriverController(self.fake_l3) + self.assertEqual(_fake_flavor_plugin, _dc._flavor_plugin) + + +class Test_LegacyPlusProviderConfiguration(base.BaseTestCase): + + @mock.patch.object(provider_configuration.ProviderConfiguration, + "add_provider") + def test__update_router_provider_invalid(self, mock_method): + mock_method.side_effect = lib_exc.Invalid(message='message') + driver_controller._LegacyPlusProviderConfiguration() diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/metering/agents/test_metering_agent.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/metering/agents/test_metering_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/metering/agents/test_metering_agent.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/metering/agents/test_metering_agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -18,6 +18,7 @@ from oslo_utils import timeutils from oslo_utils import uuidutils +from neutron.conf.services import metering_agent as metering_agent_config from neutron.services.metering.agents import metering_agent from neutron.tests import base from neutron.tests import fake_notifier @@ -49,7 +50,7 @@ def setUp(self): super(TestMeteringOperations, self).setUp() - cfg.CONF.register_opts(metering_agent.MeteringAgent.Opts) + metering_agent_config.register_metering_agent_opts() self.noop_driver = ('neutron.services.metering.drivers.noop.' 'noop_driver.NoopMeteringDriver') @@ -228,7 +229,7 @@ class TestMeteringDriver(base.BaseTestCase): def setUp(self): super(TestMeteringDriver, self).setUp() - cfg.CONF.register_opts(metering_agent.MeteringAgent.Opts) + metering_agent_config.register_metering_agent_opts() self.noop_driver = ('neutron.services.metering.drivers.noop.' 
'noop_driver.NoopMeteringDriver') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/qos/notification_drivers/test_manager.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/qos/notification_drivers/test_manager.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/qos/notification_drivers/test_manager.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/qos/notification_drivers/test_manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -15,6 +15,7 @@ from oslo_utils import uuidutils from neutron.api.rpc.callbacks import events +from neutron.conf.services import qos_driver_manager as driver_mgr_config from neutron import context from neutron.objects.qos import policy as policy_object from neutron.services.qos.notification_drivers import manager as driver_mgr @@ -39,7 +40,7 @@ self.config_parse() self.setup_coreplugin() config = cfg.ConfigOpts() - config.register_opts(driver_mgr.QOS_PLUGIN_OPTS, "qos") + driver_mgr_config.register_qos_plugin_opts(config) self.policy_data = {'policy': { 'id': uuidutils.generate_uuid(), 'tenant_id': uuidutils.generate_uuid(), @@ -66,7 +67,7 @@ self.driver_manager = driver_mgr.QosServiceNotificationDriverManager() def _validate_registry_params(self, event_type, policy): - self.rpc_api.push.assert_called_with(self.context, policy, + self.rpc_api.push.assert_called_with(self.context, [policy], event_type) def test_create_policy_default_configuration(self): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py 2016-08-03 20:10:34.000000000 +0000 @@ -53,7 +53,7 @@ **self.rule_data['bandwidth_limit_rule']) def _validate_push_params(self, event_type, policy): - self.rpc_api.push.assert_called_once_with(self.context, policy, + self.rpc_api.push.assert_called_once_with(self.context, [policy], event_type) def test_create_policy(self): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/revisions/test_revision_plugin.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/revisions/test_revision_plugin.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/revisions/test_revision_plugin.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/revisions/test_revision_plugin.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,123 @@ +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
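The new test_revision_plugin.py below exercises the revisions service plugin: updating a resource, or one of its children (an IP allocation, a security group rule, a router interface), must leave the parent with a strictly greater revision number. A toy model of that contract, not the plugin's actual implementation, which bumps revisions from SQLAlchemy session events:

class Revisioned(object):
    def __init__(self):
        self.revision = 0

    def bump(self):
        self.revision += 1

class Port(Revisioned):
    def __init__(self):
        super(Port, self).__init__()
        self.fixed_ips = []

    def add_ip(self, ip):
        self.fixed_ips.append(ip)
        self.bump()  # a child change must bump the parent revision

port = Port()
before = port.revision
port.add_ip('10.0.0.3')
assert port.revision > before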
+# + +import netaddr + +from neutron import context as nctx +from neutron.db import models_v2 +from neutron import manager +from neutron.tests.unit.plugins.ml2 import test_plugin + + +class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase): + + def get_additional_service_plugins(self): + p = super(TestRevisionPlugin, self).get_additional_service_plugins() + p.update({'revision_plugin_name': 'revisions'}) + return p + + def setUp(self): + super(TestRevisionPlugin, self).setUp() + self.cp = manager.NeutronManager.get_plugin() + self.l3p = (manager.NeutronManager. + get_service_plugins()['L3_ROUTER_NAT']) + self.ctx = nctx.get_admin_context() + + def test_handle_expired_object(self): + rp = manager.NeutronManager.get_service_plugins()['revision_plugin'] + with self.port(): + with self.ctx.session.begin(): + ipal_obj = self.ctx.session.query(models_v2.IPAllocation).one() + # load port into our session + port_obj = self.ctx.session.query(models_v2.Port).one() + # simulate concurrent delete in another session + nctx.get_admin_context().session.query(models_v2.Port).delete() + # expire the port so the revision bumping code will trigger a + # lookup on its attributes and encounter an ObjectDeletedError + self.ctx.session.expire(port_obj) + rp._bump_related_revisions(self.ctx.session, ipal_obj) + + def test_port_name_update_revises(self): + with self.port() as port: + rev = port['port']['revision'] + new = {'port': {'name': 'seaweed'}} + response = self._update('ports', port['port']['id'], new) + new_rev = response['port']['revision'] + self.assertGreater(new_rev, rev) + + def test_port_ip_update_revises(self): + with self.port() as port: + rev = port['port']['revision'] + new = {'port': {'fixed_ips': port['port']['fixed_ips']}} + # ensure adding an IP allocation updates the port + next_ip = str(netaddr.IPAddress( + new['port']['fixed_ips'][0]['ip_address']) + 1) + new['port']['fixed_ips'].append({'ip_address': next_ip}) + response = self._update('ports', port['port']['id'], new) + self.assertEqual(2, len(response['port']['fixed_ips'])) + new_rev = response['port']['revision'] + self.assertGreater(new_rev, rev) + # ensure deleting an IP allocation updates the port + rev = new_rev + new['port']['fixed_ips'].pop() + response = self._update('ports', port['port']['id'], new) + self.assertEqual(1, len(response['port']['fixed_ips'])) + new_rev = response['port']['revision'] + self.assertGreater(new_rev, rev) + + def test_security_group_rule_ops_bump_security_group(self): + s = {'security_group': {'tenant_id': 'some_tenant', 'name': '', + 'description': 's'}} + sg = self.cp.create_security_group(self.ctx, s) + s['security_group']['name'] = 'hello' + updated = self.cp.update_security_group(self.ctx, sg['id'], s) + self.assertGreater(updated['revision'], sg['revision']) + # ensure rule changes bump parent SG + r = {'security_group_rule': {'tenant_id': 'some_tenant', + 'port_range_min': 80, 'protocol': 6, + 'port_range_max': 90, + 'remote_ip_prefix': '0.0.0.0/0', + 'ethertype': 'IPv4', + 'remote_group_id': None, + 'direction': 'ingress', + 'security_group_id': sg['id']}} + rule = self.cp.create_security_group_rule(self.ctx, r) + sg = updated + updated = self.cp.get_security_group(self.ctx, sg['id']) + self.assertGreater(updated['revision'], sg['revision']) + self.cp.delete_security_group_rule(self.ctx, rule['id']) + sg = updated + updated = self.cp.get_security_group(self.ctx, sg['id']) + self.assertGreater(updated['revision'], sg['revision']) + + def test_router_interface_ops_bump_router(self): + r = 
{'router': {'name': 'myrouter', 'tenant_id': 'some_tenant', + 'admin_state_up': True}} + router = self.l3p.create_router(self.ctx, r) + r['router']['name'] = 'yourrouter' + updated = self.l3p.update_router(self.ctx, router['id'], r) + self.assertGreater(updated['revision'], router['revision']) + # add an intf and make sure it bumps rev + with self.subnet(tenant_id='some_tenant') as s: + interface_info = {'subnet_id': s['subnet']['id']} + self.l3p.add_router_interface(self.ctx, router['id'], interface_info) + router = updated + updated = self.l3p.get_router(self.ctx, router['id']) + self.assertGreater(updated['revision'], router['revision']) + self.l3p.remove_router_interface(self.ctx, router['id'], + interface_info) + router = updated + updated = self.l3p.get_router(self.ctx, router['id']) + self.assertGreater(updated['revision'], router['revision']) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/drivers/linuxbridge/test_driver.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/drivers/linuxbridge/test_driver.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/drivers/linuxbridge/test_driver.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/drivers/linuxbridge/test_driver.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,40 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
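The linuxbridge trunk driver test that follows toggles ml2's mechanism_drivers list and the core_plugin option to flip the driver's is_loaded flag. A plausible shape for such a check, sketched against an illustrative config object rather than oslo.config, and not the driver's real logic:

class FakeConf(object):
    core_plugin = 'ml2'
    mechanism_drivers = ['a', 'b', 'linuxbridge']

def is_loaded(conf):
    # loaded only when ml2 is the core plugin and the linuxbridge
    # mechanism driver is configured
    return (conf.core_plugin == 'ml2' and
            'linuxbridge' in conf.mechanism_drivers)

conf = FakeConf()
assert is_loaded(conf)
conf.mechanism_drivers = ['a', 'b']
assert not is_loaded(conf)
conf.mechanism_drivers = ['a', 'b', 'linuxbridge']
conf.core_plugin = 'my_foo_plugin'
assert not is_loaded(conf)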
+ +from neutron_lib import constants +from oslo_config import cfg + +from neutron.services.trunk.drivers.linuxbridge import driver +from neutron.tests import base + + +class LinuxBridgeDriverTestCase(base.BaseTestCase): + + def test_driver_is_loaded(self): + inst = driver.LinuxBridgeDriver.create() + cfg.CONF.set_override('mechanism_drivers', + ['a', 'b', 'linuxbridge'], group='ml2') + self.assertTrue(inst.is_loaded) + cfg.CONF.set_override('mechanism_drivers', + ['a', 'b'], group='ml2') + self.assertFalse(inst.is_loaded) + cfg.CONF.set_override('core_plugin', 'my_foo_plugin') + self.assertFalse(inst.is_loaded) + + def test_driver_properties(self): + inst = driver.LinuxBridgeDriver.create() + self.assertEqual(driver.NAME, inst.name) + self.assertEqual(driver.SUPPORTED_INTERFACES, inst.interfaces) + self.assertEqual(driver.SUPPORTED_SEGMENTATION_TYPES, + inst.segmentation_types) + self.assertEqual(constants.AGENT_TYPE_LINUXBRIDGE, inst.agent_type) + self.assertTrue(inst.can_trunk_bound_port) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_driver.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_driver.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_driver.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_driver.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,28 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.api.rpc.callbacks import resources +from neutron.services.trunk.drivers.openvswitch.agent import driver +from neutron.tests import base + + +class OvsTrunkSkeletonTest(base.BaseTestCase): + + @mock.patch("neutron.api.rpc.callbacks.resource_manager." + "ConsumerResourceCallbacksManager.unregister") + def test___init__(self, mocked_unregister): + test_obj = driver.OVSTrunkSkeleton() + mocked_unregister.assert_called_with(test_obj.handle_trunks, + resources.TRUNK) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,67 @@ +# Copyright (c) 2016 Red Hat +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from oslo_utils import uuidutils +import testtools + +from neutron.common import utils as common_utils +from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager +from neutron.tests import base + +NATIVE_OVSDB_CONNECTION = ( + 'neutron.agent.ovsdb.impl_idl.OvsdbIdl.ovsdb_connection') + + +class TrunkParentPortTestCase(base.BaseTestCase): + def setUp(self): + super(TrunkParentPortTestCase, self).setUp() + # Mock out connecting to ovsdb + mock.patch(NATIVE_OVSDB_CONNECTION).start() + trunk_id = uuidutils.generate_uuid() + port_id = uuidutils.generate_uuid() + trunk_mac = common_utils.get_random_mac('fa:16:3e:00:00:00'.split(':')) + self.trunk = trunk_manager.TrunkParentPort( + trunk_id, port_id, trunk_mac) + + def test_multiple_transactions(self): + def method_inner(trunk): + with trunk.ovsdb_transaction() as txn: + return id(txn) + + def method_outer(trunk): + with trunk.ovsdb_transaction() as txn: + return method_inner(trunk), id(txn) + + with self.trunk.ovsdb_transaction() as txn1: + mock_commit = mock.patch.object(txn1, 'commit').start() + txn_inner_id, txn_outer_id = method_outer(self.trunk) + self.assertFalse(mock_commit.called) + self.assertTrue(mock_commit.called) + self.assertTrue(id(txn1) == txn_inner_id == txn_outer_id) + + def test_transaction_raises_error(self): + class MyException(Exception): + pass + + with testtools.ExpectedException(MyException): + with self.trunk.ovsdb_transaction() as txn1: + mock.patch.object(txn1, 'commit').start() + raise MyException() + self.assertIsNone(self.trunk._transaction) + with self.trunk.ovsdb_transaction() as txn2: + mock.patch.object(txn2, 'commit').start() + self.assertIsNot(txn1, txn2) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/drivers/openvswitch/test_driver.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/drivers/openvswitch/test_driver.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/drivers/openvswitch/test_driver.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/drivers/openvswitch/test_driver.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,63 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
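The trunk_manager tests above rely on ovsdb_transaction() being reentrant: nested uses yield one shared transaction, only the outermost exit commits, and the cached transaction is dropped on error. A self-contained sketch of that pattern with illustrative names, not the real OVSDB transaction API:

import contextlib

class FakeTxn(object):
    def __init__(self):
        self.committed = False

    def commit(self):
        self.committed = True

class ReentrantTxnHolder(object):
    def __init__(self):
        self._transaction = None
        self._depth = 0

    @contextlib.contextmanager
    def ovsdb_transaction(self):
        if self._transaction is None:
            self._transaction = FakeTxn()
        self._depth += 1
        try:
            yield self._transaction
        except Exception:
            # mirror test_transaction_raises_error: forget the txn on error
            self._transaction = None
            self._depth = 0
            raise
        else:
            self._depth -= 1
            if self._depth == 0:
                txn, self._transaction = self._transaction, None
                txn.commit()  # only the outermost exit commits

holder = ReentrantTxnHolder()
with holder.ovsdb_transaction() as outer:
    with holder.ovsdb_transaction() as inner:
        assert inner is outer
        assert not outer.committed
assert outer.committed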
+ +import mock + +from neutron_lib import constants +from oslo_config import cfg + +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.plugins.ml2.drivers.openvswitch.agent.common import ( + constants as agent_consts) +from neutron.services.trunk.drivers.openvswitch import driver +from neutron.tests import base + + +class OVSDriverTestCase(base.BaseTestCase): + + def test_driver_creation(self): + ovs_driver = driver.OVSDriver.create() + self.assertFalse(ovs_driver.is_loaded) + self.assertEqual(driver.NAME, ovs_driver.name) + self.assertEqual(driver.SUPPORTED_INTERFACES, ovs_driver.interfaces) + self.assertEqual(driver.SUPPORTED_SEGMENTATION_TYPES, + ovs_driver.segmentation_types) + self.assertEqual(constants.AGENT_TYPE_OVS, ovs_driver.agent_type) + self.assertFalse(ovs_driver.can_trunk_bound_port) + self.assertTrue( + ovs_driver.is_agent_compatible(constants.AGENT_TYPE_OVS)) + self.assertTrue( + ovs_driver.is_interface_compatible(driver.SUPPORTED_INTERFACES[0])) + + def test_driver_is_loaded(self): + cfg.CONF.set_override('mechanism_drivers', + 'openvswitch', group='ml2') + ovs_driver = driver.OVSDriver.create() + self.assertTrue(ovs_driver.is_loaded) + + def test_driver_is_not_loaded(self): + cfg.CONF.set_override('core_plugin', 'my_foo_plugin') + ovs_driver = driver.OVSDriver.create() + self.assertFalse(ovs_driver.is_loaded) + + @mock.patch('neutron.services.trunk.utils.gen_trunk_br_name') + def test_vif_details_bridge_name_handler_registration(self, + mock_gen_br_name): + driver.register() + mock_gen_br_name.return_value = 'fake-trunk-br-name' + test_trigger = mock.Mock() + registry.notify(agent_consts.OVS_BRIDGE_NAME, events.BEFORE_READ, + test_trigger, **{'port': {'trunk_details': + {'trunk_id': 'foo'}}}) + test_trigger.assert_called_once_with('fake-trunk-br-name') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/fakes.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/fakes.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/fakes.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/fakes.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,59 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from neutron.services.trunk.drivers import base + + +class FakeDriver(base.DriverBase): + + @property + def is_loaded(self): + return True + + @classmethod + def create(cls): + return cls('foo_name', ('foo_intfs',), ('foo_seg_types',)) + + +class FakeDriver2(base.DriverBase): + + @property + def is_loaded(self): + return True + + @classmethod + def create(cls): + return cls('foo_name2', ('foo_intf2',), ('foo_seg_types',)) + + +class FakeDriverCanTrunkBoundPort(base.DriverBase): + + @property + def is_loaded(self): + return True + + @classmethod + def create(cls): + return cls('foo_name3', ('foo_intfs',), + ('foo_seg_types',), can_trunk_bound_port=True) + + +class FakeDriverWithAgent(base.DriverBase): + + @property + def is_loaded(self): + return True + + @classmethod + def create(cls): + return cls('foo_name4', ('foo_intfs',), ('foo_seg_types',), "foo_type") diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/rpc/test_agent.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/rpc/test_agent.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/rpc/test_agent.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/rpc/test_agent.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,48 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import cfg +import oslo_messaging + +from neutron.api.rpc.callbacks import resources +from neutron.api.rpc.handlers import resources_rpc +from neutron.services.trunk.rpc import agent +from neutron.tests import base + + +class TrunkSkeletonTest(base.BaseTestCase): + # TODO(fitoduarte): add more tests to improve coverage of the module + @mock.patch("neutron.api.rpc.callbacks.resource_manager."
+ "ConsumerResourceCallbacksManager.register") + @mock.patch("neutron.common.rpc.get_server") + def test___init__(self, mocked_get_server, mocked_register): + test_obj = agent.TrunkSkeleton() + self.assertEqual(2, mocked_register.call_count) + calls = [mock.call(test_obj.handle_trunks, resources.TRUNK), + mock.call(test_obj.handle_subports, resources.SUBPORT)] + mocked_register.assert_has_calls(calls, any_order=True) + + # Test to see if the call to rpc.get_server has the correct + # target and the correct endpoints + topic = resources_rpc.resource_type_versioned_topic(resources.SUBPORT) + subport_target = oslo_messaging.Target( + topic=topic, server=cfg.CONF.host, fanout=True) + topic = resources_rpc.resource_type_versioned_topic(resources.TRUNK) + trunk_target = oslo_messaging.Target( + topic=topic, server=cfg.CONF.host, fanout=True) + calls = [mock.call(subport_target, mock.ANY), + mock.call(trunk_target, mock.ANY)] + mocked_get_server.assert_has_calls(calls, any_order=True) + self.assertIn("ResourcesPushRpcCallback", + str(mocked_get_server.call_args_list)) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/rpc/test_backend.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/rpc/test_backend.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/rpc/test_backend.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/rpc/test_backend.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,70 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from neutron.api.rpc.callbacks import resource_manager +from neutron.callbacks import events +from neutron.services.trunk import callbacks +from neutron.services.trunk import constants as trunk_consts +from neutron.services.trunk.rpc import backend +from neutron.tests import base + + +class ServerSideRpcBackendTest(base.BaseTestCase): + # TODO(fitoduarte): add more tests to improve coverage of the module + def setUp(self): + super(ServerSideRpcBackendTest, self).setUp() + self.register_mock = mock.patch.object( + resource_manager.ResourceCallbacksManager, "register").start() + + @mock.patch("neutron.callbacks.manager.CallbacksManager.subscribe") + def test___init__(self, mocked_subscribe): + test_obj = backend.ServerSideRpcBackend() + + calls = [mock.call(test_obj.process_event, + trunk_consts.TRUNK, + events.AFTER_CREATE), + mock.call(test_obj.process_event, + trunk_consts.TRUNK, + events.AFTER_DELETE), + mock.call(test_obj.process_event, + trunk_consts.SUBPORTS, + events.AFTER_CREATE), + mock.call(test_obj.process_event, + trunk_consts.SUBPORTS, + events.AFTER_DELETE) + ] + mocked_subscribe.assert_has_calls(calls, any_order=True) + + def test_process_event(self): + test_obj = backend.ServerSideRpcBackend() + test_obj._stub = mock_stub = mock.Mock() + trunk_plugin = mock.Mock() + + test_obj.process_event( + trunk_consts.TRUNK, events.AFTER_CREATE, trunk_plugin, + callbacks.TrunkPayload("context", + "id", + current_trunk="current_trunk")) + test_obj.process_event( + trunk_consts.TRUNK, events.AFTER_DELETE, trunk_plugin, + callbacks.TrunkPayload("context", + "id", + original_trunk="original_trunk")) + + calls = [mock.call.trunk_created("context", + "current_trunk"), + mock.call.trunk_deleted("context", + "original_trunk")] + mock_stub.assert_has_calls(calls, any_order=False) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/rpc/test_server.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/rpc/test_server.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/rpc/test_server.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/rpc/test_server.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,148 @@ +# Copyright 2016 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+ +import mock +from oslo_config import cfg +import oslo_messaging + +from neutron.api.rpc.callbacks import events +from neutron.api.rpc.callbacks import resources +from neutron.api.rpc.handlers import resources_rpc +from neutron.extensions import portbindings +from neutron import manager +from neutron.objects import trunk as trunk_obj +from neutron.services.trunk import constants +from neutron.services.trunk import drivers +from neutron.services.trunk import plugin as trunk_plugin +from neutron.services.trunk.rpc import constants as rpc_consts +from neutron.services.trunk.rpc import server +from neutron.tests import base +from neutron.tests.unit.plugins.ml2 import test_plugin + + +class TrunkSkeletonTest(test_plugin.Ml2PluginV2TestCase): + def setUp(self): + super(TrunkSkeletonTest, self).setUp() + self.drivers_patch = mock.patch.object(drivers, 'register').start() + self.compat_patch = mock.patch.object( + trunk_plugin.TrunkPlugin, 'check_compatibility').start() + self.trunk_plugin = trunk_plugin.TrunkPlugin() + self.trunk_plugin.add_segmentation_type('vlan', lambda x: True) + self.core_plugin = manager.NeutronManager.get_plugin() + + def _create_test_trunk(self, port, subports=None): + subports = subports if subports else [] + trunk = {'port_id': port['port']['id'], + 'tenant_id': 'test_tenant', + 'sub_ports': subports + } + response = ( + self.trunk_plugin.create_trunk(self.context, {'trunk': trunk})) + return response + + @mock.patch("neutron.api.rpc.callbacks.resource_manager." + "ResourceCallbacksManager.register") + @mock.patch("neutron.common.rpc.get_server") + def test___init__(self, mocked_get_server, mocked_registered): + test_obj = server.TrunkSkeleton() + mocked_registered.assert_called_with(server.trunk_by_port_provider, + resources.TRUNK) + trunk_target = oslo_messaging.Target(topic=rpc_consts.TRUNK_BASE_TOPIC, + server=cfg.CONF.host, + fanout=False) + mocked_get_server.assert_called_with(trunk_target, [test_obj]) + + def test_update_subport_bindings(self): + with self.port() as _parent_port: + parent_port = _parent_port + trunk = self._create_test_trunk(parent_port) + port_data = {portbindings.HOST_ID: 'trunk_host_id'} + self.core_plugin.update_port( + self.context, parent_port['port']['id'], {'port': port_data}) + subports = [] + for vid in range(0, 3): + with self.port() as new_port: + obj = trunk_obj.SubPort( + context=self.context, + trunk_id=trunk['id'], + port_id=new_port['port']['id'], + segmentation_type='vlan', + segmentation_id=vid) + subports.append(obj) + + test_obj = server.TrunkSkeleton() + test_obj._trunk_plugin = self.trunk_plugin + test_obj._core_plugin = self.core_plugin + updated_subports = test_obj.update_subport_bindings(self.context, + subports=subports) + self.assertIn(trunk['id'], updated_subports) + for port in updated_subports[trunk['id']]: + self.assertEqual('trunk_host_id', port[portbindings.HOST_ID]) + + @mock.patch('neutron.api.rpc.callbacks.producer.registry.provide') + def test_update_trunk_status(self, _): + with self.port() as _parent_port: + parent_port = _parent_port + trunk = self._create_test_trunk(parent_port) + trunk_id = trunk['id'] + + test_obj = server.TrunkSkeleton() + test_obj._trunk_plugin = self.trunk_plugin + self.assertEqual(constants.PENDING_STATUS, trunk['status']) + test_obj.update_trunk_status(self.context, + trunk_id, + constants.ACTIVE_STATUS) + updated_trunk = self.trunk_plugin.get_trunk(self.context, trunk_id) + self.assertEqual(constants.ACTIVE_STATUS, updated_trunk['status']) + + +class 
TrunkStubTest(base.BaseTestCase): + def setUp(self): + super(TrunkStubTest, self).setUp() + self.test_obj = server.TrunkStub() + + def test___init__(self): + self.assertIsInstance(self.test_obj._resource_rpc, + resources_rpc.ResourcesPushRpcApi) + + @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi." + "push") + def test_trunk_created(self, mocked_push): + m_context = mock.Mock() + m_trunk = mock.Mock() + self.test_obj.trunk_created(m_context, m_trunk) + mocked_push.assert_called_with(m_context, [m_trunk], events.CREATED) + + @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi." + "push") + def test_trunk_deleted(self, mocked_push): + m_context = mock.Mock() + m_trunk = mock.Mock() + self.test_obj.trunk_deleted(m_context, m_trunk) + mocked_push.assert_called_with(m_context, [m_trunk], events.DELETED) + + @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi." + "push") + def test_subports_added(self, mocked_push): + m_context = mock.Mock() + m_subports = mock.Mock() + self.test_obj.subports_added(m_context, m_subports) + mocked_push.assert_called_with(m_context, m_subports, events.CREATED) + + @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi." + "push") + def test_subports_deleted(self, mocked_push): + m_context = mock.Mock() + m_subports = mock.Mock() + self.test_obj.subports_deleted(m_context, m_subports) + mocked_push.assert_called_with(m_context, m_subports, events.DELETED) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/test_db.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/test_db.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/test_db.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/test_db.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,50 +0,0 @@ -# Copyright 2016 Hewlett Packard Enterprise Development Company, LP -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from neutron import context -from neutron.db import models_v2 -from neutron.services.trunk import db -from neutron.services.trunk import exceptions -from neutron.tests.unit import testlib_api - - -class TrunkDBTestCase(testlib_api.SqlTestCase): - - def setUp(self): - super(TrunkDBTestCase, self).setUp() - self.ctx = context.get_admin_context() - - def _add_network(self, net_id): - with self.ctx.session.begin(subtransactions=True): - self.ctx.session.add(models_v2.Network(id=net_id)) - - def _add_port(self, net_id, port_id): - with self.ctx.session.begin(subtransactions=True): - port = models_v2.Port(id=port_id, - network_id=net_id, - mac_address='foo_mac_%s' % port_id, - admin_state_up=True, - status='DOWN', - device_id='', - device_owner='') - self.ctx.session.add(port) - - def test_create_trunk_raise_port_in_use(self): - self._add_network('foo_net') - self._add_port('foo_net', 'foo_port') - db.create_trunk(self.ctx, 'foo_port') - self.assertRaises(exceptions.TrunkPortInUse, - db.create_trunk, - self.ctx, 'foo_port') diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/test_plugin.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/test_plugin.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/test_plugin.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/test_plugin.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,287 @@ +# Copyright 2016 Hewlett Packard Enterprise Development Company, LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
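The new test_plugin.py below verifies that every trunk and subport operation fires registry callbacks carrying a TrunkPayload with the before/after objects. A stripped-down sketch of that publish/subscribe shape; the Registry and Payload classes here are hypothetical stand-ins, not neutron.callbacks:

AFTER_CREATE = 'after_create'

class Payload(object):
    def __init__(self, context, resource_id, current=None, original=None):
        self.context = context
        self.resource_id = resource_id
        self.current = current
        self.original = original

class Registry(object):
    def __init__(self):
        self._subscribers = {}

    def subscribe(self, callback, resource, event):
        self._subscribers.setdefault((resource, event), []).append(callback)

    def notify(self, resource, event, trigger, payload):
        for callback in self._subscribers.get((resource, event), []):
            callback(resource, event, trigger, payload=payload)

registry = Registry()
seen = []
registry.subscribe(lambda *a, **kw: seen.append(kw['payload']),
                   'trunk', AFTER_CREATE)
registry.notify('trunk', AFTER_CREATE, 'plugin',
                Payload('ctx', 'trunk-1', current={'status': 'PENDING'}))
assert seen[0].resource_id == 'trunk-1'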
+ +import mock + +import testtools + +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron import manager +from neutron.objects import trunk as trunk_objects +from neutron.services.trunk import callbacks +from neutron.services.trunk import constants +from neutron.services.trunk import drivers +from neutron.services.trunk import exceptions as trunk_exc +from neutron.services.trunk import plugin as trunk_plugin +from neutron.tests.unit.plugins.ml2 import test_plugin +from neutron.tests.unit.services.trunk import fakes + + +def create_subport_dict(port_id): + return {'segmentation_type': 'vlan', + 'segmentation_id': 123, + 'port_id': port_id} + + +def register_mock_callback(resource, event): + callback = mock.Mock() + registry.subscribe(callback, resource, event) + return callback + + +class TrunkPluginTestCase(test_plugin.Ml2PluginV2TestCase): + + def setUp(self): + super(TrunkPluginTestCase, self).setUp() + self.drivers_patch = mock.patch.object(drivers, 'register').start() + self.compat_patch = mock.patch.object( + trunk_plugin.TrunkPlugin, 'check_compatibility').start() + self.trunk_plugin = trunk_plugin.TrunkPlugin() + self.trunk_plugin.add_segmentation_type('vlan', lambda x: True) + + def _create_test_trunk(self, port, subports=None): + subports = subports if subports else [] + trunk = {'port_id': port['port']['id'], + 'tenant_id': 'test_tenant', + 'sub_ports': subports} + response = ( + self.trunk_plugin.create_trunk(self.context, {'trunk': trunk})) + return response + + def _get_trunk_obj(self, trunk_id): + return trunk_objects.Trunk.get_object(self.context, id=trunk_id) + + def _get_subport_obj(self, port_id): + subports = trunk_objects.SubPort.get_objects( + self.context, port_id=port_id) + return subports[0] + + def _test_delete_port_raise_in_use(self, parent_port, child_port, port_id, + exception): + subport = create_subport_dict(child_port['port']['id']) + self._create_test_trunk(parent_port, [subport]) + core_plugin = manager.NeutronManager.get_plugin() + self.assertRaises(exception, core_plugin.delete_port, + self.context, port_id) + + def test_delete_port_raise_in_use_by_trunk(self): + with self.port() as parent_port, self.port() as child_port: + self._test_delete_port_raise_in_use( + parent_port, child_port, parent_port['port']['id'], + trunk_exc.PortInUseAsTrunkParent) + + def test_delete_port_raise_in_use_by_subport(self): + with self.port() as parent_port, self.port() as child_port: + self._test_delete_port_raise_in_use( + parent_port, child_port, child_port['port']['id'], + trunk_exc.PortInUseAsSubPort) + + def test_delete_trunk_raise_in_use(self): + with self.port() as port: + trunk = self._create_test_trunk(port) + core_plugin = manager.NeutronManager.get_plugin() + port['port']['binding:host_id'] = 'host' + core_plugin.update_port(self.context, port['port']['id'], port) + self.assertRaises(trunk_exc.TrunkInUse, + self.trunk_plugin.delete_trunk, + self.context, trunk['id']) + + def _test_trunk_create_notify(self, event): + with self.port() as parent_port: + callback = register_mock_callback(constants.TRUNK, event) + trunk = self._create_test_trunk(parent_port) + trunk_obj = self._get_trunk_obj(trunk['id']) + payload = callbacks.TrunkPayload(self.context, trunk['id'], + current_trunk=trunk_obj) + callback.assert_called_once_with( + constants.TRUNK, event, self.trunk_plugin, payload=payload) + + def test_create_trunk_notify_after_create(self): + self._test_trunk_create_notify(events.AFTER_CREATE) + + def 
test_create_trunk_notify_precommit_create(self): + self._test_trunk_create_notify(events.PRECOMMIT_CREATE) + + def _test_trunk_update_notify(self, event): + with self.port() as parent_port: + callback = register_mock_callback(constants.TRUNK, event) + trunk = self._create_test_trunk(parent_port) + orig_trunk_obj = self._get_trunk_obj(trunk['id']) + trunk_req = {'trunk': {'name': 'foo'}} + self.trunk_plugin.update_trunk(self.context, trunk['id'], + trunk_req) + trunk_obj = self._get_trunk_obj(trunk['id']) + payload = callbacks.TrunkPayload(self.context, trunk['id'], + original_trunk=orig_trunk_obj, + current_trunk=trunk_obj) + callback.assert_called_once_with( + constants.TRUNK, event, self.trunk_plugin, payload=payload) + + def test_trunk_update_notify_after_update(self): + self._test_trunk_update_notify(events.AFTER_UPDATE) + + def test_trunk_update_notify_precommit_update(self): + self._test_trunk_update_notify(events.PRECOMMIT_UPDATE) + + def _test_trunk_delete_notify(self, event): + with self.port() as parent_port: + callback = register_mock_callback(constants.TRUNK, event) + trunk = self._create_test_trunk(parent_port) + trunk_obj = self._get_trunk_obj(trunk['id']) + self.trunk_plugin.delete_trunk(self.context, trunk['id']) + payload = callbacks.TrunkPayload(self.context, trunk['id'], + original_trunk=trunk_obj) + callback.assert_called_once_with( + constants.TRUNK, event, self.trunk_plugin, payload=payload) + + def test_delete_trunk_notify_after_delete(self): + self._test_trunk_delete_notify(events.AFTER_DELETE) + + def test_delete_trunk_notify_precommit_delete(self): + self._test_trunk_delete_notify(events.PRECOMMIT_DELETE) + + def _test_subport_action_empty_list_no_notify(self, event, subport_method): + with self.port() as parent_port: + trunk = self._create_test_trunk(parent_port) + callback = register_mock_callback(constants.SUBPORTS, event) + subport_method(self.context, trunk['id'], {'sub_ports': []}) + callback.assert_not_called() + + def _test_add_subports_no_notification(self, event): + self._test_subport_action_empty_list_no_notify( + event, self.trunk_plugin.add_subports) + + def test_add_subports_notify_after_create_empty_list(self): + self._test_add_subports_no_notification(events.AFTER_CREATE) + + def test_add_subports_notify_precommit_create_empty_list(self): + self._test_add_subports_no_notification(events.PRECOMMIT_CREATE) + + def _test_remove_subports_no_notification(self, event): + self._test_subport_action_empty_list_no_notify( + event, self.trunk_plugin.remove_subports) + + def test_remove_subports_notify_after_delete_empty_list(self): + self._test_remove_subports_no_notification(events.AFTER_DELETE) + + def test_remove_subports_notify_precommit_delete_empty_list(self): + self._test_remove_subports_no_notification(events.PRECOMMIT_DELETE) + + def _test_add_subports_notify(self, event): + with self.port() as parent_port, self.port() as child_port: + trunk = self._create_test_trunk(parent_port) + orig_trunk_obj = self._get_trunk_obj(trunk['id']) + subport = create_subport_dict(child_port['port']['id']) + callback = register_mock_callback(constants.SUBPORTS, event) + self.trunk_plugin.add_subports( + self.context, trunk['id'], {'sub_ports': [subport]}) + trunk_obj = self._get_trunk_obj(trunk['id']) + subport_obj = self._get_subport_obj(subport['port_id']) + payload = callbacks.TrunkPayload(self.context, trunk['id'], + current_trunk=trunk_obj, + original_trunk=orig_trunk_obj, + subports=[subport_obj]) + callback.assert_called_once_with( + constants.SUBPORTS, event, 
self.trunk_plugin, payload=payload) + + def test_add_subports_notify_after_create(self): + self._test_add_subports_notify(events.AFTER_CREATE) + + def test_add_subports_notify_precommit_create(self): + self._test_add_subports_notify(events.PRECOMMIT_CREATE) + + def _test_remove_subports_notify(self, event): + with self.port() as parent_port, self.port() as child_port: + subport = create_subport_dict(child_port['port']['id']) + trunk = self._create_test_trunk(parent_port, [subport]) + orig_trunk_obj = self._get_trunk_obj(trunk['id']) + callback = register_mock_callback(constants.SUBPORTS, event) + subport_obj = self._get_subport_obj(subport['port_id']) + self.trunk_plugin.remove_subports( + self.context, trunk['id'], {'sub_ports': [subport]}) + trunk_obj = self._get_trunk_obj(trunk['id']) + payload = callbacks.TrunkPayload(self.context, trunk['id'], + current_trunk=trunk_obj, + original_trunk=orig_trunk_obj, + subports=[subport_obj]) + callback.assert_called_once_with( + constants.SUBPORTS, event, self.trunk_plugin, payload=payload) + + def test_remove_subports_notify_after_delete(self): + self._test_remove_subports_notify(events.AFTER_DELETE) + + def test_remove_subports_notify_precommit_delete(self): + self._test_remove_subports_notify(events.PRECOMMIT_DELETE) + + def test_create_trunk_in_pending_state(self): + with self.port() as port: + trunk = self._create_test_trunk(port) + self.assertEqual( + constants.PENDING_STATUS, trunk['status']) + + def test_add_subports_trunk_in_error_state_raises(self): + with self.port() as port, self.port() as subport: + trunk = self._create_test_trunk(port) + trunk_obj = self._get_trunk_obj(trunk['id']) + trunk_obj.status = constants.ERROR_STATUS + trunk_obj.update() + s = create_subport_dict(subport['port']['id']) + self.assertRaises(trunk_exc.TrunkInErrorState, + self.trunk_plugin.add_subports, + self.context, trunk['id'], {'sub_ports': [s]}) + + def test_add_subports_trunk_goes_to_pending(self): + with self.port() as port, self.port() as subport: + trunk = self._create_test_trunk(port) + trunk_obj = self._get_trunk_obj(trunk['id']) + trunk_obj.status = constants.ACTIVE_STATUS + trunk_obj.update() + s = create_subport_dict(subport['port']['id']) + trunk = self.trunk_plugin.add_subports( + self.context, trunk['id'], {'sub_ports': [s]}) + self.assertEqual(constants.PENDING_STATUS, trunk['status']) + + def test_remove_subports_trunk_goes_to_pending(self): + with self.port() as port, self.port() as subport: + s = create_subport_dict(subport['port']['id']) + trunk = self._create_test_trunk(port, [s]) + trunk_obj = self._get_trunk_obj(trunk['id']) + trunk_obj.status = constants.ACTIVE_STATUS + trunk_obj.update() + trunk = self.trunk_plugin.remove_subports( + self.context, trunk['id'], + {'sub_ports': [{'port_id': subport['port']['id']}]}) + self.assertEqual(constants.PENDING_STATUS, trunk['status']) + + +class TrunkPluginDriversTestCase(test_plugin.Ml2PluginV2TestCase): + + def setUp(self): + super(TrunkPluginDriversTestCase, self).setUp() + mock.patch.object(drivers, 'register').start() + + def test_plugin_fails_to_start(self): + with testtools.ExpectedException( + trunk_exc.IncompatibleTrunkPluginConfiguration): + trunk_plugin.TrunkPlugin() + + def test_plugin_with_fake_driver(self): + fake_driver = fakes.FakeDriver.create() + plugin = trunk_plugin.TrunkPlugin() + self.assertTrue(fake_driver.is_loaded) + self.assertEqual(set([]), plugin.supported_agent_types) + self.assertEqual(set(['foo_intfs']), plugin.supported_interfaces) + 
self.assertEqual([fake_driver], plugin.registered_drivers) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/test_rules.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/test_rules.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/test_rules.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/test_rules.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,228 @@ +# Copyright 2016 Hewlett Packard Enterprise Development Company, LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +import testtools + +from neutron_lib import exceptions as n_exc +from oslo_utils import uuidutils + +from neutron import manager +from neutron.plugins.common import utils +from neutron.services.trunk import constants +from neutron.services.trunk import drivers +from neutron.services.trunk import exceptions as trunk_exc +from neutron.services.trunk import plugin as trunk_plugin +from neutron.services.trunk import rules +from neutron.services.trunk import utils as trunk_utils +from neutron.tests import base +from neutron.tests.unit.plugins.ml2 import test_plugin +from neutron.tests.unit.services.trunk import fakes + + +class SubPortsValidatorTestCase(base.BaseTestCase): + + def setUp(self): + super(SubPortsValidatorTestCase, self).setUp() + self.segmentation_types = {constants.VLAN: utils.is_valid_vlan_tag} + self.context = mock.ANY + + def test_validate_subport_subport_and_trunk_shared_port_id(self): + shared_id = uuidutils.generate_uuid() + validator = rules.SubPortsValidator( + self.segmentation_types, + [{'port_id': shared_id, + 'segmentation_type': 'vlan', + 'segmentation_id': 2}], + shared_id) + self.assertRaises(trunk_exc.ParentPortInUse, + validator.validate, self.context) + + def test_validate_subport_invalid_vlan_id(self): + validator = rules.SubPortsValidator( + self.segmentation_types, + [{'port_id': uuidutils.generate_uuid(), + 'segmentation_type': 'vlan', + 'segmentation_id': 5000}]) + self.assertRaises(n_exc.InvalidInput, + validator.validate, + self.context) + + def test_validate_subport_vlan_id_not_an_int(self): + validator = rules.SubPortsValidator( + self.segmentation_types, + [{'port_id': uuidutils.generate_uuid(), + 'segmentation_type': 'vlan', + 'segmentation_id': 'IamNotAnumber'}]) + self.assertRaises(n_exc.InvalidInput, + validator.validate, + self.context) + + def test_validate_subport_valid_vlan_id_as_string(self): + validator = rules.SubPortsValidator( + self.segmentation_types, + [{'port_id': uuidutils.generate_uuid(), + 'segmentation_type': 'vlan', + 'segmentation_id': '2'}]) + with mock.patch.object(rules.TrunkPortValidator, 'validate') as f: + validator.validate(self.context) + f.assert_called_once_with(self.context) + + def test_validate_subport_subport_invalid_segmentation_type(self): + validator = rules.SubPortsValidator( + self.segmentation_types, + [{'port_id': uuidutils.generate_uuid(), + 'segmentation_type': 'fake', + 'segmentation_id': 100}]) +
self.assertRaises(n_exc.InvalidInput, + validator.validate, + self.context) + + def test_validate_subport_missing_segmentation_type(self): + validator = rules.SubPortsValidator( + self.segmentation_types, + [{'port_id': uuidutils.generate_uuid(), + 'segmentation_id': 100}]) + self.assertRaises(n_exc.InvalidInput, + validator.validate, + self.context) + + def test_validate_subport_missing_segmentation_id(self): + validator = rules.SubPortsValidator( + self.segmentation_types, + [{'port_id': uuidutils.generate_uuid(), + 'segmentation_type': 'fake'}]) + self.assertRaises(n_exc.InvalidInput, + validator.validate, + self.context) + + def test_validate_subport_missing_port_id(self): + validator = rules.SubPortsValidator( + self.segmentation_types, + [{'segmentation_type': 'fake', + 'segmentation_id': 100}]) + self.assertRaises(n_exc.InvalidInput, + validator.validate, + self.context, basic_validation=True) + + +class TrunkPortValidatorTestCase(test_plugin.Ml2PluginV2TestCase): + + def setUp(self): + super(TrunkPortValidatorTestCase, self).setUp() + self.drivers_patch = mock.patch.object(drivers, 'register').start() + self.compat_patch = mock.patch.object( + trunk_plugin.TrunkPlugin, 'check_compatibility').start() + self.trunk_plugin = trunk_plugin.TrunkPlugin() + self.trunk_plugin.add_segmentation_type(constants.VLAN, + utils.is_valid_vlan_tag) + + def test_validate_port_parent_in_use_by_trunk(self): + with self.port() as trunk_parent: + trunk = {'port_id': trunk_parent['port']['id'], + 'tenant_id': 'test_tenant', + 'sub_ports': []} + self.trunk_plugin.create_trunk(self.context, {'trunk': trunk}) + validator = rules.TrunkPortValidator(trunk_parent['port']['id']) + self.assertRaises(trunk_exc.ParentPortInUse, + validator.validate, + self.context) + + def test_validate_port_id_in_use_by_unrelated_trunk(self): + with self.port() as trunk_parent,\ + self.port() as subport: + trunk = {'port_id': trunk_parent['port']['id'], + 'tenant_id': 'test_tenant', + 'sub_ports': [{'port_id': subport['port']['id'], + 'segmentation_type': 'vlan', + 'segmentation_id': 2}]} + self.trunk_plugin.create_trunk(self.context, {'trunk': trunk}) + validator = rules.TrunkPortValidator(subport['port']['id']) + self.assertRaises(trunk_exc.TrunkPortInUse, + validator.validate, + self.context) + + def test_validate_port_has_binding_host(self): + with self.port() as port: + core_plugin = manager.NeutronManager.get_plugin() + port['port']['binding:host_id'] = 'host' + core_plugin.update_port(self.context, port['port']['id'], port) + validator = rules.TrunkPortValidator(port['port']['id']) + self.assertTrue(validator.is_bound(self.context)) + + def test_validate_port_cannot_be_trunked_raises(self): + with self.port() as port, \ + mock.patch.object(rules.TrunkPortValidator, + "can_be_trunked", return_value=False), \ + testtools.ExpectedException(trunk_exc.ParentPortInUse): + validator = rules.TrunkPortValidator(port['port']['id']) + validator.validate(self.context) + + def test_can_be_trunked_returns_false(self): + # need to trigger a driver registration + fakes.FakeDriverCanTrunkBoundPort.create() + self.trunk_plugin = trunk_plugin.TrunkPlugin() + with self.port() as port, \ + mock.patch.object(manager.NeutronManager, + "get_service_plugins") as f: + f.return_value = {'trunk': self.trunk_plugin} + core_plugin = manager.NeutronManager.get_plugin() + port['port']['binding:host_id'] = 'host' + core_plugin.update_port(self.context, port['port']['id'], port) + validator = rules.TrunkPortValidator(port['port']['id']) + # port cannot be
trunked because of binding mismatch + self.assertFalse(validator.can_be_trunked(self.context)) + + def test_can_be_trunked_returns_true(self): + # need to trigger a driver registration + fakes.FakeDriverCanTrunkBoundPort.create() + self.trunk_plugin = trunk_plugin.TrunkPlugin() + with self.port() as port, \ + mock.patch.object(manager.NeutronManager, + "get_service_plugins") as f, \ + mock.patch.object(trunk_utils, "is_driver_compatible", + return_value=True) as g: + f.return_value = {'trunk': self.trunk_plugin} + core_plugin = manager.NeutronManager.get_plugin() + port['port']['binding:host_id'] = 'host' + core_plugin.update_port(self.context, port['port']['id'], port) + validator = rules.TrunkPortValidator(port['port']['id']) + self.assertTrue(validator.can_be_trunked(self.context)) + self.assertTrue(g.call_count) + + def test_can_be_trunked_unbound_port(self): + with self.port() as port: + validator = rules.TrunkPortValidator(port['port']['id']) + self.assertTrue(validator.can_be_trunked(self.context)) + + def test_can_be_trunked_raises_conflict(self): + d1 = fakes.FakeDriver.create() + d2 = fakes.FakeDriverWithAgent.create() + self.trunk_plugin = trunk_plugin.TrunkPlugin() + self.trunk_plugin._drivers = [d1, d2] + with self.port() as port, \ + mock.patch.object(manager.NeutronManager, + "get_service_plugins") as f, \ + mock.patch.object(trunk_utils, "is_driver_compatible", + return_value=True): + f.return_value = {'trunk': self.trunk_plugin} + core_plugin = manager.NeutronManager.get_plugin() + port['port']['binding:host_id'] = 'host' + core_plugin.update_port(self.context, port['port']['id'], port) + validator = rules.TrunkPortValidator(port['port']['id']) + self.assertRaises( + trunk_exc.TrunkPluginDriverConflict, + validator.can_be_trunked, self.context) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/test_utils.py neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/test_utils.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/services/trunk/test_utils.py 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/services/trunk/test_utils.py 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,69 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock + +from neutron.services.trunk import utils +from neutron.tests.unit.plugins.ml2 import test_plugin +from neutron.tests.unit.services.trunk import fakes + + +class UtilsTestCase(test_plugin.Ml2PluginV2TestCase): + + def test_are_agent_types_available_on_host_returns_false(self): + self.assertFalse( + utils.are_agent_types_available_on_host( + self.context, ['foo_type'], 'foo_host')) + + def test_are_agent_types_available_on_host_returns_true(self): + with mock.patch("neutron.db.agents_db.AgentDbMixin.get_agents") as f: + f.return_value = ['foo_agent'] + self.assertTrue( + utils.are_agent_types_available_on_host( + self.context, ['foo_type'], 'foo_host')) + + def _test_is_driver_compatible(self, driver, interface, host, agents=None): + with mock.patch("neutron.db.agents_db.AgentDbMixin.get_agents") as f: + f.return_value = agents or [] + return utils.is_driver_compatible(self.context, + driver, + interface, + host) + + def test_is_driver_compatible(self): + driver = fakes.FakeDriverWithAgent.create() + self.assertTrue(self._test_is_driver_compatible( + driver, 'foo_intfs', 'foo_host', [{'agent_type': 'foo_type'}])) + + def test_is_driver_compatible_agent_based_agent_mismatch(self): + driver = fakes.FakeDriverWithAgent.create() + self.assertFalse(self._test_is_driver_compatible( + driver, 'foo_intfs', 'foo_host')) + + def test_is_driver_incompatible_because_of_interface_mismatch(self): + driver = fakes.FakeDriverWithAgent.create() + self.assertFalse(self._test_is_driver_compatible( + driver, 'not_my_interface', 'foo_host')) + + def test_is_driver_compatible_agentless(self): + driver = fakes.FakeDriver.create() + self.assertTrue(self._test_is_driver_compatible( + driver, 'foo_intfs', 'foo_host')) + + def test_is_driver_compatible_multiple_drivers(self): + driver1 = fakes.FakeDriverWithAgent.create() + driver2 = fakes.FakeDriver2.create() + self.assertTrue(self._test_is_driver_compatible( + driver1, 'foo_intfs', 'foo_host', [{'agent_type': 'foo_type'}])) + self.assertFalse(self._test_is_driver_compatible( + driver2, 'foo_intfs', 'foo_host', [{'agent_type': 'foo_type'}])) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/testlib_api.py neutron-9.0.0~b3~dev557/neutron/tests/unit/testlib_api.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/testlib_api.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/testlib_api.py 2016-08-03 20:10:34.000000000 +0000 @@ -15,10 +15,18 @@ import fixtures import six +import testresources +import testscenarios import testtools +from oslo_db import exception as oslodb_exception +from oslo_db.sqlalchemy import enginefacade +from oslo_db.sqlalchemy import provision +from oslo_db.sqlalchemy import session + from neutron.db import api as db_api # Import all data models +from neutron.db.migration import cli as migration from neutron.db.migration.models import head # noqa from neutron.db import model_base from neutron.tests import base @@ -58,39 +66,317 @@ class SqlFixture(fixtures.Fixture): + """Base of a fixture which can create a schema and delete from + its tables. 
+ + """ - # flag to indicate that the models have been loaded - _TABLES_ESTABLISHED = False + @classmethod + def _generate_schema(cls, engine): + model_base.BASEV2.metadata.create_all(engine) + + def _delete_from_schema(self, engine): + with engine.begin() as conn: + for table in reversed( + model_base.BASEV2.metadata.sorted_tables): + conn.execute(table.delete()) + + def _init_resources(self): + raise NotImplementedError() def _setUp(self): - # Register all data models - engine = db_api.get_engine() - if not SqlFixture._TABLES_ESTABLISHED: - model_base.BASEV2.metadata.create_all(engine) - SqlFixture._TABLES_ESTABLISHED = True - - def clear_tables(): - with engine.begin() as conn: - for table in reversed( - model_base.BASEV2.metadata.sorted_tables): - conn.execute(table.delete()) + self._init_resources() - self.addCleanup(clear_tables) + # check if the fixtures failed to get + # an engine. The test setUp() itself should also be checking + # this and raising skipTest. + if not hasattr(self, 'engine'): + return + engine = self.engine + self.addCleanup(lambda: self._delete_from_schema(engine)) -class SqlTestCaseLight(base.DietTestCase): - """All SQL taste, zero plugin/rpc sugar""" + self.sessionmaker = session.get_maker(engine) - def setUp(self): - super(SqlTestCaseLight, self).setUp() - self.useFixture(SqlFixture()) + self.enginefacade_factory = enginefacade._TestTransactionFactory( + self.engine, self.sessionmaker, apply_global=False, + synchronous_reader=True) + + _restore_factory = db_api.context_manager._root_factory + + db_api.context_manager._root_factory = self.enginefacade_factory + + engine = db_api.context_manager.get_legacy_facade().get_engine() + + self.addCleanup( + lambda: setattr( + db_api.context_manager, + "_root_factory", _restore_factory)) + + self.useFixture(EnableSQLiteFKsFixture(engine)) + + +class EnableSQLiteFKsFixture(fixtures.Fixture): + """Turn SQLite PRAGMA foreign keys on and off for tests. + + FIXME(zzzeek): figure out some way to get oslo.db test_base to honor + oslo_db.engines.create_engine() arguments like sqlite_fks as well + as handling that it needs to be turned off during drops. + """ -class SqlTestCase(base.BaseTestCase): + def __init__(self, engine): + self.engine = engine + + def _setUp(self): + if self.engine.name == 'sqlite': + self.engine.execute("PRAGMA foreign_keys=ON") + + def disable_fks(): + with self.engine.connect() as conn: + conn.connection.rollback() + conn.execute("PRAGMA foreign_keys=OFF") + self.addCleanup(disable_fks) + + +class StaticSqlFixture(SqlFixture): + """Fixture which keeps a single sqlite memory database at the global + scope. + + """ + + _GLOBAL_RESOURCES = False + + @classmethod + def _init_resources(cls): + # this is a classlevel version of what testresources + # does w/ the resources attribute as well as the + # setUpResources() step (which requires a test instance, that + # SqlFixture does not have). Because this is a SQLite memory + # database, we don't actually tear it down, so we can keep + # it running throughout all tests. 
+ if cls._GLOBAL_RESOURCES: + return + else: + cls._GLOBAL_RESOURCES = True + cls.schema_resource = provision.SchemaResource( + provision.DatabaseResource("sqlite"), + cls._generate_schema, teardown=False) + dependency_resources = {} + for name, resource in cls.schema_resource.resources: + dependency_resources[name] = resource.getResource() + cls.schema_resource.make(dependency_resources) + cls.engine = dependency_resources['database'].engine + + +class StaticSqlFixtureNoSchema(SqlFixture): + """Fixture which keeps a single sqlite memory database at the global + scope + + """ + + _GLOBAL_RESOURCES = False + + @classmethod + def _init_resources(cls): + if cls._GLOBAL_RESOURCES: + return + else: + cls._GLOBAL_RESOURCES = True + cls.database_resource = provision.DatabaseResource("sqlite") + dependency_resources = {} + for name, resource in cls.database_resource.resources: + dependency_resources[name] = resource.getResource() + cls.engine = dependency_resources['backend'].engine + + def _delete_from_schema(self, engine): + pass + + +class OpportunisticSqlFixture(SqlFixture): + """Fixture which uses testresources with oslo_db provisioning to + check for available backends and optimize test runs. + + Requires that the test itself implement the resources attribute. + + """ + + DRIVER = 'sqlite' + + def __init__(self, test): + super(OpportunisticSqlFixture, self).__init__() + self.test = test + + @classmethod + def _generate_schema_w_migrations(cls, engine): + alembic_config = migration.get_neutron_config() + with engine.connect() as conn: + alembic_config.attributes['connection'] = conn + migration.do_alembic_command( + alembic_config, 'upgrade', 'heads') + + def _delete_from_schema(self, engine): + if self.test.BUILD_SCHEMA: + super(OpportunisticSqlFixture, self)._delete_from_schema(engine) + + def _init_resources(self): + testresources.setUpResources( + self.test, self.test.resources, testresources._get_result()) + self.addCleanup( + testresources.tearDownResources, + self.test, self.test.resources, testresources._get_result() + ) + + # unfortunately, fixtures won't let us call a skip() from + # here. So the test has to check this also. + # see https://github.com/testing-cabal/fixtures/issues/31 + if hasattr(self.test, 'db'): + self.engine = self.test.engine = self.test.db.engine + + @classmethod + def resources_collection(cls, test): + # reimplement current oslo.db code. + # FIXME(zzzeek) The patterns here are up in the air enough + # that I think keeping this totally separate will give us the + # most leverage in being able to fix oslo.db in an upcoming + # release, then port neutron back to the working version. 
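+ # Note on the block below: the (name, resource) tuples it returns are + # consumed by testresources.setUpResources() through the test's + # 'resources' property; an empty list means the test never acquires a + # 'db' attribute, which _setup_database_fixtures() treats as an + # unavailable backend (skip or fail).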
+ + driver = test.DRIVER + + if driver not in test._database_resources: + try: + test._database_resources[driver] = \ + provision.DatabaseResource(driver) + except oslodb_exception.BackendNotAvailable: + test._database_resources[driver] = None + + database_resource = test._database_resources[driver] + if database_resource is None: + return [] + + key = (driver, None) + if test.BUILD_SCHEMA: + if key not in test._schema_resources: + test._schema_resources[key] = provision.SchemaResource( + database_resource, + cls._generate_schema_w_migrations + if test.BUILD_WITH_MIGRATIONS + else cls._generate_schema, teardown=False) + + schema_resource = test._schema_resources[key] + return [ + ('schema', schema_resource), + ('db', database_resource) + ] + else: + return [ + ('db', database_resource) + ] + + +class BaseSqlTestCase(object): + BUILD_SCHEMA = True def setUp(self): - super(SqlTestCase, self).setUp() - self.useFixture(SqlFixture()) + super(BaseSqlTestCase, self).setUp() + + self._setup_database_fixtures() + + def _setup_database_fixtures(self): + if self.BUILD_SCHEMA: + fixture = StaticSqlFixture() + else: + fixture = StaticSqlFixtureNoSchema() + self.useFixture(fixture) + self.engine = fixture.engine + + +class SqlTestCaseLight(BaseSqlTestCase, base.DietTestCase): + """All SQL taste, zero plugin/rpc sugar""" + + +class SqlTestCase(BaseSqlTestCase, base.BaseTestCase): + """regular sql test""" + + +class OpportunisticDBTestMixin(object): + """Mixin that converts a BaseSqlTestCase to use the OpportunisticSqlFixture. + """ + + SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS') + + FIXTURE = OpportunisticSqlFixture + + BUILD_WITH_MIGRATIONS = False + + def _setup_database_fixtures(self): + self.useFixture(self.FIXTURE(self)) + + if not hasattr(self, 'db'): + msg = "backend '%s' unavailable" % self.DRIVER + if self.SKIP_ON_UNAVAILABLE_DB: + self.skip(msg) + else: + self.fail(msg) + + _schema_resources = {} + _database_resources = {} + + @property + def resources(self): + """this attribute is used by testresources for optimized + sorting of tests. + + This is the big requirement that allows testresources to sort + tests such that database "resources" can be kept open for + many tests at once. + + IMO(zzzeek) "sorting" should not be needed; only that necessary + resources stay open as long as they are needed (or long enough to + reduce overhead). testresources would be improved to not depend on + custom, incompatible-with-pytest "suite classes", fixture information + leaking out of the Fixture classes themselves, and exotic sorting + schemes for something that can nearly always be handled "good enough" + with unittest-standard setupclass/setupmodule schemes. + + """ + + return self.FIXTURE.resources_collection(self) + + +class MySQLTestCaseMixin(OpportunisticDBTestMixin): + """Mixin that turns any BaseSqlTestCase into a MySQL test suite. + + If the MySQL db is unavailable then this test is skipped, unless + OS_FAIL_ON_MISSING_DEPS is enabled. + """ + DRIVER = "mysql" + + +class PostgreSQLTestCaseMixin(OpportunisticDBTestMixin): + """Mixin that turns any BaseSqlTestCase into a PostgreSQL test suite. + + If the PostgreSQL db is unavailable then this test is skipped, unless + OS_FAIL_ON_MISSING_DEPS is enabled. + """ + DRIVER = "postgresql" + + +def module_load_tests(loader, found_tests, pattern): + """Apply OptimisingTestSuite on a per-module basis.
+ + FIXME(zzzeek): oslo.db provides this but the contract that + "pattern" should be None no longer seems to behave as it used + to at the module level, so this function needs to be added in this + form. + + """ + + result = testresources.OptimisingTestSuite() + found_tests = testscenarios.load_tests_apply_scenarios( + loader, found_tests, pattern) + result.addTest(found_tests) + return result class WebTestCase(SqlTestCase): diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/test_policy.py neutron-9.0.0~b3~dev557/neutron/tests/unit/test_policy.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/test_policy.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/test_policy.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,7 +16,7 @@ """Test of Policy Engine For Neutron""" import mock -from neutron_lib import constants as const +from neutron_lib import constants from neutron_lib import exceptions from oslo_db import exception as db_exc from oslo_policy import fixture as op_fixture @@ -27,7 +27,6 @@ import neutron from neutron.api.v2 import attributes from neutron.common import constants as n_const -from neutron.common import exceptions as n_exc from neutron import context from neutron import manager from neutron import policy @@ -330,10 +329,10 @@ oslo_policy.PolicyNotAuthorized) def test_create_port_device_owner_regex(self): - blocked_values = (const.DEVICE_OWNER_NETWORK_PREFIX, + blocked_values = (constants.DEVICE_OWNER_NETWORK_PREFIX, 'network:abdef', - const.DEVICE_OWNER_DHCP, - const.DEVICE_OWNER_ROUTER_INTF) + constants.DEVICE_OWNER_DHCP, + constants.DEVICE_OWNER_ROUTER_INTF) for val in blocked_values: self._test_advsvc_action_on_attr( 'create', 'port', 'device_owner', val, @@ -570,7 +569,7 @@ def test_tenant_id_check_no_target_field_raises(self): # Try and add a bad rule self.assertRaises( - n_exc.PolicyInitError, + exceptions.PolicyInitError, oslo_policy.Rules.from_dict, {'test_policy': 'tenant_id:(wrong_stuff)'}) @@ -580,7 +579,7 @@ action = "create_network" target = {'tenant_id': 'fake'} self.fakepolicyinit() - self.assertRaises(n_exc.PolicyCheckError, + self.assertRaises(exceptions.PolicyCheckError, policy.enforce, self.context, action, target) @@ -617,3 +616,43 @@ policy.log_rule_list(oslo_policy.RuleCheck('rule', 'create_')) self.assertTrue(mock_is_e.called) self.assertTrue(mock_debug.called) + + def test__is_attribute_explicitly_set(self): + action = 'create' + attr = 'attr' + + target = {attr: 'valueA', 'tgt-tenant': 'tenantA'} + resource = {attr: {'allow_post': True, + 'allow_put': True, + 'is_visible': True, + 'enforce_policy': True, + 'validate': {'type:string': 10}}} + + result = policy._is_attribute_explicitly_set( + attr, resource, target, action) + self.assertTrue(result) + + target = {'tgt-tenant': 'tenantA'} + result = policy._is_attribute_explicitly_set( + attr, resource, target, action) + self.assertFalse(result) + + resource = {attr: {'allow_post': True, + 'allow_put': True, + 'is_visible': True, + 'default': 'DfltValue', + 'enforce_policy': True, + 'validate': {'type:string': 10}}} + result = policy._is_attribute_explicitly_set( + attr, resource, target, action) + self.assertFalse(result) + + target = {attr: 'DfltValue', 'tgt-tenant': 'tenantA'} + result = policy._is_attribute_explicitly_set( + attr, resource, target, action) + self.assertFalse(result) + + target = {attr: constants.ATTR_NOT_SPECIFIED, 'tgt-tenant': 'tenantA'} + result = policy._is_attribute_explicitly_set( + attr, resource, target, action) + self.assertFalse(result) diff 
-Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/tests/common/test_net_helpers.py neutron-9.0.0~b3~dev557/neutron/tests/unit/tests/common/test_net_helpers.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/tests/common/test_net_helpers.py 2016-06-17 15:30:29.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/tests/common/test_net_helpers.py 2016-08-29 20:05:49.000000000 +0000 @@ -53,7 +53,7 @@ def test_get_free_namespace_port(self): ss_output2 = ss_output - for p in range(1024, 65535): + for p in range(1024, 32767): ss_output2 += ss_output_template % p with mock.patch('neutron.agent.linux.ip_lib.IPWrapper') \ @@ -63,4 +63,10 @@ ipwrapper.return_value = m result = net_helpers.get_free_namespace_port( n_const.PROTO_NAME_TCP) - self.assertEqual(65535, result) + self.assertEqual(32767, result) + + def test_get_unused_port(self): + with mock.patch('neutron.agent.linux.utils.execute') as ex: + ex.return_value = "2048\t61000" + result = net_helpers.get_unused_port(set(range(1025, 2048))) + self.assertEqual(1024, result) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/tests/test_tools.py neutron-9.0.0~b3~dev557/neutron/tests/unit/tests/test_tools.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/tests/test_tools.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/tests/test_tools.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
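Stepping back to the ``test_get_unused_port`` addition above: the helper under test reads the kernel's local port range (the mocked ``utils.execute`` output ``"2048\t61000"``) and returns a free port below it. A behaviour sketch consistent with the test's expectation follows; the command used to read the range and the candidate ordering are assumptions here, not the actual net_helpers code:

    from neutron.agent.linux import utils

    def get_unused_port(used, start=1024, end=None):
        # Sketch only: pick a port outside `used` and below the kernel's
        # ephemeral range, matching test_get_unused_port's expectations.
        if end is None:
            # Assumed source of the mocked "2048\t61000" output.
            output = utils.execute(
                ['sysctl', '-n', 'net.ipv4.ip_local_port_range'])
            end = int(output.split()[0]) - 1
        candidates = set(range(start, end + 1)) - set(used)
        return max(candidates)  # 1024 when used={1025..2047}, end=2047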
- -import os -import sys - -from neutron.tests import base -from neutron.tests import tools -from neutron.tests.unit import tests # noqa - - -EXAMPLE_MODULE = 'neutron.tests.unit.tests.example.dir.example_module' - - -class ImportModulesRecursivelyTestCase(base.BaseTestCase): - - def test_object_modules(self): - sys.modules.pop(EXAMPLE_MODULE, None) - modules = tools.import_modules_recursively( - os.path.dirname(tests.__file__)) - self.assertIn( - 'neutron.tests.unit.tests.example.dir.example_module', - modules) - self.assertIn(EXAMPLE_MODULE, sys.modules) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/test_service.py neutron-9.0.0~b3~dev557/neutron/tests/unit/test_service.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/test_service.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/test_service.py 2016-08-03 20:10:34.000000000 +0000 @@ -15,7 +15,10 @@ import mock +from oslo_config import cfg + from neutron import service +from neutron.tests import base from neutron.tests.unit import test_wsgi @@ -25,3 +28,30 @@ _plugin = mock.Mock() rpc_worker = service.RpcWorker(_plugin) self._test_reset(rpc_worker) + + +class TestRunWsgiApp(base.BaseTestCase): + def setUp(self): + super(TestRunWsgiApp, self).setUp() + self.processor_count = mock.patch( + 'oslo_concurrency.processutils.get_worker_count' + ).start().return_value + + def _test_api_workers(self, config_value, expected_passed_value): + if config_value is not None: + cfg.CONF.set_override('api_workers', config_value) + with mock.patch('neutron.wsgi.Server') as mock_server: + service.run_wsgi_app(mock.sentinel.app) + start_call = mock_server.return_value.start.call_args + expected_call = mock.call( + mock.ANY, mock.ANY, mock.ANY, workers=expected_passed_value) + self.assertEqual(expected_call, start_call) + + def test_api_workers_zero(self): + self._test_api_workers(0, 0) + + def test_api_workers_default(self): + self._test_api_workers(None, self.processor_count) + + def test_api_workers_defined(self): + self._test_api_workers(42, 42) diff -Nru neutron-9.0.0~b2~dev280/neutron/tests/unit/test_wsgi.py neutron-9.0.0~b3~dev557/neutron/tests/unit/test_wsgi.py --- neutron-9.0.0~b2~dev280/neutron/tests/unit/test_wsgi.py 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/tests/unit/test_wsgi.py 2016-08-29 20:05:49.000000000 +0000 @@ -26,7 +26,6 @@ import webob.exc from neutron.common import exceptions as n_exc -from neutron.db import api from neutron.tests import base from neutron.tests.common import helpers from neutron import wsgi @@ -67,10 +66,8 @@ class TestWorkerService(TestServiceBase): """WorkerService tests.""" - @mock.patch('neutron.db.api.get_engine') + @mock.patch('neutron.db.api.context_manager.get_legacy_facade') def test_start_withoutdb_call(self, apimock): - # clear engine from other tests - api._FACADE = None _service = mock.Mock() _service.pool.spawn.return_value = None diff -Nru neutron-9.0.0~b2~dev280/neutron/worker.py neutron-9.0.0~b3~dev557/neutron/worker.py --- neutron-9.0.0~b2~dev280/neutron/worker.py 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/worker.py 2016-08-03 20:10:34.000000000 +0000 @@ -89,4 +89,4 @@ def start(self): if self.worker_process_count > 0: - registry.notify(resources.PROCESS, events.AFTER_CREATE, self.start) + registry.notify(resources.PROCESS, events.AFTER_INIT, self.start) diff -Nru neutron-9.0.0~b2~dev280/neutron/wsgi.py neutron-9.0.0~b3~dev557/neutron/wsgi.py --- neutron-9.0.0~b2~dev280/neutron/wsgi.py 2016-06-08 
18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron/wsgi.py 2016-08-29 20:05:49.000000000 +0000 @@ -16,14 +16,13 @@ """ Utility methods for working with WSGI servers """ -from __future__ import print_function - import errno import socket import sys import time import eventlet.wsgi +from neutron.conf import wsgi as wsgi_config from neutron_lib import exceptions as exception from oslo_config import cfg import oslo_i18n @@ -46,22 +45,8 @@ from neutron.db import api from neutron import worker as neutron_worker -socket_opts = [ - cfg.IntOpt('backlog', - default=4096, - help=_("Number of backlog requests to configure " - "the socket with")), - cfg.IntOpt('retry_until_window', - default=30, - help=_("Number of seconds to keep retrying to listen")), - cfg.BoolOpt('use_ssl', - default=False, - help=_('Enable SSL on the API server')), -] - CONF = cfg.CONF -CONF.register_opts(socket_opts) -wsgi.register_opts(CONF) +wsgi_config.register_socket_opts() LOG = logging.getLogger(__name__) @@ -203,7 +188,7 @@ # dispose the whole pool before os.fork, otherwise there will # be shared DB connections in child processes which may cause # DB errors. - api.dispose() + api.context_manager.dispose_pool() # The API service runs in a number of child processes. # Minimize the cost of checking for child exit by extending the # wait interval past the default of 0.01s. @@ -267,7 +252,7 @@ return bm or 'application/json' def get_content_type(self): - allowed_types = ("application/json") + allowed_types = ("application/json",) if "Content-Type" not in self.headers: LOG.debug("Missing Content-Type") return None diff -Nru neutron-9.0.0~b2~dev280/neutron.egg-info/entry_points.txt neutron-9.0.0~b3~dev557/neutron.egg-info/entry_points.txt --- neutron-9.0.0~b2~dev280/neutron.egg-info/entry_points.txt 2016-06-27 15:31:51.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron.egg-info/entry_points.txt 2016-08-29 20:06:00.000000000 +0000 @@ -30,6 +30,7 @@ openvswitch = neutron.agent.linux.openvswitch_firewall:OVSFirewallDriver [neutron.agent.l2.extensions] +fdb = neutron.agent.l2.extensions.fdb_population:FdbPopulationAgentExtension qos = neutron.agent.l2.extensions.qos:QosAgentExtension [neutron.agent.linux.pd_drivers] @@ -96,10 +97,12 @@ neutron.services.loadbalancer.plugin.LoadBalancerPlugin = neutron_lbaas.services.loadbalancer.plugin:LoadBalancerPlugin neutron.services.vpn.plugin.VPNDriverPlugin = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin qos = neutron.services.qos.qos_plugin:QoSPlugin +revisions = neutron.services.revisions.revision_plugin:RevisionPlugin router = neutron.services.l3_router.l3_router_plugin:L3RouterPlugin segments = neutron.services.segments.plugin:Plugin tag = neutron.services.tag.tag_plugin:TagPlugin timestamp_core = neutron.services.timestamp.timestamp_plugin:TimeStampPlugin +trunk = neutron.services.trunk.plugin:TrunkPlugin vpnaas = neutron_vpnaas.services.vpn.plugin:VPNDriverPlugin [neutron.services.external_dns_drivers] diff -Nru neutron-9.0.0~b2~dev280/neutron.egg-info/pbr.json neutron-9.0.0~b3~dev557/neutron.egg-info/pbr.json --- neutron-9.0.0~b2~dev280/neutron.egg-info/pbr.json 2016-06-27 15:31:51.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron.egg-info/pbr.json 2016-08-29 20:06:00.000000000 +0000 @@ -1 +1 @@ -{"is_release": false, "git_version": "e8c9768"} \ No newline at end of file +{"is_release": false, "git_version": "7992703"} \ No newline at end of file diff -Nru neutron-9.0.0~b2~dev280/neutron.egg-info/PKG-INFO neutron-9.0.0~b3~dev557/neutron.egg-info/PKG-INFO --- 
neutron-9.0.0~b2~dev280/neutron.egg-info/PKG-INFO 2016-06-27 15:31:51.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron.egg-info/PKG-INFO 2016-08-29 20:06:00.000000000 +0000 @@ -1,8 +1,8 @@ Metadata-Version: 1.1 Name: neutron -Version: 9.0.0.0b2.dev280 +Version: 9.0.0.0b3.dev557 Summary: OpenStack Networking -Home-page: http://www.openstack.org/ +Home-page: http://docs.openstack.org/developer/neutron/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN @@ -23,13 +23,16 @@ available at: . This includes: Neutron Administrator Guide - http://docs.openstack.org/admin-guide-cloud/networking.html + http://docs.openstack.org/admin-guide/networking.html + + Neutron Developer Guide + http://docs.openstack.org/developer/neutron/devref/ Networking Guide http://docs.openstack.org/networking-guide/ Neutron API Reference: - http://docs.openstack.org/api/openstack-network/2.0/content/ + http://developer.openstack.org/api-ref/networking/v2/ Current Neutron developer documentation is available at: http://wiki.openstack.org/NeutronDevelopment @@ -52,3 +55,4 @@ Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 diff -Nru neutron-9.0.0~b2~dev280/neutron.egg-info/requires.txt neutron-9.0.0~b3~dev557/neutron.egg-info/requires.txt --- neutron-9.0.0~b2~dev280/neutron.egg-info/requires.txt 2016-06-27 15:31:51.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron.egg-info/requires.txt 2016-08-29 20:06:00.000000000 +0000 @@ -3,7 +3,7 @@ PasteDeploy>=1.5.0 debtcollector>=1.2.0 eventlet!=0.18.3,>=0.18.2 -pecan>=1.0.0 +pecan!=1.0.2,!=1.0.3,!=1.0.4,>=1.0.0 greenlet>=0.3.2 httplib2>=0.7.5 requests>=2.10.0 @@ -11,33 +11,33 @@ keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 netaddr!=0.7.16,>=0.7.12 netifaces>=0.10.4 -neutron-lib>=0.2.0 -python-neutronclient>=4.2.0 +neutron-lib>=0.4.0 +python-neutronclient>=5.1.0 retrying!=1.3.0,>=1.2.3 -ryu!=4.1,!=4.2,!=4.2.1,>=3.30 +ryu!=4.1,!=4.2,!=4.2.1,!=4.4,>=3.30 SQLAlchemy<1.1.0,>=1.0.10 WebOb>=1.2.3 -keystoneauth1>=2.7.0 +keystoneauth1>=2.10.0 alembic>=0.8.4 six>=1.9.0 -stevedore>=1.10.0 +stevedore>=1.16.0 oslo.cache>=1.5.0 oslo.concurrency>=3.8.0 -oslo.config>=3.10.0 -oslo.context>=2.4.0 -oslo.db>=4.1.0 +oslo.config>=3.14.0 +oslo.context>=2.9.0 +oslo.db>=4.10.0 oslo.i18n>=2.1.0 oslo.log>=1.14.0 oslo.messaging>=5.2.0 oslo.middleware>=3.0.0 oslo.policy>=1.9.0 oslo.reports>=0.6.0 -oslo.rootwrap>=2.0.0 +oslo.rootwrap>=5.0.0 oslo.serialization>=1.10.0 oslo.service>=1.10.0 -oslo.utils>=3.11.0 -oslo.versionedobjects>=1.9.1 -osprofiler>=1.3.0 +oslo.utils>=3.16.0 +oslo.versionedobjects>=1.13.0 +osprofiler>=1.4.0 python-novaclient!=2.33.0,>=2.29.0 python-designateclient>=1.5.0 @@ -49,4 +49,4 @@ ovs>=2.5.0 [:(python_version>='3.4')] -ovs>=2.6.0.dev1 +ovs>=2.6.0.dev3 diff -Nru neutron-9.0.0~b2~dev280/neutron.egg-info/SOURCES.txt neutron-9.0.0~b3~dev557/neutron.egg-info/SOURCES.txt --- neutron-9.0.0~b2~dev280/neutron.egg-info/SOURCES.txt 2016-06-27 15:31:51.000000000 +0000 +++ neutron-9.0.0~b3~dev557/neutron.egg-info/SOURCES.txt 2016-08-29 20:06:01.000000000 +0000 @@ -28,6 +28,7 @@ devstack/lib/ml2 devstack/lib/ovs devstack/lib/qos +devstack/lib/trunk devstack/lib/ml2_drivers/sriovnicswitch doc/Makefile doc/pom.xml @@ -35,13 +36,16 @@ doc/source/index.rst doc/source/dashboards/index.rst doc/source/devref/address_scopes.rst +doc/source/devref/agent_extensions.rst doc/source/devref/alembic_migrations.rst 
doc/source/devref/api_extensions.rst doc/source/devref/api_layer.rst doc/source/devref/callbacks.rst +doc/source/devref/calling_ml2_plugin.rst doc/source/devref/client_command_extensions.rst doc/source/devref/contribute.rst doc/source/devref/db_layer.rst +doc/source/devref/db_models.rst doc/source/devref/development.environment.rst doc/source/devref/dns_order.rst doc/source/devref/effective_neutron.rst @@ -87,9 +91,9 @@ doc/source/policies/neutron-teams.rst doc/source/policies/release-checklist.rst doc/source/policies/thirdparty-ci.rst +doc/source/stadium/governance.rst +doc/source/stadium/guidelines.rst doc/source/stadium/index.rst -doc/source/stadium/sub_project_guidelines.rst -doc/source/stadium/sub_projects.rst etc/README.txt etc/api-paste.ini etc/policy.json @@ -136,6 +140,8 @@ neutron.egg-info/requires.txt neutron.egg-info/top_level.txt neutron/agent/__init__.py +neutron/agent/agent_extension.py +neutron/agent/agent_extensions_manager.py neutron/agent/dhcp_agent.py neutron/agent/firewall.py neutron/agent/l3_agent.py @@ -151,11 +157,12 @@ neutron/agent/common/utils.py neutron/agent/dhcp/__init__.py neutron/agent/dhcp/agent.py -neutron/agent/dhcp/config.py neutron/agent/l2/__init__.py neutron/agent/l2/agent_extension.py +neutron/agent/l2/l2_agent_extension.py +neutron/agent/l2/l2_agent_extensions_manager.py neutron/agent/l2/extensions/__init__.py -neutron/agent/l2/extensions/manager.py +neutron/agent/l2/extensions/fdb_population.py neutron/agent/l2/extensions/qos.py neutron/agent/l3/__init__.py neutron/agent/l3/agent.py @@ -172,6 +179,8 @@ neutron/agent/l3/ha_router.py neutron/agent/l3/item_allocator.py neutron/agent/l3/keepalived_state_change.py +neutron/agent/l3/l3_agent_extension.py +neutron/agent/l3/l3_agent_extensions_manager.py neutron/agent/l3/legacy_router.py neutron/agent/l3/link_local_allocator.py neutron/agent/l3/namespace_manager.py @@ -304,8 +313,30 @@ neutron/common/topics.py neutron/common/utils.py neutron/conf/__init__.py +neutron/conf/common.py neutron/conf/quota.py neutron/conf/service.py +neutron/conf/wsgi.py +neutron/conf/agent/__init__.py +neutron/conf/agent/cmd.py +neutron/conf/agent/dhcp.py +neutron/conf/agent/ovs_conf.py +neutron/conf/agent/securitygroups_rpc.py +neutron/conf/agent/l3/__init__.py +neutron/conf/agent/l3/config.py +neutron/conf/agent/l3/keepalived.py +neutron/conf/extensions/__init__.py +neutron/conf/extensions/allowedaddresspairs.py +neutron/conf/plugins/__init__.py +neutron/conf/plugins/ml2/__init__.py +neutron/conf/plugins/ml2/drivers/__init__.py +neutron/conf/plugins/ml2/drivers/agent.py +neutron/conf/plugins/ml2/drivers/linuxbridge.py +neutron/conf/services/__init__.py +neutron/conf/services/extdns_designate_driver.py +neutron/conf/services/metering_agent.py +neutron/conf/services/provider_configuration.py +neutron/conf/services/qos_driver_manager.py neutron/core_extensions/__init__.py neutron/core_extensions/base.py neutron/core_extensions/qos.py @@ -354,7 +385,9 @@ neutron/db/servicetype_db.py neutron/db/sqlalchemytypes.py neutron/db/sqlalchemyutils.py +neutron/db/standard_attr.py neutron/db/standardattrdescription_db.py +neutron/db/subnet_service_type_db_models.py neutron/db/tag_db.py neutron/db/vlantransparent_db.py neutron/db/allowed_address_pairs/__init__.py @@ -444,20 +477,41 @@ neutron/db/migration/alembic_migrations/versions/mitaka/expand/c3a73f615e4_add_ip_version_to_address_scope.py neutron/db/migration/alembic_migrations/versions/mitaka/expand/dce3ec7a25c9_router_az.py 
neutron/db/migration/alembic_migrations/versions/mitaka/expand/ec7fcfbf72ee_network_az.py +neutron/db/migration/alembic_migrations/versions/newton/contract/3b935b28e7a0_migrate_to_pluggable_ipam.py neutron/db/migration/alembic_migrations/versions/newton/contract/4bcd4df1f426_rename_ml2_dvr_port_bindings.py neutron/db/migration/alembic_migrations/versions/newton/contract/7bbb25278f53_device_owner_ha_replicate_int.py +neutron/db/migration/alembic_migrations/versions/newton/contract/7d9d8eeec6ad_rename_tenant_to_project.py neutron/db/migration/alembic_migrations/versions/newton/contract/89ab9a816d70_rename_ml2_network_segments.py neutron/db/migration/alembic_migrations/versions/newton/contract/8fd3918ef6f4_add_segment_host_mapping.py +neutron/db/migration/alembic_migrations/versions/newton/contract/a84ccf28f06a_migrate_dns_name_from_port.py +neutron/db/migration/alembic_migrations/versions/newton/contract/a8b517cff8ab_add_routerport_bindings_for_ha.py +neutron/db/migration/alembic_migrations/versions/newton/contract/b67e765a3524_remove_mtu_column_from_networks.py neutron/db/migration/alembic_migrations/versions/newton/contract/c879c5e1ee90_add_segment_id_to_subnet.py +neutron/db/migration/alembic_migrations/versions/newton/expand/030a959ceafa_uniq_routerports0port_id.py neutron/db/migration/alembic_migrations/versions/newton/expand/30107ab6a3ee_provisioning_blocks.py +neutron/db/migration/alembic_migrations/versions/newton/expand/3d0e74aa7d37_add_flavor_id_to_routers.py neutron/db/migration/alembic_migrations/versions/newton/expand/45f8dd33480b_qos_dscp_db_addition.py neutron/db/migration/alembic_migrations/versions/newton/expand/5abc0278ca73_add_support_for_vlan_trunking.py +neutron/db/migration/alembic_migrations/versions/newton/expand/a5648cfeeadf_add_subnet_service_types.py +neutron/db/migration/alembic_migrations/versions/newton/expand/a963b38d82f4_add_dns_name_to_portdnses.py neutron/db/migration/alembic_migrations/versions/newton/expand/c415aab1c048_add_revisions_column.py neutron/db/migration/alembic_migrations/versions/newton/expand/d3435b514502_add_device_id_index_to_port.py neutron/db/migration/models/__init__.py neutron/db/migration/models/head.py +neutron/db/models/README +neutron/db/models/__init__.py +neutron/db/models/address_scope.py +neutron/db/models/allowed_address_pair.py +neutron/db/models/securitygroup.py +neutron/db/models/subnet_service_type.py +neutron/db/models/plugins/__init__.py +neutron/db/models/plugins/ml2/__init__.py +neutron/db/models/plugins/ml2/flatallocation.py +neutron/db/models/plugins/ml2/gre_allocation_endpoints.py neutron/db/network_dhcp_agent_binding/__init__.py neutron/db/network_dhcp_agent_binding/models.py +neutron/db/port_security/__init__.py +neutron/db/port_security/models.py neutron/db/qos/__init__.py neutron/db/qos/api.py neutron/db/qos/models.py @@ -484,30 +538,39 @@ neutron/extensions/extra_dhcp_opt.py neutron/extensions/extraroute.py neutron/extensions/flavors.py +neutron/extensions/ip_allocation.py +neutron/extensions/l2_adjacency.py neutron/extensions/l3.py neutron/extensions/l3_ext_gw_mode.py neutron/extensions/l3_ext_ha_mode.py +neutron/extensions/l3_flavors.py neutron/extensions/l3agentscheduler.py neutron/extensions/metering.py neutron/extensions/multiprovidernet.py neutron/extensions/netmtu.py neutron/extensions/network_availability_zone.py neutron/extensions/network_ip_availability.py +neutron/extensions/pagination.py neutron/extensions/portbindings.py neutron/extensions/portsecurity.py neutron/extensions/providernet.py 
neutron/extensions/qos.py neutron/extensions/quotasv2.py neutron/extensions/rbac.py +neutron/extensions/revisions.py neutron/extensions/router_availability_zone.py neutron/extensions/routerservicetype.py neutron/extensions/securitygroup.py neutron/extensions/segment.py neutron/extensions/servicetype.py +neutron/extensions/sorting.py neutron/extensions/standardattrdescription.py +neutron/extensions/subnet_service_types.py neutron/extensions/subnetallocation.py neutron/extensions/tag.py neutron/extensions/timestamp_core.py +neutron/extensions/trunk.py +neutron/extensions/trunk_details.py neutron/extensions/vlantransparent.py neutron/hacking/__init__.py neutron/hacking/checks.py @@ -547,13 +610,20 @@ neutron/objects/base.py neutron/objects/common_types.py neutron/objects/rbac_db.py +neutron/objects/securitygroup.py neutron/objects/subnet.py neutron/objects/subnetpool.py neutron/objects/trunk.py +neutron/objects/utils.py neutron/objects/db/__init__.py neutron/objects/db/api.py neutron/objects/extensions/__init__.py +neutron/objects/extensions/port_security.py neutron/objects/extensions/standardattributes.py +neutron/objects/network/__init__.py +neutron/objects/network/network_segment.py +neutron/objects/network/extensions/__init__.py +neutron/objects/network/extensions/port_security.py neutron/objects/port/__init__.py neutron/objects/port/extensions/__init__.py neutron/objects/port/extensions/allowedaddresspairs.py @@ -615,6 +685,7 @@ neutron/plugins/ml2/drivers/agent/__init__.py neutron/plugins/ml2/drivers/agent/_agent_manager_base.py neutron/plugins/ml2/drivers/agent/_common_agent.py +neutron/plugins/ml2/drivers/agent/capabilities.py neutron/plugins/ml2/drivers/agent/config.py neutron/plugins/ml2/drivers/l2pop/README neutron/plugins/ml2/drivers/l2pop/__init__.py @@ -659,8 +730,10 @@ neutron/plugins/ml2/drivers/openvswitch/agent/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/main.py neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py +neutron/plugins/ml2/drivers/openvswitch/agent/ovs_capabilities.py neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py +neutron/plugins/ml2/drivers/openvswitch/agent/vlanmanager.py neutron/plugins/ml2/drivers/openvswitch/agent/common/__init__.py neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py neutron/plugins/ml2/drivers/openvswitch/agent/common/constants.py @@ -725,6 +798,13 @@ neutron/services/l3_router/README neutron/services/l3_router/__init__.py neutron/services/l3_router/l3_router_plugin.py +neutron/services/l3_router/service_providers/__init__.py +neutron/services/l3_router/service_providers/base.py +neutron/services/l3_router/service_providers/driver_controller.py +neutron/services/l3_router/service_providers/dvr.py +neutron/services/l3_router/service_providers/dvrha.py +neutron/services/l3_router/service_providers/ha.py +neutron/services/l3_router/service_providers/single_node.py neutron/services/metering/__init__.py neutron/services/metering/metering_plugin.py neutron/services/metering/agents/__init__.py @@ -745,6 +825,8 @@ neutron/services/qos/notification_drivers/message_queue.py neutron/services/qos/notification_drivers/qos_base.py neutron/services/rbac/__init__.py +neutron/services/revisions/__init__.py +neutron/services/revisions/revision_plugin.py neutron/services/segments/__init__.py neutron/services/segments/db.py neutron/services/segments/exceptions.py @@ -755,9 +837,31 @@ neutron/services/timestamp/timestamp_db.py 
neutron/services/timestamp/timestamp_plugin.py neutron/services/trunk/__init__.py -neutron/services/trunk/db.py +neutron/services/trunk/callbacks.py +neutron/services/trunk/constants.py neutron/services/trunk/exceptions.py neutron/services/trunk/models.py +neutron/services/trunk/plugin.py +neutron/services/trunk/rules.py +neutron/services/trunk/utils.py +neutron/services/trunk/drivers/__init__.py +neutron/services/trunk/drivers/base.py +neutron/services/trunk/drivers/linuxbridge/__init__.py +neutron/services/trunk/drivers/linuxbridge/driver.py +neutron/services/trunk/drivers/openvswitch/__init__.py +neutron/services/trunk/drivers/openvswitch/constants.py +neutron/services/trunk/drivers/openvswitch/driver.py +neutron/services/trunk/drivers/openvswitch/agent/__init__.py +neutron/services/trunk/drivers/openvswitch/agent/driver.py +neutron/services/trunk/drivers/openvswitch/agent/exceptions.py +neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py +neutron/services/trunk/rpc/__init__.py +neutron/services/trunk/rpc/agent.py +neutron/services/trunk/rpc/backend.py +neutron/services/trunk/rpc/constants.py +neutron/services/trunk/rpc/server.py +neutron/services/trunk/validators/__init__.py +neutron/services/trunk/validators/vlan.py neutron/tests/__init__.py neutron/tests/base.py neutron/tests/fake_notifier.py @@ -785,12 +889,12 @@ neutron/tests/contrib/gate_hook.sh neutron/tests/contrib/post_test_hook.sh neutron/tests/contrib/hooks/api_extensions +neutron/tests/contrib/hooks/iptables_verify neutron/tests/contrib/hooks/osprofiler -neutron/tests/contrib/hooks/pagination neutron/tests/contrib/hooks/pecan neutron/tests/contrib/hooks/qos neutron/tests/contrib/hooks/quotas -neutron/tests/contrib/hooks/sorting +neutron/tests/contrib/hooks/trunk neutron/tests/etc/api-paste.ini.test neutron/tests/etc/neutron.conf neutron/tests/etc/neutron_test.conf @@ -810,6 +914,7 @@ neutron/tests/fullstack/resources/process.py neutron/tests/functional/__init__.py neutron/tests/functional/base.py +neutron/tests/functional/constants.py neutron/tests/functional/requirements.txt neutron/tests/functional/test_server.py neutron/tests/functional/test_service.py @@ -852,20 +957,26 @@ neutron/tests/functional/agent/linux/test_utils.py neutron/tests/functional/agent/linux/bin/__init__.py neutron/tests/functional/agent/linux/bin/ipt_binname.py +neutron/tests/functional/agent/ovsdb/__init__.py +neutron/tests/functional/agent/ovsdb/test_impl_idl.py neutron/tests/functional/agent/windows/__init__.py neutron/tests/functional/agent/windows/test_ip_lib.py neutron/tests/functional/api/__init__.py neutron/tests/functional/api/test_policies.py neutron/tests/functional/cmd/__init__.py +neutron/tests/functional/cmd/test_ipset_cleanup.py neutron/tests/functional/cmd/test_linuxbridge_cleanup.py neutron/tests/functional/cmd/test_netns_cleanup.py +neutron/tests/functional/cmd/test_ovs_cleanup.py neutron/tests/functional/common/__init__.py neutron/tests/functional/common/test_utils.py neutron/tests/functional/db/__init__.py neutron/tests/functional/db/test_ipam.py -neutron/tests/functional/db/test_migrations.conf neutron/tests/functional/db/test_migrations.py neutron/tests/functional/db/test_models.py +neutron/tests/functional/db/migrations/__init__.py +neutron/tests/functional/db/migrations/test_3b935b28e7a0_migrate_to_pluggable_ipam.py +neutron/tests/functional/db/migrations/test_a8b517cff8ab_add_routerport_bindings_for_ha.py neutron/tests/functional/pecan_wsgi/__init__.py neutron/tests/functional/pecan_wsgi/config.py 
neutron/tests/functional/pecan_wsgi/test_controllers.py @@ -888,6 +999,12 @@ neutron/tests/functional/services/l3_router/__init__.py neutron/tests/functional/services/l3_router/test_l3_dvr_ha_router_plugin.py neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py +neutron/tests/functional/services/trunk/__init__.py +neutron/tests/functional/services/trunk/test_plugin.py +neutron/tests/functional/services/trunk/drivers/__init__.py +neutron/tests/functional/services/trunk/drivers/openvswitch/__init__.py +neutron/tests/functional/services/trunk/drivers/openvswitch/agent/__init__.py +neutron/tests/functional/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py neutron/tests/functional/tests/__init__.py neutron/tests/functional/tests/common/__init__.py neutron/tests/functional/tests/common/exclusive_resources/__init__.py @@ -916,6 +1033,7 @@ neutron/tests/tempest/api/test_auto_allocated_topology.py neutron/tests/tempest/api/test_dhcp_ipv6.py neutron/tests/tempest/api/test_extension_driver_port_security.py +neutron/tests/tempest/api/test_extensions.py neutron/tests/tempest/api/test_extra_dhcp_options.py neutron/tests/tempest/api/test_flavors_extensions.py neutron/tests/tempest/api/test_floating_ips.py @@ -926,6 +1044,7 @@ neutron/tests/tempest/api/test_networks_negative.py neutron/tests/tempest/api/test_ports.py neutron/tests/tempest/api/test_qos.py +neutron/tests/tempest/api/test_revisions.py neutron/tests/tempest/api/test_routers.py neutron/tests/tempest/api/test_routers_negative.py neutron/tests/tempest/api/test_security_groups.py @@ -935,6 +1054,9 @@ neutron/tests/tempest/api/test_subnetpools_negative.py neutron/tests/tempest/api/test_subnets.py neutron/tests/tempest/api/test_timestamp.py +neutron/tests/tempest/api/test_trunk.py +neutron/tests/tempest/api/test_trunk_details.py +neutron/tests/tempest/api/test_trunk_negative.py neutron/tests/tempest/api/admin/__init__.py neutron/tests/tempest/api/admin/test_agent_management.py neutron/tests/tempest/api/admin/test_dhcp_agent_scheduler.py @@ -951,7 +1073,9 @@ neutron/tests/tempest/scenario/__init__.py neutron/tests/tempest/scenario/base.py neutron/tests/tempest/scenario/constants.py +neutron/tests/tempest/scenario/exceptions.py neutron/tests/tempest/scenario/test_basic.py +neutron/tests/tempest/scenario/test_qos.py neutron/tests/tempest/services/__init__.py neutron/tests/tempest/services/network/__init__.py neutron/tests/tempest/services/network/json/__init__.py @@ -969,6 +1093,7 @@ neutron/tests/unit/test_wsgi.py neutron/tests/unit/testlib_api.py neutron/tests/unit/agent/__init__.py +neutron/tests/unit/agent/test_agent_extensions_manager.py neutron/tests/unit/agent/test_rpc.py neutron/tests/unit/agent/test_securitygroups_rpc.py neutron/tests/unit/agent/common/__init__.py @@ -979,8 +1104,9 @@ neutron/tests/unit/agent/dhcp/__init__.py neutron/tests/unit/agent/dhcp/test_agent.py neutron/tests/unit/agent/l2/__init__.py +neutron/tests/unit/agent/l2/test_l2_agent_extensions_manager.py neutron/tests/unit/agent/l2/extensions/__init__.py -neutron/tests/unit/agent/l2/extensions/test_manager.py +neutron/tests/unit/agent/l2/extensions/test_fdb_population.py neutron/tests/unit/agent/l2/extensions/test_qos.py neutron/tests/unit/agent/l3/__init__.py neutron/tests/unit/agent/l3/test_agent.py @@ -1036,6 +1162,7 @@ neutron/tests/unit/api/rpc/__init__.py neutron/tests/unit/api/rpc/agentnotifiers/__init__.py neutron/tests/unit/api/rpc/agentnotifiers/test_dhcp_rpc_agent_api.py 
+neutron/tests/unit/api/rpc/agentnotifiers/test_l3_rpc_agent_api.py neutron/tests/unit/api/rpc/callbacks/__init__.py neutron/tests/unit/api/rpc/callbacks/test_resource_manager.py neutron/tests/unit/api/rpc/callbacks/test_resources.py @@ -1084,6 +1211,7 @@ neutron/tests/unit/db/test_l3_dvr_db.py neutron/tests/unit/db/test_l3_hamode_db.py neutron/tests/unit/db/test_migration.py +neutron/tests/unit/db/test_model_base.py neutron/tests/unit/db/test_portsecurity_db.py neutron/tests/unit/db/test_portsecurity_db_common.py neutron/tests/unit/db/test_provisioning_blocks.py @@ -1122,6 +1250,7 @@ neutron/tests/unit/extensions/test_securitygroup.py neutron/tests/unit/extensions/test_segment.py neutron/tests/unit/extensions/test_servicetype.py +neutron/tests/unit/extensions/test_subnet_service_types.py neutron/tests/unit/extensions/test_tag.py neutron/tests/unit/extensions/test_timestamp_core.py neutron/tests/unit/extensions/test_vlantransparent.py @@ -1146,13 +1275,19 @@ neutron/tests/unit/objects/test_common_types.py neutron/tests/unit/objects/test_objects.py neutron/tests/unit/objects/test_rbac_db.py +neutron/tests/unit/objects/test_securitygroup.py neutron/tests/unit/objects/test_subnet.py neutron/tests/unit/objects/test_subnetpool.py neutron/tests/unit/objects/test_trunk.py +neutron/tests/unit/objects/test_utils.py neutron/tests/unit/objects/db/__init__.py neutron/tests/unit/objects/db/test_api.py neutron/tests/unit/objects/extensions/__init__.py neutron/tests/unit/objects/extensions/test_standardattributes.py +neutron/tests/unit/objects/network/__init__.py +neutron/tests/unit/objects/network/test_network_segment.py +neutron/tests/unit/objects/network/extensions/__init__.py +neutron/tests/unit/objects/network/extensions/test_port_security.py neutron/tests/unit/objects/port/__init__.py neutron/tests/unit/objects/port/extensions/__init__.py neutron/tests/unit/objects/port/extensions/test_allowedaddresspairs.py @@ -1195,6 +1330,7 @@ neutron/tests/unit/plugins/ml2/drivers/agent/__init__.py neutron/tests/unit/plugins/ml2/drivers/agent/test__agent_manager_base.py neutron/tests/unit/plugins/ml2/drivers/agent/test__common_agent.py +neutron/tests/unit/plugins/ml2/drivers/agent/test_capabilities.py neutron/tests/unit/plugins/ml2/drivers/l2pop/__init__.py neutron/tests/unit/plugins/ml2/drivers/l2pop/test_db.py neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py @@ -1230,8 +1366,10 @@ neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/fake_oflib.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/ovs_test_base.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_agent_extension_api.py +neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_capabilities.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py +neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_vlanmanager.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/__init__.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/extension_drivers/test_qos_driver.py neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/openflow/__init__.py @@ -1263,6 +1401,9 @@ neutron/tests/unit/services/test_provider_configuration.py neutron/tests/unit/services/auto_allocate/__init__.py neutron/tests/unit/services/auto_allocate/test_db.py +neutron/tests/unit/services/l3_router/__init__.py +neutron/tests/unit/services/l3_router/service_providers/__init__.py 
+neutron/tests/unit/services/l3_router/service_providers/test_driver_controller.py neutron/tests/unit/services/metering/__init__.py neutron/tests/unit/services/metering/test_metering_plugin.py neutron/tests/unit/services/metering/agents/__init__.py @@ -1276,12 +1417,28 @@ neutron/tests/unit/services/qos/notification_drivers/dummy.py neutron/tests/unit/services/qos/notification_drivers/test_manager.py neutron/tests/unit/services/qos/notification_drivers/test_message_queue.py +neutron/tests/unit/services/revisions/__init__.py +neutron/tests/unit/services/revisions/test_revision_plugin.py neutron/tests/unit/services/trunk/__init__.py -neutron/tests/unit/services/trunk/test_db.py +neutron/tests/unit/services/trunk/fakes.py +neutron/tests/unit/services/trunk/test_plugin.py +neutron/tests/unit/services/trunk/test_rules.py +neutron/tests/unit/services/trunk/test_utils.py +neutron/tests/unit/services/trunk/drivers/__init__.py +neutron/tests/unit/services/trunk/drivers/linuxbridge/__init__.py +neutron/tests/unit/services/trunk/drivers/linuxbridge/test_driver.py +neutron/tests/unit/services/trunk/drivers/openvswitch/__init__.py +neutron/tests/unit/services/trunk/drivers/openvswitch/test_driver.py +neutron/tests/unit/services/trunk/drivers/openvswitch/agent/__init__.py +neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_driver.py +neutron/tests/unit/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py +neutron/tests/unit/services/trunk/rpc/__init__.py +neutron/tests/unit/services/trunk/rpc/test_agent.py +neutron/tests/unit/services/trunk/rpc/test_backend.py +neutron/tests/unit/services/trunk/rpc/test_server.py neutron/tests/unit/tests/__init__.py neutron/tests/unit/tests/test_base.py neutron/tests/unit/tests/test_post_mortem_debug.py -neutron/tests/unit/tests/test_tools.py neutron/tests/unit/tests/common/__init__.py neutron/tests/unit/tests/common/test_net_helpers.py neutron/tests/unit/tests/example/README @@ -1312,12 +1469,14 @@ releasenotes/notes/add-port-rebinding-chance-33178b9abacf5804.yaml releasenotes/notes/add-rbac-qos-8b1154ee756c66df.yaml releasenotes/notes/add-standard-attr-descriptions-1ba0d7a454c3fd8f.yaml +releasenotes/notes/add-subnet-service-types-bc81f6df9834f96e.yaml releasenotes/notes/add-tags-to-core-resources-b05330a129900609.yaml releasenotes/notes/add-timestamp-fields-f9ab949fc88f05f6.yaml releasenotes/notes/advertise_mtu_by_default-d8b0b056a74517b8.yaml releasenotes/notes/advertisement-intervals-for-radvd-configurable-6d85b5fdd97a2742.yaml releasenotes/notes/allow-non-admins-to-define-external-extra-routes-0d541fc356a5c546.yaml releasenotes/notes/bgp-support-ef361825ca63f28b.yaml +releasenotes/notes/change_external_network_bridge_default-5de3a0c19182eb70.yaml releasenotes/notes/clear-allowed-address-pairs-with-none-4757bcca78076c9e.yaml releasenotes/notes/config-file-generation-2eafc6602d57178e.yaml releasenotes/notes/config-wsgi-pool-size-a4c06753b79fee6d.yaml @@ -1325,9 +1484,13 @@ releasenotes/notes/default-local-dns-a1c3fa1451f228fa.yaml releasenotes/notes/default-subnetpool-semantics-1cdc5cdde2be88c2.yaml releasenotes/notes/deprecate-advertise-mtu-51e3f78475a14efc.yaml +releasenotes/notes/deprecate-allow-sorting-allow-pagination-4549c92a74cfe15d.yaml releasenotes/notes/deprecate-force_gateway_on_subnet-376855c4e66f4e11.yaml +releasenotes/notes/deprecate-implicit-service-providers-loading-703f984b90351bf0.yaml +releasenotes/notes/deprecate-min-l3-agents-per-router-15ddaa4c178b23df.yaml 
releasenotes/notes/deprecate-network-device-mtu-59b78264c9974808.yaml releasenotes/notes/deprecate-router_id-34aca9ea5ee9e789.yaml +releasenotes/notes/deprecate-supported_pci_vendor_devs-12279b70a1f1fe8e.yaml releasenotes/notes/deprecate_max_fixed_ips_per_port-5e80518cbf25cfd6.yaml releasenotes/notes/deprecate_neutron_debug-a578e0adfc9cff4c.yaml releasenotes/notes/deprecate_prevent_arp_spoofing_option-a09e673fc8f9fee4.yaml @@ -1338,10 +1501,15 @@ releasenotes/notes/dvr-ha-support-cc67e84d9380cd0b.yaml releasenotes/notes/dvr-ovs-agent-6052a8d60fddde22.yaml releasenotes/notes/dvr-support-live-migration-b818b12bd9cbb518.yaml +releasenotes/notes/enable-sorting-pagination-754390289d3311fa.yaml releasenotes/notes/end-to-end-mtu-00345fc4282cb8fb.yaml releasenotes/notes/fail-on-missing-extensions-bc332124b780875b.yaml +releasenotes/notes/fdb_population-70d751c8c2e4395f.yaml releasenotes/notes/firewall_driver_not_needed_on_server-4159669ad834dea6.yaml +releasenotes/notes/fix-mtu-for-existing-networks-5a476cde9bc46a53.yaml releasenotes/notes/hyperv-neutron-agent-decomposition-ae6a052aeb48c6ac.yaml +releasenotes/notes/l2_adjacency-e6e54e5ff9aad9b7.yaml +releasenotes/notes/l3-agent-extensions-b348ff26aec0fe88.yaml releasenotes/notes/linuxbridge-agent-extensions-66bdf9feee25ef99.yaml releasenotes/notes/linuxbridge_vxlan_arp_responder-e9ea91552e1b62a7.yaml releasenotes/notes/macvtap-l2-agent-2b551d8ec341196d.yaml @@ -1349,19 +1517,28 @@ releasenotes/notes/mtu-selection-and-advertisement-ab29f9ec43140224.yaml releasenotes/notes/network_ip_availability-d64bd7032b3c15ee.yaml releasenotes/notes/new-vif-type-for-pf-passthrough-33ec560b9b5d246f.yaml +releasenotes/notes/of_interface-native-by-default-0c07bdbd7365230a.yaml releasenotes/notes/oslo-cache-cache-url-deprecated-16cd3d335c5962eb.yaml releasenotes/notes/oslo-messaging-notifier-queue-d94677076a1db261.yaml releasenotes/notes/oslo-reports-166a169037bf64f2.yaml +releasenotes/notes/overlay_ip_version-ml2-e6438b570844ef5c.yaml releasenotes/notes/ovs-ct-firewall-driver-52a70a6a16d06f59.yaml releasenotes/notes/ovs-ipv6-tunnel-endpoints-f41b4954a04c43f6.yaml +releasenotes/notes/ovsdb-native-by-default-38835d6963592396.yaml releasenotes/notes/path-mtu-back-to-zero-e4f9e8bdd8317ad4.yaml releasenotes/notes/physical_network-aware-dhcp-scheduling-94e9fadc7c7c5fec.yaml +releasenotes/notes/pluggable-ipam-is-default-15c2ee15dc5b4a7b.yaml releasenotes/notes/remove-driver-60eb7e26d95f7322.yaml releasenotes/notes/remove-force_gateway_on_subnet-77cb79f0b35d0c6d.yaml +releasenotes/notes/remove-network_device_mtu-option-a1a96e99dc7f0a02.yaml +releasenotes/notes/remove-quota_items-d50b4672dd31ea3e.yaml releasenotes/notes/remove-router_id-b3732089f8f1faa1.yaml +releasenotes/notes/remove-subnetpool-config-b15dbe59237aee7e.yaml +releasenotes/notes/rename-tenant-to-project-b19a4068f8625969.yaml releasenotes/notes/rm-notify-entry-points-aa442134a780469a.yaml releasenotes/notes/segment_mtu_to_global_physnet_mtu-9cee5ff09557edeb.yaml releasenotes/notes/set-of-default-qos-burst-value-0790773703fa08fc.yaml +releasenotes/notes/sorting-pagination-extensions-e66e99e2a8f5e563.yaml releasenotes/notes/sriov_allow_use_many_nics_for_one_physnet-3570aa67a60ce6c4.yaml releasenotes/notes/sriov_show_l2_agent_extensions-ca852e155a529e99.yaml releasenotes/notes/use-keystoneauth-24f309566001a16b.yaml @@ -1381,6 +1558,7 @@ tools/generate_config_file_samples.sh tools/install_venv.py tools/install_venv_common.py +tools/list_moved_globals.py tools/milestone-review-dash.py tools/misc-sanity-checks.sh 
tools/ostestr_compat_shim.sh diff -Nru neutron-9.0.0~b2~dev280/PKG-INFO neutron-9.0.0~b3~dev557/PKG-INFO --- neutron-9.0.0~b2~dev280/PKG-INFO 2016-06-27 15:31:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/PKG-INFO 2016-08-29 20:06:03.000000000 +0000 @@ -1,8 +1,8 @@ Metadata-Version: 1.1 Name: neutron -Version: 9.0.0.0b2.dev280 +Version: 9.0.0.0b3.dev557 Summary: OpenStack Networking -Home-page: http://www.openstack.org/ +Home-page: http://docs.openstack.org/developer/neutron/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN @@ -23,13 +23,16 @@ available at: . This includes: Neutron Administrator Guide - http://docs.openstack.org/admin-guide-cloud/networking.html + http://docs.openstack.org/admin-guide/networking.html + + Neutron Developer Guide + http://docs.openstack.org/developer/neutron/devref/ Networking Guide http://docs.openstack.org/networking-guide/ Neutron API Reference: - http://docs.openstack.org/api/openstack-network/2.0/content/ + http://developer.openstack.org/api-ref/networking/v2/ Current Neutron developer documentation is available at: http://wiki.openstack.org/NeutronDevelopment @@ -52,3 +55,4 @@ Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 diff -Nru neutron-9.0.0~b2~dev280/rally-jobs/neutron-neutron.yaml neutron-9.0.0~b3~dev557/rally-jobs/neutron-neutron.yaml --- neutron-9.0.0~b2~dev280/rally-jobs/neutron-neutron.yaml 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/rally-jobs/neutron-neutron.yaml 2016-08-29 20:05:49.000000000 +0000 @@ -3,7 +3,7 @@ - runner: type: "constant" - times: 40 + times: 100 concurrency: 20 context: users: @@ -11,8 +11,11 @@ users_per_tenant: 1 quotas: neutron: - network: -1 + network: 100 sla: + max_avg_duration_per_atomic: + neutron.list_networks: 15 # reduce as perf is fixed + failure_rate: max: 0 @@ -66,7 +69,7 @@ args: network_create_args: port_create_args: - ports_per_network: 20 + ports_per_network: 100 runner: type: "constant" times: 40 @@ -80,10 +83,10 @@ network: -1 subnet: -1 router: -1 - port: -1 + port: 4040 # (ports per network + 1 dhcp) * times sla: max_avg_duration_per_atomic: - neutron.list_ports: 5 + neutron.list_ports: 15 # reduce as perf is fixed failure_rate: max: 0 diff -Nru neutron-9.0.0~b2~dev280/README.rst neutron-9.0.0~b3~dev557/README.rst --- neutron-9.0.0~b2~dev280/README.rst 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/README.rst 2016-08-03 20:10:33.000000000 +0000 @@ -15,13 +15,16 @@ available at: . 
This includes: Neutron Administrator Guide - http://docs.openstack.org/admin-guide-cloud/networking.html + http://docs.openstack.org/admin-guide/networking.html + +Neutron Developer Guide + http://docs.openstack.org/developer/neutron/devref/ Networking Guide http://docs.openstack.org/networking-guide/ Neutron API Reference: - http://docs.openstack.org/api/openstack-network/2.0/content/ + http://developer.openstack.org/api-ref/networking/v2/ Current Neutron developer documentation is available at: http://wiki.openstack.org/NeutronDevelopment diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/add-subnet-service-types-bc81f6df9834f96e.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/add-subnet-service-types-bc81f6df9834f96e.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/add-subnet-service-types-bc81f6df9834f96e.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/add-subnet-service-types-bc81f6df9834f96e.yaml 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,15 @@ +--- +features: + - Subnets now have a new property 'service_types'. + This is a list of port device owners, such that + only ports with a matching device owner will be + given an IP from this subnet. If no matching + service subnet exists for the given device owner, + or no service subnets have been defined on the + network, the port will be assigned an IP from a + subnet with no 'service_types'. This preserves + backwards compatibility with older deployments. +upgrade: + - A new table 'subnet_service_types' has been added + to cater for this feature. It uses the ID field + from the 'subnets' table as a foreign key. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/add-timestamp-fields-f9ab949fc88f05f6.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/add-timestamp-fields-f9ab949fc88f05f6.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/add-timestamp-fields-f9ab949fc88f05f6.yaml 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/add-timestamp-fields-f9ab949fc88f05f6.yaml 2016-08-03 20:10:34.000000000 +0000 @@ -1,9 +1,9 @@ --- prelude: > - Timestamp fields are now added to neutron core resources. + Timestamp fields have been added to neutron core resources. features: - - Add timestamp fields 'created_at', 'updated_at' into neutron core - resources like network, subnet, port and subnetpool. - - And support for querying these resources by changed-since, it will - return the resources changed after the specfic time string like - YYYY-MM-DDTHH:MM:SS + - Add timestamp fields ``created_at`` and ``updated_at`` to neutron core + resources, for example networks, subnets, ports and subnetpools. + - These resources can now be queried by ``changed-since``, which + returns the resources changed after a specific time string like + ``YYYY-MM-DDTHH:MM:SS``. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/change_external_network_bridge_default-5de3a0c19182eb70.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/change_external_network_bridge_default-5de3a0c19182eb70.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/change_external_network_bridge_default-5de3a0c19182eb70.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/change_external_network_bridge_default-5de3a0c19182eb70.yaml 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,16 @@ +--- +prelude: > + The default value for 'external_network_bridge' in the L3 agent is now ''.
+upgrade: + - The default value for 'external_network_bridge' has been changed to '' + since that is the preferred way to configure the L3 agent and will be the + only way in future releases. If you have not explicitly set this value + and you use the L3 agent, you will need to set this value to 'br-ex' to + match the old default. + If you are using 'br-ex', you should switch to '', ensure your external + network has a flat segment and ensure your L2 agent has a bridge_mapping + entry between the external network's flat segment physnet and 'br-ex' to + get the same connectivity. If the external network did not already have + the flat segment, you will need to detach all routers from the external + networks, delete the incorrect segment type, add the flat segment, and + re-attach the routers. A configuration sketch follows these notes. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/deprecate-allow-sorting-allow-pagination-4549c92a74cfe15d.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/deprecate-allow-sorting-allow-pagination-4549c92a74cfe15d.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/deprecate-allow-sorting-allow-pagination-4549c92a74cfe15d.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/deprecate-allow-sorting-allow-pagination-4549c92a74cfe15d.yaml 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,4 @@ +--- +deprecations: + - The ``allow_sorting`` and ``allow_pagination`` configuration options + are deprecated and will be removed in a future release. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/deprecate-implicit-service-providers-loading-703f984b90351bf0.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/deprecate-implicit-service-providers-loading-703f984b90351bf0.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/deprecate-implicit-service-providers-loading-703f984b90351bf0.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/deprecate-implicit-service-providers-loading-703f984b90351bf0.yaml 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,7 @@ +--- +deprecations: + - The Neutron controller service currently allows loading ``service_providers`` + options from some files that are not passed to it via the --config-file or + --config-dir CLI options. This behaviour is now deprecated and will be + disabled in Ocata. Current users are advised to switch to the aforementioned + CLI options. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/deprecate-min-l3-agents-per-router-15ddaa4c178b23df.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/deprecate-min-l3-agents-per-router-15ddaa4c178b23df.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/deprecate-min-l3-agents-per-router-15ddaa4c178b23df.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/deprecate-min-l3-agents-per-router-15ddaa4c178b23df.yaml 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,5 @@ +--- +deprecations: + - The option min_l3_agents_per_router is deprecated and will be + removed in the Ocata release, where the scheduling of new HA + routers will always be allowed.
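A minimal configuration sketch of the migration described in the 'external_network_bridge' upgrade note above, assuming a single external flat network whose physnet is named 'physnet-ex' (the physnet name and file paths are illustrative, not taken from this diff):

.. code-block:: ini

    # l3_agent.ini: adopt the new default explicitly
    [DEFAULT]
    external_network_bridge =

    # openvswitch_agent.ini on the same host: map the external
    # network's flat segment physnet to the br-ex bridge
    [ovs]
    bridge_mappings = physnet-ex:br-ex

With this mapping in place, external traffic reaches 'br-ex' through the L2 agent rather than through a bridge plugged directly by the L3 agent, which is why the flat segment must exist before routers are re-attached.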
diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/deprecate-supported_pci_vendor_devs-12279b70a1f1fe8e.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/deprecate-supported_pci_vendor_devs-12279b70a1f1fe8e.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/deprecate-supported_pci_vendor_devs-12279b70a1f1fe8e.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/deprecate-supported_pci_vendor_devs-12279b70a1f1fe8e.yaml 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,6 @@ +--- +deprecations: + - The 'supported_pci_vendor_devs' option is deprecated in Newton and will + be removed in Ocata. The validation of supported PCI vendors is done in + nova-scheduler through the pci_passthrough_whitelist option when it + selects a suitable hypervisor, hence the option is considered redundant. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/dscp-qos-77ea9b27d3762e48.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/dscp-qos-77ea9b27d3762e48.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/dscp-qos-77ea9b27d3762e48.yaml 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/dscp-qos-77ea9b27d3762e48.yaml 2016-08-03 20:10:34.000000000 +0000 @@ -6,6 +6,6 @@ features: - Neutron can apply a QoS rule to ports that mark outgoing traffic's type of service packet header field. - - The Open vSwitch Neutron agent has been extended to mark the type of - service packet header field of packets egressing from the VM when the + - The Open vSwitch Neutron agent has been extended to mark the Type of + Service IP header field of packets egressing from the VM when the QoS rule has been applied. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/enable-sorting-pagination-754390289d3311fa.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/enable-sorting-pagination-754390289d3311fa.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/enable-sorting-pagination-754390289d3311fa.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/enable-sorting-pagination-754390289d3311fa.yaml 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,3 @@ +--- +upgrade: + - API sorting and pagination features are now enabled by default. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/fdb_population-70d751c8c2e4395f.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/fdb_population-70d751c8c2e4395f.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/fdb_population-70d751c8c2e4395f.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/fdb_population-70d751c8c2e4395f.yaml 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,17 @@ +--- +fixes: + - To fix communication issues between SR-IOV + instances and regular instances, the FDB population + extension has been added to the OVS and linuxbridge + agents. Previously, messages from an instance on an + SR-IOV direct port to instances on normal ports + located on the same hypervisor were sent directly + to the wire because the FDB table was not yet + updated. The FDB population extension tracks + instance boot/delete operations using the + handle_port and delete_port extension interface + messages and updates the hypervisor's FDB table + accordingly. + + Please note this L2 agent extension does not support + the allowed address pairs extension.
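The fdb_population note above implies a small amount of L2 agent configuration on each hypervisor. In the sketch below, the 'extensions' option and the 'fdb' alias match the entry point registered in the setup.cfg hunk later in this diff; the '[FDB]' section name, the 'shared_physical_device_mappings' option, and the physnet/device values are illustrative assumptions rather than facts taken from this diff:

.. code-block:: ini

    # openvswitch_agent.ini (or linuxbridge_agent.ini)
    [agent]
    # load the FDB population L2 agent extension (entry point 'fdb')
    extensions = fdb

    # assumed section and option: map each physnet to the device
    # shared between SR-IOV direct ports and normal ports
    [FDB]
    shared_physical_device_mappings = physnet1:eth0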
diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/fix-mtu-for-existing-networks-5a476cde9bc46a53.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/fix-mtu-for-existing-networks-5a476cde9bc46a53.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/fix-mtu-for-existing-networks-5a476cde9bc46a53.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/fix-mtu-for-existing-networks-5a476cde9bc46a53.yaml 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,12 @@ +--- +features: + - The net-mtu extension now recalculates network MTU on each network access, + not just on creation. It now allows operators to tweak MTU-related + configuration options and see them applied to all network resources, + both old and new, right after a controller restart. +upgrade: + - Existing networks with MTU values that don't reflect configuration + will receive new MTU values after a controller upgrade. Note that to + propagate new correct MTU values to your backend, you may need to resync + all agents that set up ports, as well as re-attach VIFs to affected + instances. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/l2_adjacency-e6e54e5ff9aad9b7.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/l2_adjacency-e6e54e5ff9aad9b7.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/l2_adjacency-e6e54e5ff9aad9b7.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/l2_adjacency-e6e54e5ff9aad9b7.yaml 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,7 @@ +--- +features: + - | + The new l2_adjacency extension adds an l2_adjacency field to the network, + to indicate whether or not there is guaranteed L2 adjacency between the + ports on that network. Routed network implementations would typically set + l2_adjacency to False. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/l3-agent-extensions-b348ff26aec0fe88.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/l3-agent-extensions-b348ff26aec0fe88.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/l3-agent-extensions-b348ff26aec0fe88.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/l3-agent-extensions-b348ff26aec0fe88.yaml 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,5 @@ +--- +features: + - The neutron L3 agent now has the ability to load + agent extensions, which allows other services to + integrate without additional agent changes. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/of_interface-native-by-default-0c07bdbd7365230a.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/of_interface-native-by-default-0c07bdbd7365230a.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/of_interface-native-by-default-0c07bdbd7365230a.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/of_interface-native-by-default-0c07bdbd7365230a.yaml 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,15 @@ +--- +prelude: > + Prior to Newton, the neutron-openvswitch-agent used the 'ovs-ofctl' + of_interface driver by default. In Newton, 'of_interface' + defaults to 'native'. This mostly eliminates spawning ovs-ofctl + and improves performance a little. +upgrade: + - To retain the old default for neutron-openvswitch-agent, use + 'of_interface = ovs-ofctl' in the '[ovs]' section of your + openvswitch agent configuration file. + - By default, the native interface will have the Ryu controller + listen on 127.0.0.1:6633. The listen address can be configured + with the of_listen_address and of_listen_port options.
Ensure that + the controller has permission to listen at the configured + address. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/overlay_ip_version-ml2-e6438b570844ef5c.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/overlay_ip_version-ml2-e6438b570844ef5c.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/overlay_ip_version-ml2-e6438b570844ef5c.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/overlay_ip_version-ml2-e6438b570844ef5c.yaml 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,17 @@ +--- +prelude: > + Properly calculate overlay (tunnel) protocol overhead for + environments using IPv4 or IPv6 endpoints. The ML2 plug-in + configuration file contains a new configuration option, + 'overlay_ip_version', in the '[ml2]' section that indicates + the IP version of all overlay network endpoints. Use '4' for + IPv4 and '6' for IPv6. Defaults to '4'. Additionally, all + layer-2 agents must use the same IP version for endpoints. +upgrade: + - Define the 'overlay_ip_version' option and value + appropriate for the environment. Only required if not + using the default of '4'. +other: + - The value of the 'overlay_ip_version' option adds either + 20 bytes for IPv4 or 40 bytes for IPv6 to determine the total + tunnel overhead amount. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/ovsdb-native-by-default-38835d6963592396.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/ovsdb-native-by-default-38835d6963592396.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/ovsdb-native-by-default-38835d6963592396.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/ovsdb-native-by-default-38835d6963592396.yaml 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,19 @@ +--- +prelude: > + Prior to Newton, the default option for 'ovsdb_interface' + was 'vsctl'. In Newton, 'ovsdb_interface' defaults to + 'native'. This change switches communication + with OVSDB from the ovs-vsctl tool to the Open vSwitch Python + API to improve out-of-the-box performance for typical + deployments. +upgrade: + - To keep the old default value, use 'ovsdb_interface = vsctl' + in the '[ovs]' section of openvswitch_agent.ini + (common path '/etc/neutron/plugins/ml2/openvswitch_agent.ini') + if there is a separate openvswitch agent configuration file; + otherwise apply the changes mentioned above to ml2_conf.ini + (common path '/etc/neutron/plugins/ml2/ml2_conf.ini'). + - The native interface configures ovsdb-server to listen for + connections on 127.0.0.1:6640 by default. The address can be + configured with the ovsdb_connection config option. Ensure that + ovsdb-server has permissions to listen on the configured address. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/pluggable-ipam-is-default-15c2ee15dc5b4a7b.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/pluggable-ipam-is-default-15c2ee15dc5b4a7b.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/pluggable-ipam-is-default-15c2ee15dc5b4a7b.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/pluggable-ipam-is-default-15c2ee15dc5b4a7b.yaml 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,12 @@ +--- +prelude: > + The internal pluggable IPAM implementation -- added in the Liberty release + -- is now the default for both old and new deployments. Old deployments + are unconditionally switched to pluggable IPAM during upgrade. + Old non-pluggable IPAM is deprecated and will be removed from the code base.
+upgrade: + - During the upgrade, the 'internal' IPAM driver becomes the default for the + 'ipam_driver' config option, and data is migrated to new tables using an + alembic migration. +deprecations: + - The non-pluggable IPAM implementation is deprecated and will be removed in + the Newton release cycle. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/remove-network_device_mtu-option-a1a96e99dc7f0a02.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/remove-network_device_mtu-option-a1a96e99dc7f0a02.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/remove-network_device_mtu-option-a1a96e99dc7f0a02.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/remove-network_device_mtu-option-a1a96e99dc7f0a02.yaml 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,8 @@ +--- +upgrade: + - The network_device_mtu option is removed. Existing + users of the option are advised to adopt new + configuration options to accommodate their + underlying physical infrastructure. The relevant + options are global_physnet_mtu for all plugins, + and also path_mtu and physical_network_mtus for ML2 + (a configuration sketch follows these notes). diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/remove-quota_items-d50b4672dd31ea3e.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/remove-quota_items-d50b4672dd31ea3e.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/remove-quota_items-d50b4672dd31ea3e.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/remove-quota_items-d50b4672dd31ea3e.yaml 2016-08-03 20:10:34.000000000 +0000 @@ -0,0 +1,8 @@ +--- +prelude: > + The 'quota_items' configuration option has been removed from + the neutron.conf file. This option had been deprecated since + the Liberty release and no longer has any effect. +upgrade: + - Remove the 'quota_items' configuration option from + the neutron.conf file. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/remove-subnetpool-config-b15dbe59237aee7e.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/remove-subnetpool-config-b15dbe59237aee7e.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/remove-subnetpool-config-b15dbe59237aee7e.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/remove-subnetpool-config-b15dbe59237aee7e.yaml 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,5 @@ +--- +upgrade: + - The configuration options for ``default_ipv4_subnet_pool`` and + ``default_ipv6_subnet_pool`` have been removed. Please use the + ``is_default`` option of the create/update subnetpool API instead. diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/rename-tenant-to-project-b19a4068f8625969.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/rename-tenant-to-project-b19a4068f8625969.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/rename-tenant-to-project-b19a4068f8625969.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/rename-tenant-to-project-b19a4068f8625969.yaml 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,5 @@ +--- +upgrade: + - | + The tenant_id column has been renamed to project_id. + This database migration must be applied as an offline migration.
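The remove-network_device_mtu note above names the three replacement options. A minimal sketch of how they fit together, with purely illustrative MTU values and physnet names:

.. code-block:: ini

    # neutron.conf: MTU of the underlying physical network,
    # consulted by all plugins
    [DEFAULT]
    global_physnet_mtu = 9000

    # ml2_conf.ini: ML2-specific refinements
    [ml2]
    # maximum MTU of the path used by overlay (tunnel) networks
    path_mtu = 9000
    # per-physnet overrides where the global value does not apply
    physical_network_mtus = physnet1:9000,physnet2:1500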
diff -Nru neutron-9.0.0~b2~dev280/releasenotes/notes/sorting-pagination-extensions-e66e99e2a8f5e563.yaml neutron-9.0.0~b3~dev557/releasenotes/notes/sorting-pagination-extensions-e66e99e2a8f5e563.yaml --- neutron-9.0.0~b2~dev280/releasenotes/notes/sorting-pagination-extensions-e66e99e2a8f5e563.yaml 1970-01-01 00:00:00.000000000 +0000 +++ neutron-9.0.0~b3~dev557/releasenotes/notes/sorting-pagination-extensions-e66e99e2a8f5e563.yaml 2016-08-29 20:05:49.000000000 +0000 @@ -0,0 +1,6 @@ +--- +features: + - New API extensions, 'sorting' and 'pagination', have been added to allow + API users to detect if sorting and pagination features are enabled. These + features are controlled by ``allow_sorting`` and ``allow_pagination`` + configuration options. diff -Nru neutron-9.0.0~b2~dev280/requirements.txt neutron-9.0.0~b3~dev557/requirements.txt --- neutron-9.0.0~b2~dev280/requirements.txt 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/requirements.txt 2016-08-29 20:05:49.000000000 +0000 @@ -9,7 +9,7 @@ Routes!=2.0,!=2.3.0,>=1.12.3;python_version!='2.7' # MIT debtcollector>=1.2.0 # Apache-2.0 eventlet!=0.18.3,>=0.18.2 # MIT -pecan>=1.0.0 # BSD +pecan!=1.0.2,!=1.0.3,!=1.0.4,>=1.0.0 # BSD greenlet>=0.3.2 # MIT httplib2>=0.7.5 # MIT requests>=2.10.0 # Apache-2.0 @@ -17,35 +17,35 @@ keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0 netaddr!=0.7.16,>=0.7.12 # BSD netifaces>=0.10.4 # MIT -neutron-lib>=0.2.0 # Apache-2.0 -python-neutronclient>=4.2.0 # Apache-2.0 +neutron-lib>=0.4.0 # Apache-2.0 +python-neutronclient>=5.1.0 # Apache-2.0 retrying!=1.3.0,>=1.2.3 # Apache-2.0 -ryu!=4.1,!=4.2,!=4.2.1,>=3.30 # Apache-2.0 +ryu!=4.1,!=4.2,!=4.2.1,!=4.4,>=3.30 # Apache-2.0 SQLAlchemy<1.1.0,>=1.0.10 # MIT WebOb>=1.2.3 # MIT -keystoneauth1>=2.7.0 # Apache-2.0 +keystoneauth1>=2.10.0 # Apache-2.0 alembic>=0.8.4 # MIT six>=1.9.0 # MIT -stevedore>=1.10.0 # Apache-2.0 +stevedore>=1.16.0 # Apache-2.0 oslo.cache>=1.5.0 # Apache-2.0 oslo.concurrency>=3.8.0 # Apache-2.0 -oslo.config>=3.10.0 # Apache-2.0 -oslo.context>=2.4.0 # Apache-2.0 -oslo.db>=4.1.0 # Apache-2.0 +oslo.config>=3.14.0 # Apache-2.0 +oslo.context>=2.9.0 # Apache-2.0 +oslo.db>=4.10.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=1.14.0 # Apache-2.0 oslo.messaging>=5.2.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.policy>=1.9.0 # Apache-2.0 oslo.reports>=0.6.0 # Apache-2.0 -oslo.rootwrap>=2.0.0 # Apache-2.0 +oslo.rootwrap>=5.0.0 # Apache-2.0 oslo.serialization>=1.10.0 # Apache-2.0 oslo.service>=1.10.0 # Apache-2.0 -oslo.utils>=3.11.0 # Apache-2.0 -oslo.versionedobjects>=1.9.1 # Apache-2.0 -osprofiler>=1.3.0 # Apache-2.0 +oslo.utils>=3.16.0 # Apache-2.0 +oslo.versionedobjects>=1.13.0 # Apache-2.0 +osprofiler>=1.4.0 # Apache-2.0 ovs>=2.5.0;python_version=='2.7' # Apache-2.0 -ovs>=2.6.0.dev1;python_version>='3.4' # Apache-2.0 +ovs>=2.6.0.dev3;python_version>='3.4' # Apache-2.0 python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0 python-designateclient>=1.5.0 # Apache-2.0 diff -Nru neutron-9.0.0~b2~dev280/setup.cfg neutron-9.0.0~b3~dev557/setup.cfg --- neutron-9.0.0~b2~dev280/setup.cfg 2016-06-27 15:31:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/setup.cfg 2016-08-29 20:06:03.000000000 +0000 @@ -5,7 +5,7 @@ README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org -home-page = http://www.openstack.org/ +home-page = http://docs.openstack.org/developer/neutron/ classifier = Environment :: OpenStack Intended Audience :: Information Technology @@ -17,6 +17,7 @@ Programming Language :: Python :: 2.7 Programming 
Language :: Python :: 3 Programming Language :: Python :: 3.4 + Programming Language :: Python :: 3.5 [files] packages = @@ -81,7 +82,9 @@ auto_allocate = neutron.services.auto_allocate.plugin:Plugin segments = neutron.services.segments.plugin:Plugin network_ip_availability = neutron.services.network_ip_availability.plugin:NetworkIPAvailabilityPlugin + revisions = neutron.services.revisions.revision_plugin:RevisionPlugin timestamp_core = neutron.services.timestamp.timestamp_plugin:TimeStampPlugin + trunk = neutron.services.trunk.plugin:TrunkPlugin neutron.qos.notification_drivers = message_queue = neutron.services.qos.notification_drivers.message_queue:RpcQosServiceNotificationDriver neutron.ml2.type_drivers = @@ -111,6 +114,7 @@ internal = neutron.ipam.drivers.neutrondb_ipam.driver:NeutronDbPool neutron.agent.l2.extensions = qos = neutron.agent.l2.extensions.qos:QosAgentExtension + fdb = neutron.agent.l2.extensions.fdb_population:FdbPopulationAgentExtension neutron.qos.agent_drivers = ovs = neutron.plugins.ml2.drivers.openvswitch.agent.extension_drivers.qos_driver:QosOVSAgentDriver sriov = neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers.qos_driver:QosSRIOVAgentDriver diff -Nru neutron-9.0.0~b2~dev280/TESTING.rst neutron-9.0.0~b3~dev557/TESTING.rst --- neutron-9.0.0~b2~dev280/TESTING.rst 2016-06-08 18:00:11.000000000 +0000 +++ neutron-9.0.0~b3~dev557/TESTING.rst 2016-08-03 20:10:33.000000000 +0000 @@ -121,6 +121,28 @@ This is allowed by the fact that the method was built to be testable - The method has clear input and output with no side effects. +You can get oslo.db to generate a file-based sqlite database by setting +OS_TEST_DBAPI_ADMIN_CONNECTION to a file-based URL as described in `this +mailing list post`__. This file will be created but (confusingly) won't be the +actual file used for the database. To find the actual file, set a breakpoint +in your test method and inspect self.engine.url. + +__ file-based-sqlite_ + +.. code-block:: shell + + $ OS_TEST_DBAPI_ADMIN_CONNECTION=sqlite:///sqlite.db .tox/py27/bin/python -m \ testtools.run neutron.tests.unit... ... (Pdb) self.engine.url sqlite:////tmp/iwbgvhbshp.db + +Now, you can inspect this file using sqlite3. + +.. code-block:: shell + + $ sqlite3 /tmp/iwbgvhbshp.db + Functional Tests ~~~~~~~~~~~~~~~~ @@ -637,3 +659,4 @@ .. [#pudb] PUDB debugger: https://pypi.python.org/pypi/pudb +..
_file-based-sqlite: http://lists.openstack.org/pipermail/openstack-dev/2016-July/099861.html diff -Nru neutron-9.0.0~b2~dev280/test-requirements.txt neutron-9.0.0~b3~dev557/test-requirements.txt --- neutron-9.0.0~b2~dev280/test-requirements.txt 2016-06-24 21:02:52.000000000 +0000 +++ neutron-9.0.0~b3~dev557/test-requirements.txt 2016-08-03 20:10:34.000000000 +0000 @@ -7,7 +7,7 @@ fixtures>=3.0.0 # Apache-2.0/BSD mock>=2.0 # BSD python-subunit>=0.0.18 # Apache-2.0/BSD -requests-mock>=0.7.0 # Apache-2.0 +requests-mock>=1.0 # Apache-2.0 sphinx!=1.3b1,<1.3,>=1.2.1 # BSD oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD @@ -19,7 +19,7 @@ os-testr>=0.7.0 # Apache-2.0 ddt>=1.0.1 # MIT pylint==1.4.5 # GPLv2 -reno>=1.6.2 # Apache2 +reno>=1.8.0 # Apache2 # Needed to run DB commands in virtualenvs PyMySQL>=0.6.2 # MIT License -tempest>=11.0.0 # Apache-2.0 +tempest>=12.1.0 # Apache-2.0 diff -Nru neutron-9.0.0~b2~dev280/tools/abandon_old_reviews.sh neutron-9.0.0~b3~dev557/tools/abandon_old_reviews.sh --- neutron-9.0.0~b2~dev280/tools/abandon_old_reviews.sh 2016-05-12 19:51:41.000000000 +0000 +++ neutron-9.0.0~b3~dev557/tools/abandon_old_reviews.sh 2016-08-03 20:10:34.000000000 +0000 @@ -26,7 +26,7 @@ # Note: due to gerrit bug somewhere, this double posts messages. :( -# first purge the all reviews that are more than 4w old and blocked by a core -2 +# first purge all the reviews that are more than 4w old and blocked by a core -2 if [ "$1" = "--dry-run" ]; then echo "Enabling dry run mode" @@ -50,9 +50,19 @@ fi } -PROJECTS="(project:openstack/neutron OR project:openstack/neutron-fwaas OR \ - project:openstack/neutron-lbaas OR project:openstack/neutron-vpnaas OR \ - project:openstack/python-neutronclient OR project:openstack/neutron-specs)" +PROJECTS="($( +python - <