diff -Nru magnum-6.0.1/AUTHORS magnum-6.1.0/AUTHORS --- magnum-6.0.1/AUTHORS 2018-02-09 15:27:51.000000000 +0000 +++ magnum-6.1.0/AUTHORS 2018-02-23 14:05:48.000000000 +0000 @@ -46,6 +46,7 @@ Cristovao Cordeiro Dane LeBlanc Daneyon Hansen +Daniel Abad Danil Golov Davanum Srinivas Davanum Srinivas @@ -67,6 +68,8 @@ Fenghuafang <449171342@qq.com> Ferenc Horváth Flavio Percoco +Florian Haas +Georgiy Kutsurua Grzegorz Grasza Gyorgy Szombathelyi HackToday @@ -109,6 +112,7 @@ Lars Butler Lin Lin Yang +Lingxian Kong Lu lei Luong Anh Tuan M V P Nitesh diff -Nru magnum-6.0.1/ChangeLog magnum-6.1.0/ChangeLog --- magnum-6.0.1/ChangeLog 2018-02-09 15:27:51.000000000 +0000 +++ magnum-6.1.0/ChangeLog 2018-02-23 14:05:48.000000000 +0000 @@ -1,6 +1,30 @@ CHANGES ======= +6.1.0 +----- + +* Update kubernetes dashboard to v1.8.3 +* k8s: allow passing extra options to kube daemons +* [kubernetes] add ingress controller +* kuberntes: Disable the scale\_manager for scale down +* Run etcd and flanneld in a system container +* Admin can now delete clusters in any project +* Driver's name are case sensitive +* Support calico as network driver +* Add support for Octavia resources in Heat +* Using v1.9.3 as default k8s version +* Add disabled\_drivers config option +* federation api: api endpoints +* [k8s] allow enabling kubernetes cert manager api +* Now user can update label values in cluster-template +* Add missed space in k8s template file +* Document use of kube\_tag label +* Updated from global requirements +* k8s: Fix kubelet, add RBAC and pass e2e tests +* Update UPPER\_CONSTRAINTS\_FILE for stable/queens +* Update .gitreview for stable/queens + 6.0.1 ----- diff -Nru magnum-6.0.1/debian/changelog magnum-6.1.0/debian/changelog --- magnum-6.0.1/debian/changelog 2018-02-21 19:24:29.000000000 +0000 +++ magnum-6.1.0/debian/changelog 2018-02-27 00:24:31.000000000 +0000 @@ -1,8 +1,16 @@ -magnum (6.0.1-0ubuntu1~cloud0) xenial-queens; urgency=medium +magnum (6.1.0-0ubuntu1~cloud0) xenial-queens; urgency=medium * New upstream release for the Ubuntu Cloud Archive. - -- Openstack Ubuntu Testing Bot Wed, 21 Feb 2018 19:24:29 +0000 + -- Openstack Ubuntu Testing Bot Tue, 27 Feb 2018 00:24:31 +0000 + +magnum (6.1.0-0ubuntu1) bionic; urgency=medium + + * New upstream point release for OpenStack Queens. + * d/control: Align (Build-)Depends with upstream. + * d/p/use-lower-kubernetesclient.patch: Rebased. 
+ + -- Corey Bryant Mon, 26 Feb 2018 17:48:34 -0500 magnum (6.0.1-0ubuntu1) bionic; urgency=medium diff -Nru magnum-6.0.1/debian/control magnum-6.1.0/debian/control --- magnum-6.0.1/debian/control 2018-02-14 17:09:33.000000000 +0000 +++ magnum-6.1.0/debian/control 2018-02-26 22:48:34.000000000 +0000 @@ -47,15 +47,15 @@ python-netaddr (>= 0.7.18), python-neutronclient (>= 1:6.3.0), python-novaclient (>= 2:9.1.0), - python-openstackdocstheme (>= 1.17.0), + python-openstackdocstheme (>= 1.18.1), python-os-api-ref (>= 1.4.0), python-os-testr (>= 1.0.0), - python-oslo.concurrency (>= 3.20.0), + python-oslo.concurrency (>= 3.25.0), python-oslo.config (>= 1:5.1.0), python-oslo.context (>= 1:2.19.2), python-oslo.db (>= 4.27.0), python-oslo.i18n (>= 3.15.3), - python-oslo.log (>= 3.30.0), + python-oslo.log (>= 3.36.0), python-oslo.messaging (>= 5.29.0), python-oslo.middleware (>= 3.31.0), python-oslo.policy (>= 1.30.0), @@ -63,8 +63,8 @@ python-oslo.serialization (>= 2.18.0), python-oslo.service (>= 1.24.0), python-oslo.utils (>= 3.33.0), - python-oslo.versionedobjects (>= 1.28.0), - python-oslotest (>= 1:1.10.0), + python-oslo.versionedobjects (>= 1.31.2), + python-oslotest (>= 1:3.2.0), python-osprofiler (>= 1.4.0), python-paramiko (>= 2.0), python-pecan (>= 1.0.0), @@ -76,7 +76,7 @@ python-sqlalchemy (>= 1.0.10), python-stevedore (>= 1:1.20.0), python-subunit (>= 1.0.0), - python-taskflow (>= 2.7.0), + python-taskflow (>= 2.16.0), python-tempest (>= 1:16.1.0), python-testrepository (>= 0.0.18), python-testscenarios (>= 0.4), @@ -183,12 +183,12 @@ python-netaddr (>= 0.7.18), python-neutronclient (>= 1:6.3.0), python-novaclient (>= 2:9.1.0), - python-oslo.concurrency (>= 3.20.0), + python-oslo.concurrency (>= 3.25.0), python-oslo.config (>= 1:5.1.0), python-oslo.context (>= 1:2.19.2), python-oslo.db (>= 4.27.0), python-oslo.i18n (>= 3.15.3), - python-oslo.log (>= 3.30.0), + python-oslo.log (>= 3.36.0), python-oslo.messaging (>= 5.29.0), python-oslo.middleware (>= 3.31.0), python-oslo.policy (>= 1.30.0), @@ -196,7 +196,7 @@ python-oslo.serialization (>= 2.18.0), python-oslo.service (>= 1.24.0), python-oslo.utils (>= 3.33.0), - python-oslo.versionedobjects (>= 1.28.0), + python-oslo.versionedobjects (>= 1.31.2), python-osprofiler (>= 1.4.0), python-paramiko (>= 2.0), python-pbr (>= 2.0.0), @@ -208,7 +208,7 @@ python-six (>= 1.10.0), python-sqlalchemy (>= 1.0.10), python-stevedore (>= 1:1.20.0), - python-taskflow (>= 2.7.0), + python-taskflow (>= 2.16.0), python-urllib3 (>= 1.15.1), python-webob (>= 1:1.7.1), python-werkzeug (>= 0.7), diff -Nru magnum-6.0.1/debian/patches/use-lower-kubernetesclient.patch magnum-6.1.0/debian/patches/use-lower-kubernetesclient.patch --- magnum-6.0.1/debian/patches/use-lower-kubernetesclient.patch 2018-02-14 17:09:33.000000000 +0000 +++ magnum-6.1.0/debian/patches/use-lower-kubernetesclient.patch 2018-02-26 22:48:34.000000000 +0000 @@ -22,7 +22,7 @@ --- a/magnum/tests/functional/python_client_base.py +++ b/magnum/tests/functional/python_client_base.py -@@ -440,23 +440,23 @@ +@@ -445,23 +445,23 @@ def setUpClass(cls): super(BaseK8sTest, cls).setUpClass() cls.kube_api_url = cls.cs.clusters.get(cls.cluster.uuid).api_address @@ -60,4 +60,4 @@ +kubernetes>=1.0.0 # Apache-2.0 marathon!=0.9.1,>=0.8.6 # MIT netaddr>=0.7.18 # BSD - oslo.concurrency>=3.20.0 # Apache-2.0 + oslo.concurrency>=3.25.0 # Apache-2.0 diff -Nru magnum-6.0.1/devstack/plugin.sh magnum-6.1.0/devstack/plugin.sh --- magnum-6.0.1/devstack/plugin.sh 2018-02-09 15:24:26.000000000 +0000 +++ 
magnum-6.1.0/devstack/plugin.sh 2018-02-23 14:02:04.000000000 +0000 @@ -13,7 +13,7 @@ echo_summary "Installing magnum" install_magnum - MAGNUM_GUEST_IMAGE_URL=${MAGNUM_GUEST_IMAGE_URL:-"https://download.fedoraproject.org/pub/alt/atomic/stable/Fedora-Atomic-26-20170723.0/CloudImages/x86_64/images/Fedora-Atomic-26-20170723.0.x86_64.qcow2"} + MAGNUM_GUEST_IMAGE_URL=${MAGNUM_GUEST_IMAGE_URL:-"https://download.fedoraproject.org/pub/alt/atomic/stable/Fedora-Atomic-27-20180212.2/CloudImages/x86_64/images/Fedora-Atomic-27-20180212.2.x86_64.qcow2"} IMAGE_URLS+=",${MAGNUM_GUEST_IMAGE_URL}" LIBS_FROM_GIT="${LIBS_FROM_GIT},python-magnumclient" diff -Nru magnum-6.0.1/doc/source/contributor/quickstart.rst magnum-6.1.0/doc/source/contributor/quickstart.rst --- magnum-6.0.1/doc/source/contributor/quickstart.rst 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/doc/source/contributor/quickstart.rst 2018-02-23 14:02:11.000000000 +0000 @@ -208,7 +208,7 @@ +--------------------------------------+------------------------------------+--------+ | ID | Name | Status | +--------------------------------------+------------------------------------+--------+ - | 0bc132b1-ee91-4bd8-b0fd-19deb57fb39f | Fedora-Atomic-26-20170723.0.x86_64 | active | + | 0bc132b1-ee91-4bd8-b0fd-19deb57fb39f | Fedora-Atomic-27-20180212.2.x86_64 | active | | 7537bbf2-f1c3-47da-97bb-38c09007e146 | cirros-0.3.5-x86_64-disk | active | +--------------------------------------+------------------------------------+--------+ @@ -253,7 +253,7 @@ Fedora Atomic:: openstack coe cluster template create k8s-cluster-template \ - --image Fedora-Atomic-26-20170723.0.x86_64 \ + --image Fedora-Atomic-27-20180212.2.x86_64 \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ @@ -397,7 +397,12 @@ req_extensions = req_ext prompt = no [req_distinguished_name] - CN = Your Name + CN = admin + O = system:masters + OU=OpenStack/Magnum + C=US + ST=TX + L=Austin [req_ext] extendedKeyUsage = clientAuth END @@ -524,7 +529,7 @@ 'swarm' as the COE:: openstack coe cluster template create swarm-cluster-template \ - --image Fedora-Atomic-26-20170723.0.x86_64 \ + --image Fedora-Atomic-27-20180212.2.x86_64 \ --keypair testkey \ --external-network public \ --dns-nameserver 8.8.8.8 \ diff -Nru magnum-6.0.1/doc/source/install/launch-instance.rst magnum-6.1.0/doc/source/install/launch-instance.rst --- magnum-6.0.1/doc/source/install/launch-instance.rst 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/doc/source/install/launch-instance.rst 2018-02-23 14:02:04.000000000 +0000 @@ -116,7 +116,7 @@ .. code-block:: console - $ wget https://download.fedoraproject.org/pub/alt/atomic/stable/Fedora-Atomic-26-20170723.0/CloudImages/x86_64/images/Fedora-Atomic-26-20170723.0.x86_64.qcow2 + $ wget https://download.fedoraproject.org/pub/alt/atomic/stable/Fedora-Atomic-27-20180212.2/CloudImages/x86_64/images/Fedora-Atomic-27-20180212.2.x86_64.qcow2 #. 
Register the image to the Image service setting the ``os_distro`` property to ``fedora-atomic``: @@ -126,7 +126,7 @@ $ openstack image create \ --disk-format=qcow2 \ --container-format=bare \ - --file=Fedora-Atomic-26-20170723.0.x86_64.qcow2\ + --file=Fedora-Atomic-27-20180212.2.x86_64.qcow2\ --property os_distro='fedora-atomic' \ fedora-atomic-latest +------------------+------------------------------------------------------+ diff -Nru magnum-6.0.1/doc/source/user/index.rst magnum-6.1.0/doc/source/user/index.rst --- magnum-6.0.1/doc/source/user/index.rst 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/doc/source/user/index.rst 2018-02-23 14:02:04.000000000 +0000 @@ -130,7 +130,8 @@ Mesos ubuntu ========== ===================== - This is a mandatory parameter and there is no default value. + This is a mandatory parameter and there is no default value. Note that the + os_distro attribute is case sensitive. --keypair \ The name of the SSH keypair to configure in the cluster servers @@ -175,11 +176,13 @@ =========== ================= ======== COE Network-Driver Default =========== ================= ======== - Kubernetes Flannel Flannel - Swarm Docker, Flannel Flannel - Mesos Docker Docker + Kubernetes flannel, calico flannel + Swarm docker, flannel flannel + Mesos docker docker =========== ================= ======== + Note that the network driver name is case sensitive. + --volume-driver \ The name of a volume driver for managing the persistent storage for the containers. The functionality supported are specific to the @@ -188,11 +191,13 @@ ============= ============= =========== COE Volume-Driver Default ============= ============= =========== - Kubernetes Cinder No Driver - Swarm Rexray No Driver - Mesos Rexray No Driver + Kubernetes cinder No Driver + Swarm rexray No Driver + Mesos rexray No Driver ============= ============= =========== + Note that the volume driver name is case sensitive. + --dns-nameserver \ The DNS nameserver for the servers and containers in the cluster to use. This is configured in the private Neutron network for the cluster. 
The @@ -331,9 +336,18 @@ +---------------------------------------+--------------------+---------------+ | `grafana_admin_passwd`_ | (any string) | "admin" | +---------------------------------------+--------------------+---------------+ +| `kube_tag`_ | see below | see below | ++---------------------------------------+--------------------+---------------+ +| `etcd_tag`_ | see below | see below | ++---------------------------------------+--------------------+---------------+ +| `flannel_tag`_ | see below | see below | ++---------------------------------------+--------------------+---------------+ | `kube_dashboard_enabled`_ | - true | true | | | - false | | +---------------------------------------+--------------------+---------------+ +| `influx_grafana_dashboard_enabled`_ | - true | false | +| | - false | | ++---------------------------------------+--------------------+---------------+ | `docker_volume_type`_ | see below | see below | +---------------------------------------+--------------------+---------------+ | `etcd_volume_size`_ | etcd storage | 0 | @@ -341,10 +355,28 @@ +---------------------------------------+--------------------+---------------+ | `container_infra_prefix`_ | see below | "" | +---------------------------------------+--------------------+---------------+ -+---------------------------------------+--------------------+---------------+ | `availability_zone`_ | AZ for the cluster | "" | | | nodes | | +---------------------------------------+--------------------+---------------+ +| `cert_manager_api`_ | see below | false | ++---------------------------------------+--------------------+---------------+ +| `ingress_controller`_ | see below | "" | ++---------------------------------------+--------------------+---------------+ +| `ingress_controller_role`_ | see below | "ingress" | ++---------------------------------------+--------------------+---------------+ +| `kubelet_options`_ | extra kubelet args | "" | ++---------------------------------------+--------------------+---------------+ +| `kubeapi_options`_ | extra kubeapi args | "" | ++---------------------------------------+--------------------+---------------+ +| `kubescheduler_options`_ | extra kubescheduler| "" | +| | args | | ++---------------------------------------+--------------------+---------------+ +| `kubecontroller_options`_ | extra | "" | +| | kubecontroller args| | ++---------------------------------------+--------------------+---------------+ +| `kubeproxy_options`_ | extra kubeproxy | "" | +| | args | | ++---------------------------------------+--------------------+---------------+ Cluster ------- @@ -1082,6 +1114,7 @@ which assumes an operator has cloned all expected images in myregistry.example.com/mycloud. Images that must be mirrored: + * docker.io/coredns/coredns:011 * docker.io/grafana/grafana:latest * docker.io/openstackmagnum/kubernetes-apiserver @@ -1089,15 +1122,76 @@ * docker.io/openstackmagnum/kubernetes-kubelet * docker.io/openstackmagnum/kubernetes-proxy * docker.io/openstackmagnum/kubernetes-scheduler + * docker.io/openstackmagnum/etcd + * docker.io/openstackmagnum/flannel * docker.io/prom/node-exporter:latest * docker.io/prom/prometheus:latest * gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.1 * gcr.io/google_containers/pause:3.0 +_`kube_tag` + This label allows users to select `a specific Kubernetes release, + based on its container tag + `_. + If unset, the current Magnum version's default Kubernetes release is + installed. 
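+
+  For example, a cluster template pinned to the v1.9.3 images could be
+  created as follows (a minimal sketch; the template name, image and
+  keypair below are placeholders)::
+
+    openstack coe cluster template create k8s-v193-template \
+      --image Fedora-Atomic-27-20180212.2.x86_64 \
+      --keypair testkey \
+      --external-network public \
+      --coe kubernetes \
+      --labels kube_tag=v1.9.3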
+
+_`etcd_tag`
+  This label allows users to select `a specific etcd version,
+  based on its container tag
+  `_.
+  If unset, the default etcd version of the current Magnum release is
+  used. For Queens, this is v3.2.7.
+
+_`flannel_tag`
+  This label allows users to select `a specific flannel version,
+  based on its container tag
+  `_.
+  If unset, the default flannel version of the current Magnum release is
+  used. For Queens, this is v0.9.0.
+
 _`kube_dashboard_enabled`
   This label triggers the deployment of the kubernetes dashboard.
   The default value is 1, meaning it will be enabled.

+_`cert_manager_api`
+  This label enables the kubernetes `certificate manager api
+  `_.
+
+_`kubelet_options`
+  This label can hold any additional options to be passed to the kubelet.
+  For more details, refer to the `kubelet admin guide
+  `_.
+  By default no additional options are passed.
+
+_`kubeproxy_options`
+  This label can hold any additional options to be passed to the kube proxy.
+  For more details, refer to the `kube proxy admin guide
+  `_.
+  By default no additional options are passed.
+
+_`kubecontroller_options`
+  This label can hold any additional options to be passed to the kube
+  controller manager. For more details, refer to the `kube controller
+  manager admin guide
+  `_.
+  By default no additional options are passed.
+
+_`kubeapi_options`
+  This label can hold any additional options to be passed to the kube api
+  server. For more details, refer to the `kube api admin guide
+  `_.
+  By default no additional options are passed.
+
+_`kubescheduler_options`
+  This label can hold any additional options to be passed to the kube
+  scheduler. For more details, refer to the `kube scheduler admin guide
+  `_.
+  By default no additional options are passed.
+
+_`influx_grafana_dashboard_enabled`
+  The kubernetes dashboard comes with heapster enabled. If this
+  label is set, an influxdb and grafana instance will be deployed,
+  heapster will push data to influxdb, and grafana will visualize it.
+
 External load balancer for services
 -----------------------------------

@@ -1124,6 +1218,29 @@
 Refer to the `Kubernetes External Load Balancer`_ section for more details.

+Ingress Controller
+------------------
+
+In addition to the LoadBalancer described above, Kubernetes can also
+be configured with an Ingress Controller. Ingress can provide load
+balancing, SSL termination and name-based virtual hosting.
+
+Magnum allows selecting one of multiple controller options via the
+'ingress_controller' label. Check the Kubernetes documentation to define
+your own Ingress resources.
+
+_`ingress_controller`
+  This label sets the Ingress Controller to be used. Currently only traefik
+  is supported. The default is '', meaning no Ingress Controller is
+  configured.
+
+_`ingress_controller_role`
+  This label defines the role nodes should have to run an instance of the
+  Ingress Controller. This gives operators full control over which nodes
+  run an instance of the controller; the label should be set on multiple
+  nodes for availability. Default is 'ingress'.
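+
+  For example, a template that enables the traefik controller could be
+  created as follows (a minimal sketch; the template name, image and
+  keypair below are placeholders)::
+
+    openstack coe cluster template create ingress-template \
+      --image Fedora-Atomic-27-20180212.2.x86_64 \
+      --keypair testkey \
+      --external-network public \
+      --coe kubernetes \
+      --labels ingress_controller=traefik,ingress_controller_role=ingress
+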
+  An example of setting the role on a Kubernetes node would be::
+
+    kubectl label node <node-name> role=ingress

 Swarm
 =====

@@ -1749,6 +1866,25 @@
     extendedKeyUsage = clientAuth
     END

+   For RBAC-enabled kubernetes clusters you need to use admin as the
+   subject name (CN=) and system:masters as the Organization (O=)::
+
+    $ cat > client.conf << END
+    [req]
+    distinguished_name = req_distinguished_name
+    req_extensions = req_ext
+    prompt = no
+    [req_distinguished_name]
+    CN = admin
+    O = system:masters
+    OU=OpenStack/Magnum
+    C=US
+    ST=TX
+    L=Austin
+    [req_ext]
+    extendedKeyUsage = clientAuth
+    END
+
    Once you have client.conf, you can run the openssl 'req' command to
    generate the CSR::

@@ -1979,13 +2115,15 @@
    The network driver name for instantiating container networks.
    Currently, the following network drivers are supported:

-   +--------+-------------+-----------+-------------+
-   | Driver | Kubernetes  | Swarm     | Mesos       |
-   +========+=============+===========+=============+
-   | Flannel| supported   | supported | unsupported |
-   +--------+-------------+-----------+-------------+
-   | Docker | unsupported | supported | supported   |
-   +--------+-------------+-----------+-------------+
+   +--------+-------------+-------------+-------------+
+   | Driver | Kubernetes  | Swarm       | Mesos       |
+   +========+=============+=============+=============+
+   | Flannel| supported   | supported   | unsupported |
+   +--------+-------------+-------------+-------------+
+   | Docker | unsupported | supported   | supported   |
+   +--------+-------------+-------------+-------------+
+   | Calico | supported   | unsupported | unsupported |
+   +--------+-------------+-------------+-------------+

    If not specified, the default driver is Flannel for Kubernetes, and
    Docker for Swarm and Mesos.

@@ -2021,6 +2159,26 @@
 is not specified in the ClusterTemplate, *host-gw* is the best choice for
 the Flannel backend.

+When Calico is specified as the network driver, the following
+optional labels can be added:
+
+_`calico_ipv4pool`
+  IPv4 network in CIDR format which is the IP pool, from which Pod IPs will
+  be chosen. If not specified, the default is 192.168.0.0/16.
+
+_`calico_tag`
+  Tag of the calico containers used to provision the calico node.
+
+_`calico_cni_tag`
+  Tag of the cni used to provision the calico node.
+
+_`calico_kube_controllers_tag`
+  Tag of the kube_controllers used to provision the calico node.
+
+In addition, the Calico network driver requires kube_tag v1.9.3 or later,
+because Calico needs extra mounts for the kubelet container. See `commit
+`_
+of atomic-system-containers for more information.

 High Availability
 =================

@@ -2509,7 +2667,7 @@
 +-------------+-----------+
 | Docker      | 1.13.1    |
 +-------------+-----------+
-| Kubernetes  | 1.7.4     |
+| Kubernetes  | 1.9.3     |
 +-------------+-----------+
 | etcd        | 3.1.3     |
 +-------------+-----------+
diff -Nru magnum-6.0.1/magnum/api/attr_validator.py magnum-6.1.0/magnum/api/attr_validator.py
--- magnum-6.0.1/magnum/api/attr_validator.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/api/attr_validator.py 2018-02-23 14:02:04.000000000 +0000
@@ -210,6 +210,33 @@
             "master_count must be 1 when master_lb_enabled is False"))


+def validate_federation_hostcluster(cluster_uuid):
+    """Validate Federation `hostcluster_id` parameter.
+
+    If the parameter was not specified raise an
+    `exceptions.InvalidParameterValue`.
If the specified identifier does not + identify any Cluster, raise `exception.ClusterNotFound` + """ + if cluster_uuid is not None: + api_utils.get_resource('Cluster', cluster_uuid) + else: + raise exception.InvalidParameterValue( + "No hostcluster specified. " + "Please specify a hostcluster_id.") + + +def validate_federation_properties(properties): + """Validate Federation `properties` parameter.""" + if properties is None: + raise exception.InvalidParameterValue( + "Please specify a `properties` " + "dict for the federation.") + # Currently, we only support the property `dns-zone`. + if properties.get('dns-zone') is None: + raise exception.InvalidParameterValue("No DNS zone specified. " + "Please specify a `dns-zone`.") + + # Dictionary that maintains a list of validation functions validators = {'image_id': validate_image, 'flavor_id': validate_flavor, diff -Nru magnum-6.0.1/magnum/api/controllers/v1/cluster.py magnum-6.1.0/magnum/api/controllers/v1/cluster.py --- magnum-6.0.1/magnum/api/controllers/v1/cluster.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/api/controllers/v1/cluster.py 2018-02-23 14:02:04.000000000 +0000 @@ -554,6 +554,11 @@ :param cluster_ident: UUID of cluster or logical name of the cluster. """ context = pecan.request.context + if context.is_admin: + policy.enforce(context, 'cluster:delete_all_projects', + action='cluster:delete_all_projects') + context.all_tenants = True + cluster = api_utils.get_resource('Cluster', cluster_ident) policy.enforce(context, 'cluster:delete', cluster.as_dict(), action='cluster:delete') diff -Nru magnum-6.0.1/magnum/api/controllers/v1/cluster_template.py magnum-6.1.0/magnum/api/controllers/v1/cluster_template.py --- magnum-6.0.1/magnum/api/controllers/v1/cluster_template.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/api/controllers/v1/cluster_template.py 2018-02-23 14:02:04.000000000 +0000 @@ -368,6 +368,7 @@ @validation.enforce_network_driver_types_create() @validation.enforce_volume_driver_types_create() @validation.enforce_volume_storage_size_create() + @validation.enforce_driver_supported() def post(self, cluster_template): """Create a new ClusterTemplate. @@ -464,6 +465,11 @@ ClusterTemplate. """ context = pecan.request.context + if context.is_admin: + policy.enforce(context, 'clustertemplate:delete_all_projects', + action='clustertemplate:delete_all_projects') + context.all_tenants = True + cluster_template = api_utils.get_resource('ClusterTemplate', cluster_template_ident) policy.enforce(context, 'clustertemplate:delete', diff -Nru magnum-6.0.1/magnum/api/controllers/v1/federation.py magnum-6.1.0/magnum/api/controllers/v1/federation.py --- magnum-6.0.1/magnum/api/controllers/v1/federation.py 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/magnum/api/controllers/v1/federation.py 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,454 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
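+
+# A federation groups a host cluster (the cluster that hosts the federated
+# COE API) with zero or more member clusters. Members join and unjoin by
+# patching the federation's `member_ids` field; see
+# FederationsController.patch below.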
+ +import uuid + +from oslo_log import log as logging +import pecan +import wsme +from wsme import types as wtypes + +from magnum.api import attr_validator +from magnum.api.controllers import base +from magnum.api.controllers import link +from magnum.api.controllers.v1 import collection +from magnum.api.controllers.v1 import types +from magnum.api import expose +from magnum.api import utils as api_utils +from magnum.api import validation +from magnum.common import exception +from magnum.common import name_generator +from magnum.common import policy +import magnum.conf +from magnum import objects +from magnum.objects import fields + +LOG = logging.getLogger(__name__) +CONF = magnum.conf.CONF + + +class FederationID(wtypes.Base): + """API representation of a federation ID + + This class enforces type checking and value constraints, and converts + between the internal object model and the API representation of a + federation ID. + """ + uuid = types.uuid + + def __init__(self, uuid): + self.uuid = uuid + + +class Federation(base.APIBase): + """API representation of a federation. + + This class enforces type checking and value constraints, and converts + between the internal object model and the API representation of a + Federation. + """ + + # Unique UUID for this federation. + uuid = types.uuid + + # Name of this federation, max length is limited to 242 because heat stack + # requires max length limit to 255, and Magnum amend a uuid length. + name = wtypes.StringType(min_length=1, max_length=242, + pattern='^[a-zA-Z][a-zA-Z0-9_.-]*$') + + # UUID of the hostcluster of the federation, i.e. the cluster that + # hosts the COE Federated API. + hostcluster_id = wsme.wsattr(wtypes.text) + + # List of UUIDs of all the member clusters of the federation. + member_ids = wsme.wsattr([wtypes.text]) + + # Status of the federation. + status = wtypes.Enum(str, *fields.FederationStatus.ALL) + + # Status reason of the federation. + status_reason = wtypes.text + + # Set of federation metadata (COE-specific in some cases). + properties = wtypes.DictType(str, str) + + # A list containing a self link and associated federations links + links = wsme.wsattr([link.Link], readonly=True) + + def __init__(self, **kwargs): + super(Federation, self).__init__() + self.fields = [] + for field in objects.Federation.fields: + # Skip fields we do not expose. 
+ if not hasattr(self, field): + continue + self.fields.append(field) + setattr(self, field, kwargs.get(field, wtypes.Unset)) + + @staticmethod + def _convert_with_links(federation, url, expand=True): + if not expand: + federation.unset_fields_except(['uuid', 'name', 'hostcluster_id', + 'member_ids', 'status', + 'properties']) + + federation.links = [link.Link.make_link('self', url, 'federations', + federation.uuid), + link.Link.make_link('bookmark', url, 'federations', + federation.uuid, + bookmark=True)] + return federation + + @classmethod + def convert_with_links(cls, rpc_federation, expand=True): + federation = Federation(**rpc_federation.as_dict()) + return cls._convert_with_links(federation, pecan.request.host_url, + expand) + + @classmethod + def sample(cls, expand=True): + sample = cls(uuid='4221a353-8368-475f-b7de-3429d3f724b3', + name='example', + hostcluster_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63', + member_ids=['49dc23f5-ffc9-40c3-9d34-7be7f9e34d63', + 'f2439bcf-02a2-4278-9d8a-f07a2042230a', + 'e549e0a5-3d3c-406f-bd7c-0e0182fb211c'], + properties={'dns-zone': 'example.com.'}, + status=fields.FederationStatus.CREATE_COMPLETE, + status_reason="CREATE completed successfully") + return cls._convert_with_links(sample, 'http://localhost:9511', expand) + + +class FederationPatchType(types.JsonPatchType): + _api_base = Federation + + @staticmethod + def internal_attrs(): + """"Returns a list of internal attributes. + + Internal attributes can't be added, replaced or removed. + """ + internal_attrs = [] + return types.JsonPatchType.internal_attrs() + internal_attrs + + +class FederationCollection(collection.Collection): + """API representation of a collection of federations.""" + + # A list containing federation objects. + federations = [Federation] + + def __init__(self, **kwargs): + self._type = 'federations' + + @staticmethod + def convert_with_links(rpc_federation, limit, url=None, expand=False, + **kwargs): + collection = FederationCollection() + collection.federations = [Federation.convert_with_links(p, expand) + for p in rpc_federation] + collection.next = collection.get_next(limit, url=url, **kwargs) + return collection + + @classmethod + def sample(cls): + sample = cls() + sample.federations = [Federation.sample(expand=False)] + return sample + + +class FederationsController(base.Controller): + """REST controller for federations.""" + + def __init__(self): + super(FederationsController, self).__init__() + + _custom_actions = { + 'detail': ['GET'], + } + + def _generate_name_for_federation(self, context): + """Generate a random name like: phi-17-federation.""" + name_gen = name_generator.NameGenerator() + name = name_gen.generate() + return name + '-federation' + + def _get_federation_collection(self, marker, limit, + sort_key, sort_dir, expand=False, + resource_url=None): + limit = api_utils.validate_limit(limit) + sort_dir = api_utils.validate_sort_dir(sort_dir) + + marker_obj = None + if marker: + marker_obj = objects.Federation.get_by_uuid(pecan.request.context, + marker) + + federations = objects.Federation.list(pecan.request.context, limit, + marker_obj, sort_key=sort_key, + sort_dir=sort_dir) + + return FederationCollection.convert_with_links(federations, limit, + url=resource_url, + expand=expand, + sort_key=sort_key, + sort_dir=sort_dir) + + @expose.expose(FederationCollection, types.uuid, int, wtypes.text, + wtypes.text) + def get_all(self, marker=None, limit=None, sort_key='id', + sort_dir='asc'): + """Retrieve a list of federations. 
+ + :param marker: pagination marker for large data sets. + :param limit: maximum number of resources to return in a single result. + :param sort_key: column to sort results by. Default: id. + :param sort_dir: direction to sort. "asc" or "desc". Default: asc. + """ + context = pecan.request.context + policy.enforce(context, 'federation:get_all', + action='federation:get_all') + return self._get_federation_collection(marker, limit, sort_key, + sort_dir) + + @expose.expose(FederationCollection, types.uuid, int, wtypes.text, + wtypes.text) + def detail(self, marker=None, limit=None, sort_key='id', + sort_dir='asc'): + """Retrieve a list of federation with detail. + + :param marker: pagination marker for large data sets. + :param limit: maximum number of resources to return in a single result. + :param sort_key: column to sort results by. Default: id. + :param sort_dir: direction to sort. "asc" or "desc". Default: asc. + """ + context = pecan.request.context + policy.enforce(context, 'federation:detail', + action='federation:detail') + + # NOTE(lucasagomes): /detail should only work against collections + parent = pecan.request.path.split('/')[:-1][-1] + if parent != "federations": + raise exception.HTTPNotFound + + expand = True + resource_url = '/'.join(['federations', 'detail']) + return self._get_federation_collection(marker, limit, + sort_key, sort_dir, expand, + resource_url) + + @expose.expose(Federation, types.uuid_or_name) + def get_one(self, federation_ident): + """Retrieve information about a given Federation. + + :param federation_ident: UUID or logical name of the Federation. + """ + context = pecan.request.context + federation = api_utils.get_resource('Federation', federation_ident) + policy.enforce(context, 'federation:get', federation.as_dict(), + action='federation:get') + + federation = Federation.convert_with_links(federation) + + return federation + + @expose.expose(FederationID, body=Federation, status_code=202) + def post(self, federation): + """Create a new federation. + + :param federation: a federation within the request body. + """ + context = pecan.request.context + policy.enforce(context, 'federation:create', + action='federation:create') + + federation_dict = federation.as_dict() + + # Validate `hostcluster_id` + hostcluster_id = federation_dict.get('hostcluster_id') + attr_validator.validate_federation_hostcluster(hostcluster_id) + + # Validate `properties` dict. + properties_dict = federation_dict.get('properties') + attr_validator.validate_federation_properties(properties_dict) + + federation_dict['project_id'] = context.project_id + + # If no name is specified, generate a random human-readable name + name = (federation_dict.get('name') or + self._generate_name_for_federation(context)) + federation_dict['name'] = name + + new_federation = objects.Federation(context, **federation_dict) + new_federation.uuid = uuid.uuid4() + + # TODO(clenimar): remove hard-coded `create_timeout`. + pecan.request.rpcapi.federation_create_async(new_federation, + create_timeout=15) + + return FederationID(new_federation.uuid) + + @expose.expose(FederationID, types.uuid_or_name, types.boolean, + body=[FederationPatchType], status_code=202) + def patch(self, federation_ident, rollback=False, patch=None): + """Update an existing Federation. + + Please note that the join/unjoin operation is performed by patching + `member_ids`. + + :param federation_ident: UUID or logical name of a federation. + :param rollback: whether to rollback federation on update failure. 
+ :param patch: a json PATCH document to apply to this federation. + """ + federation = self._patch(federation_ident, patch) + pecan.request.rpcapi.federation_update_async(federation, rollback) + return FederationID(federation.uuid) + + def _patch(self, federation_ident, patch): + context = pecan.request.context + federation = api_utils.get_resource('Federation', federation_ident) + policy.enforce(context, 'federation:update', federation.as_dict(), + action='federation:update') + + # NOTE(clenimar): Magnum does not allow one to append items to existing + # fields through an `add` operation using HTTP PATCH (please check + # `magnum.api.utils.apply_jsonpatch`). In order to perform the join + # and unjoin operations, intercept the original JSON PATCH document + # and change the operation from either `add` or `remove` to `replace`. + patch_path = patch[0].get('path') + patch_value = patch[0].get('value') + patch_op = patch[0].get('op') + + if patch_path == '/member_ids': + if patch_op == 'add' and patch_value is not None: + patch = self._join_wrapper(federation_ident, patch) + elif patch_op == 'remove' and patch_value is not None: + patch = self._unjoin_wrapper(federation_ident, patch) + + try: + federation_dict = federation.as_dict() + new_federation = Federation( + **api_utils.apply_jsonpatch(federation_dict, patch)) + except api_utils.JSONPATCH_EXCEPTIONS as e: + raise exception.PatchError(patch=patch, reason=e) + + # Retrieve only what changed after the patch. + delta = self._update_changed_fields(federation, new_federation) + validation.validate_federation_properties(delta) + + return federation + + def _update_changed_fields(self, federation, new_federation): + """Update only the patches that were modified and return the diff.""" + for field in objects.Federation.fields: + try: + patch_val = getattr(new_federation, field) + except AttributeError: + # Ignore fields that aren't exposed in the API + continue + if patch_val == wtypes.Unset: + patch_val = None + if federation[field] != patch_val: + federation[field] = patch_val + + return federation.obj_what_changed() + + def _join_wrapper(self, federation_ident, patch): + """Intercept PATCH JSON documents for join operations. + + Take a PATCH JSON document with `add` operation:: + { + 'op': 'add', + 'value': 'new_member_id', + 'path': '/member_ids' + } + and transform it into a document with `replace` operation:: + { + 'op': 'replace', + 'value': ['current_member_id1', ..., 'new_member_id'], + 'path': '/member_ids' + } + """ + federation = api_utils.get_resource('Federation', federation_ident) + new_member_uuid = patch[0]['value'] + + # Check if the cluster exists + c = objects.Cluster.get_by_uuid(pecan.request.context, new_member_uuid) + + # Check if the cluster is already a member of the federation + if new_member_uuid not in federation.member_ids and c is not None: + # Retrieve all current members + members = federation.member_ids + # Add the new member + members.append(c.uuid) + else: + kw = {'uuid': new_member_uuid, 'federation_name': federation.name} + raise exception.MemberAlreadyExists(**kw) + + # Set `value` to the updated member list. Change `op` to `replace` + patch[0]['value'] = members + patch[0]['op'] = 'replace' + + return patch + + def _unjoin_wrapper(self, federation_ident, patch): + """Intercept PATCH JSON documents for unjoin operations. 
+ + Take a PATCH JSON document with `remove` operation:: + { + 'op': 'remove', + 'value': 'former_member_id', + 'path': '/member_ids' + } + and transform it into a document with `replace` operation:: + { + 'op': 'replace', + 'value': ['current_member_id1', ..., 'current_member_idn'], + 'path': '/member_ids' + } + """ + federation = api_utils.get_resource('Federation', federation_ident) + cluster_uuid = patch[0]['value'] + + # Check if the cluster exists + c = objects.Cluster.get_by_uuid(pecan.request.context, cluster_uuid) + + # Check if the cluster is a member cluster and if it exists + if cluster_uuid in federation.member_ids and c is not None: + # Retrieve all current members + members = federation.member_ids + # Unjoin the member + members.remove(cluster_uuid) + else: + raise exception.HTTPNotFound("Cluster %s is not a member of the " + "federation %s." % (cluster_uuid, + federation.name)) + + # Set `value` to the updated member list. Change `op` to `replace` + patch[0]['value'] = members + patch[0]['op'] = 'replace' + + return patch + + @expose.expose(None, types.uuid_or_name, status_code=204) + def delete(self, federation_ident): + """Delete a federation. + + :param federation_ident: UUID of federation or logical name + of the federation. + """ + context = pecan.request.context + federation = api_utils.get_resource('Federation', federation_ident) + policy.enforce(context, 'federation:delete', federation.as_dict(), + action='federation:delete') + + pecan.request.rpcapi.federation_delete_async(federation.uuid) diff -Nru magnum-6.0.1/magnum/api/controllers/v1/__init__.py magnum-6.1.0/magnum/api/controllers/v1/__init__.py --- magnum-6.0.1/magnum/api/controllers/v1/__init__.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/api/controllers/v1/__init__.py 2018-02-23 14:02:04.000000000 +0000 @@ -29,6 +29,7 @@ from magnum.api.controllers.v1 import certificate from magnum.api.controllers.v1 import cluster from magnum.api.controllers.v1 import cluster_template +from magnum.api.controllers.v1 import federation from magnum.api.controllers.v1 import magnum_services from magnum.api.controllers.v1 import quota from magnum.api.controllers.v1 import stats @@ -99,6 +100,9 @@ stats = [link.Link] """Links to the stats resource""" + # Links to the federations resources + federations = [link.Link] + @staticmethod def convert(): v1 = V1() @@ -161,6 +165,13 @@ pecan.request.host_url, 'stats', '', bookmark=True)] + v1.federations = [link.Link.make_link('self', pecan.request.host_url, + 'federations', ''), + link.Link.make_link('bookmark', + pecan.request.host_url, + 'federations', '', + bookmark=True)] + return v1 @@ -175,6 +186,7 @@ certificates = certificate.CertificateController() mservices = magnum_services.MagnumServiceController() stats = stats.StatsController() + federations = federation.FederationsController() @expose.expose(V1) def get(self): diff -Nru magnum-6.0.1/magnum/api/utils.py magnum-6.1.0/magnum/api/utils.py --- magnum-6.0.1/magnum/api/utils.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/api/utils.py 2018-02-23 14:02:04.000000000 +0000 @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import ast import jsonpatch from oslo_utils import uuidutils import pecan @@ -80,6 +81,14 @@ msg = _("The attribute %s has existed, please use " "'replace' operation instead.") % p['path'] raise wsme.exc.ClientSideError(msg) + + if p['op'] == 'replace' and p['path'] == '/labels': + try: + val = p['value'] + dict_val = val if type(val) == dict else ast.literal_eval(val) + p['value'] = dict_val + except (SyntaxError, ValueError, AssertionError) as e: + raise exception.PatchError(patch=patch, reason=e) return jsonpatch.apply_patch(doc, patch) diff -Nru magnum-6.0.1/magnum/api/validation.py magnum-6.1.0/magnum/api/validation.py --- magnum-6.0.1/magnum/api/validation.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/api/validation.py 2018-02-23 14:02:04.000000000 +0000 @@ -30,6 +30,7 @@ CONF = magnum.conf.CONF cluster_update_allowed_properties = set(['node_count']) +federation_update_allowed_properties = set(['member_ids', 'properties']) def enforce_cluster_type_supported(): @@ -47,6 +48,30 @@ return wrapper +def enforce_driver_supported(): + @decorator.decorator + def wrapper(func, *args, **kwargs): + cluster_template = args[1] + cluster_distro = cluster_template.cluster_distro + if not cluster_distro: + try: + cli = clients.OpenStackClients(pecan.request.context) + image_id = cluster_template.image_id + image = api_utils.get_openstack_resource(cli.glance().images, + image_id, + 'images') + cluster_distro = image.get('os_distro') + except Exception: + pass + cluster_type = (cluster_template.server_type, + cluster_distro, + cluster_template.coe) + driver.Driver.get_driver(*cluster_type) + return func(*args, **kwargs) + + return wrapper + + def enforce_cluster_volume_storage_size(): @decorator.decorator def wrapper(func, *args, **kwargs): @@ -200,6 +225,15 @@ raise exception.InvalidParameterValue(err=err) +def validate_federation_properties(delta): + + update_disallowed_properties = delta - federation_update_allowed_properties + if update_disallowed_properties: + err = (_("cannot change federation property(ies) %s.") % + ", ".join(update_disallowed_properties)) + raise exception.InvalidParameterValue(err=err) + + class Validator(object): @classmethod @@ -275,7 +309,7 @@ class K8sValidator(Validator): - supported_network_drivers = ['flannel'] + supported_network_drivers = ['flannel', 'calico'] supported_server_types = ['vm', 'bm'] allowed_network_drivers = ( CONF.cluster_template.kubernetes_allowed_network_drivers) diff -Nru magnum-6.0.1/magnum/cmd/conductor.py magnum-6.1.0/magnum/cmd/conductor.py --- magnum-6.0.1/magnum/cmd/conductor.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/cmd/conductor.py 2018-02-23 14:02:04.000000000 +0000 @@ -28,6 +28,7 @@ from magnum.conductor.handlers import ca_conductor from magnum.conductor.handlers import cluster_conductor from magnum.conductor.handlers import conductor_listener +from magnum.conductor.handlers import federation_conductor from magnum.conductor.handlers import indirection_api import magnum.conf from magnum import version @@ -51,6 +52,7 @@ cluster_conductor.Handler(), conductor_listener.Handler(), ca_conductor.Handler(), + federation_conductor.Handler(), ] server = rpc_service.Service.create(CONF.conductor.topic, diff -Nru magnum-6.0.1/magnum/common/exception.py magnum-6.1.0/magnum/common/exception.py --- magnum-6.0.1/magnum/common/exception.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/common/exception.py 2018-02-23 14:02:04.000000000 +0000 @@ -367,6 +367,10 @@ message = _("Failed to list regions.") 
+class ServicesListFailed(MagnumException):
+    message = _("Failed to list services.")
+
+
 class TrusteeOrTrustToClusterFailed(MagnumException):
     message = _("Failed to create trustee or trust for Cluster: "
                 "%(cluster_uuid)s")
@@ -382,3 +386,8 @@

 class FederationAlreadyExists(Conflict):
     message = _("A federation with UUID %(uuid)s already exists.")
+
+
+class MemberAlreadyExists(Conflict):
+    message = _("A cluster with UUID %(uuid)s is already a member of the "
+                "federation %(federation_name)s.")
diff -Nru magnum-6.0.1/magnum/common/keystone.py magnum-6.1.0/magnum/common/keystone.py
--- magnum-6.0.1/magnum/common/keystone.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/common/keystone.py 2018-02-23 14:02:04.000000000 +0000
@@ -291,3 +291,32 @@
             'region_name_list': '/'.join(
                 region_list + ['unspecified'])})
     return region_name
+
+
+def is_octavia_enabled():
+    """Check if the Octavia service is deployed in the cloud.
+
+    Octavia is already an official LBaaS solution for OpenStack
+    (https://governance.openstack.org/tc/reference/projects/octavia.html) and
+    will deprecate the neutron-lbaas extension starting from the Queens
+    release.
+
+    We use Octavia instead of the Neutron LBaaS API for load balancing
+    functionality for k8s clusters if the Octavia service is deployed and
+    enabled in the cloud.
+    """
+    # Put the import here to avoid circular importing.
+    from magnum.common import context
+    admin_context = context.make_admin_context()
+    keystone = KeystoneClientV3(admin_context)
+
+    try:
+        octavia_svc = keystone.client.services.list(type='load-balancer')
+    except Exception:
+        LOG.exception('Failed to list services')
+        raise exception.ServicesListFailed()
+
+    # Always assume there is only one load balancing service configured.
+    if octavia_svc and octavia_svc[0].enabled:
+        return True
+
+    return False
diff -Nru magnum-6.0.1/magnum/common/policies/cluster.py magnum-6.1.0/magnum/common/policies/cluster.py
--- magnum-6.0.1/magnum/common/policies/cluster.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/common/policies/cluster.py 2018-02-23 14:02:04.000000000 +0000
@@ -41,6 +41,17 @@
         ]
     ),
     policy.DocumentedRuleDefault(
+        name=CLUSTER % 'delete_all_projects',
+        check_str=base.RULE_ADMIN_API,
+        description='Delete a cluster from any project.',
+        operations=[
+            {
+                'path': '/v1/clusters/{cluster_ident}',
+                'method': 'DELETE'
+            }
+        ]
+    ),
+    policy.DocumentedRuleDefault(
         name=CLUSTER % 'detail',
         check_str=base.RULE_DENY_CLUSTER_USER,
         description='Retrieve a list of clusters with detail.',
diff -Nru magnum-6.0.1/magnum/common/policies/cluster_template.py magnum-6.1.0/magnum/common/policies/cluster_template.py
--- magnum-6.0.1/magnum/common/policies/cluster_template.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/common/policies/cluster_template.py 2018-02-23 14:02:04.000000000 +0000
@@ -41,6 +41,17 @@
         ]
     ),
     policy.DocumentedRuleDefault(
+        name=CLUSTER_TEMPLATE % 'delete_all_projects',
+        check_str=base.RULE_ADMIN_API,
+        description='Delete a cluster template from any project.',
+        operations=[
+            {
+                'path': '/v1/clustertemplate/{clustertemplate_ident}',
+                'method': 'DELETE'
+            }
+        ]
+    ),
+    policy.DocumentedRuleDefault(
         name=CLUSTER_TEMPLATE % 'detail_all_projects',
         check_str=base.RULE_ADMIN_API,
         description=('Retrieve a list of cluster templates with detail across '
diff -Nru magnum-6.0.1/magnum/common/policies/federation.py magnum-6.1.0/magnum/common/policies/federation.py
--- magnum-6.0.1/magnum/common/policies/federation.py 1970-01-01 00:00:00.000000000 +0000
+++
magnum-6.1.0/magnum/common/policies/federation.py 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,91 @@ +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from oslo_policy import policy + +from magnum.common.policies import base + +FEDERATION = 'federation:%s' + +rules = [ + policy.DocumentedRuleDefault( + name=FEDERATION % 'create', + check_str=base.RULE_DENY_CLUSTER_USER, + description='Create a new federation.', + operations=[ + { + 'path': '/v1/federations', + 'method': 'POST' + } + ] + ), + policy.DocumentedRuleDefault( + name=FEDERATION % 'delete', + check_str=base.RULE_DENY_CLUSTER_USER, + description='Delete a federation.', + operations=[ + { + 'path': '/v1/federations/{federation_ident}', + 'method': 'DELETE' + } + ] + ), + policy.DocumentedRuleDefault( + name=FEDERATION % 'detail', + check_str=base.RULE_DENY_CLUSTER_USER, + description='Retrieve a list of federations with detail.', + operations=[ + { + 'path': '/v1/federations', + 'method': 'GET' + } + ] + ), + policy.DocumentedRuleDefault( + name=FEDERATION % 'get', + check_str=base.RULE_DENY_CLUSTER_USER, + description='Retrieve information about the given federation.', + operations=[ + { + 'path': '/v1/federations/{federation_ident}', + 'method': 'GET' + } + ] + ), + policy.DocumentedRuleDefault( + name=FEDERATION % 'get_all', + check_str=base.RULE_DENY_CLUSTER_USER, + description='Retrieve a list of federations.', + operations=[ + { + 'path': '/v1/federations/', + 'method': 'GET' + } + ] + ), + policy.DocumentedRuleDefault( + name=FEDERATION % 'update', + check_str=base.RULE_DENY_CLUSTER_USER, + description='Update an existing federation.', + operations=[ + { + 'path': '/v1/federations/{federation_ident}', + 'method': 'PATCH' + } + ] + ) +] + + +def list_rules(): + return rules diff -Nru magnum-6.0.1/magnum/common/policies/__init__.py magnum-6.1.0/magnum/common/policies/__init__.py --- magnum-6.0.1/magnum/common/policies/__init__.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/common/policies/__init__.py 2018-02-23 14:02:04.000000000 +0000 @@ -20,6 +20,7 @@ from magnum.common.policies import certificate from magnum.common.policies import cluster from magnum.common.policies import cluster_template +from magnum.common.policies import federation from magnum.common.policies import magnum_service from magnum.common.policies import quota from magnum.common.policies import stats @@ -33,6 +34,7 @@ certificate.list_rules(), cluster.list_rules(), cluster_template.list_rules(), + federation.list_rules(), magnum_service.list_rules(), quota.list_rules(), stats.list_rules() diff -Nru magnum-6.0.1/magnum/common/x509/operations.py magnum-6.1.0/magnum/common/x509/operations.py --- magnum-6.0.1/magnum/common/x509/operations.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/common/x509/operations.py 2018-02-23 14:02:04.000000000 +0000 @@ -46,20 +46,23 @@ ) -def generate_client_certificate(issuer_name, subject_name, ca_key, +def generate_client_certificate(issuer_name, subject_name, + 
organization_name, ca_key, encryption_password=None, ca_key_password=None): """Generate Client Certificate :param issuer_name: issuer name :param subject_name: subject name of client + :param organization_name: Organization name of client :param ca_key: private key of CA :param encryption_password: encryption passsword for private key :param ca_key_password: private key password for given ca key :returns: generated private key and certificate pair """ return _generate_certificate(issuer_name, subject_name, - _build_client_extentions(), ca_key=ca_key, + _build_client_extentions(), + organization_name, ca_key=ca_key, encryption_password=encryption_password, ca_key_password=ca_key_password) @@ -97,11 +100,14 @@ encryption_password=encryption_password) -def _generate_certificate(issuer_name, subject_name, extensions, ca_key=None, +def _generate_certificate(issuer_name, subject_name, extensions, + organization_name=None, ca_key=None, encryption_password=None, ca_key_password=None): if not isinstance(subject_name, six.text_type): subject_name = six.text_type(subject_name.decode('utf-8')) + if organization_name and not isinstance(organization_name, six.text_type): + organization_name = six.text_type(organization_name.decode('utf-8')) private_key = rsa.generate_private_key( public_exponent=65537, @@ -111,9 +117,11 @@ # subject name is set as common name csr = x509.CertificateSigningRequestBuilder() - csr = csr.subject_name(x509.Name([ - x509.NameAttribute(x509.OID_COMMON_NAME, subject_name), - ])) + name_attributes = [x509.NameAttribute(x509.OID_COMMON_NAME, subject_name)] + if organization_name: + name_attributes.append(x509.NameAttribute(x509.OID_ORGANIZATION_NAME, + organization_name)) + csr = csr.subject_name(x509.Name(name_attributes)) for extention in extensions: csr = csr.add_extension(extention.value, critical=extention.critical) diff -Nru magnum-6.0.1/magnum/conductor/api.py magnum-6.1.0/magnum/conductor/api.py --- magnum-6.0.1/magnum/conductor/api.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/conductor/api.py 2018-02-23 14:02:04.000000000 +0000 @@ -51,6 +51,29 @@ def cluster_update_async(self, cluster, rollback=False): self._cast('cluster_update', cluster=cluster, rollback=rollback) + # Federation Operations + + def federation_create(self, federation, create_timeout): + return self._call('federation_create', federation=federation, + create_timeout=create_timeout) + + def federation_create_async(self, federation, create_timeout): + self._cast('federation_create', federation=federation, + create_timeout=create_timeout) + + def federation_delete(self, uuid): + return self._call('federation_delete', uuid=uuid) + + def federation_delete_async(self, uuid): + self._cast('federation_delete', uuid=uuid) + + def federation_update(self, federation): + return self._call('federation_update', federation=federation) + + def federation_update_async(self, federation, rollback=False): + self._cast('federation_update', federation=federation, + rollback=rollback) + # CA operations def sign_certificate(self, cluster, certificate): diff -Nru magnum-6.0.1/magnum/conductor/handlers/common/cert_manager.py magnum-6.1.0/magnum/conductor/handlers/common/cert_manager.py --- magnum-6.0.1/magnum/conductor/handlers/common/cert_manager.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/conductor/handlers/common/cert_manager.py 2018-02-23 14:02:04.000000000 +0000 @@ -56,9 +56,18 @@ :returns: Magnum client cert uuid """ client_password = short_id.generate_id() + # TODO(strigazi): set subject 
name and organization per driver
+    # For RBAC kubernetes clusters we need the client to have:
+    #   subject_name: admin
+    #   organization_name: system:masters
+    # Non kubernetes drivers are not using the certificates fields
+    # for authorization
+    subject_name = 'admin'
+    organization_name = 'system:masters'
     client_cert = x509.generate_client_certificate(
         issuer_name,
-        CONDUCTOR_CLIENT_NAME,
+        subject_name,
+        organization_name,
         ca_cert['private_key'],
         encryption_password=client_password,
         ca_key_password=ca_password,
diff -Nru magnum-6.0.1/magnum/conductor/handlers/federation_conductor.py magnum-6.1.0/magnum/conductor/handlers/federation_conductor.py
--- magnum-6.0.1/magnum/conductor/handlers/federation_conductor.py 1970-01-01 00:00:00.000000000 +0000
+++ magnum-6.1.0/magnum/conductor/handlers/federation_conductor.py 2018-02-23 14:02:04.000000000 +0000
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from magnum.common import profiler
+import magnum.conf
+
+CONF = magnum.conf.CONF
+
+
+@profiler.trace_cls("rpc")
+class Handler(object):
+
+    def __init__(self):
+        super(Handler, self).__init__()
+
+    def federation_create(self, context, federation, create_timeout):
+        raise NotImplementedError("This feature is not yet implemented.")
+
+    def federation_update(self, context, federation, rollback=False):
+        raise NotImplementedError("This feature is not yet implemented.")
+
+    def federation_delete(self, context, uuid):
+        raise NotImplementedError("This feature is not yet implemented.")
diff -Nru magnum-6.0.1/magnum/conf/drivers.py magnum-6.1.0/magnum/conf/drivers.py
--- magnum-6.0.1/magnum/conf/drivers.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/conf/drivers.py 2018-02-23 14:02:04.000000000 +0000
@@ -33,7 +33,13 @@
     cfg.BoolOpt('send_cluster_metrics',
                 default=True,
                 help='Allow periodic tasks to pull COE data and send to '
-                     'ceilometer.')
+                     'ceilometer.'),
+    cfg.ListOpt('disabled_drivers',
+                default=[],
+                help='Disabled driver entry points. The default value is '
+                     '[], which means that all available drivers are '
+                     'enabled.'
+ ), ] diff -Nru magnum-6.0.1/magnum/drivers/common/driver.py magnum-6.1.0/magnum/drivers/common/driver.py --- magnum-6.0.1/magnum/drivers/common/driver.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/driver.py 2018-02-23 14:02:04.000000000 +0000 @@ -34,7 +34,8 @@ @classmethod def load_entry_points(cls): for entry_point in iter_entry_points('magnum.drivers'): - yield entry_point, entry_point.load(require=False) + if entry_point.name not in CONF.drivers.disabled_drivers: + yield entry_point, entry_point.load(require=False) @classmethod def get_drivers(cls): @@ -173,6 +174,21 @@ raise NotImplementedError("Subclasses must implement " "'delete_cluster'.") + @abc.abstractmethod + def create_federation(self, context, federation): + raise NotImplementedError("Subclasses must implement " + "'create_federation'.") + + @abc.abstractmethod + def update_federation(self, context, federation): + raise NotImplementedError("Subclasses must implement " + "'update_federation'.") + + @abc.abstractmethod + def delete_federation(self, context, federation): + raise NotImplementedError("Subclasses must implement " + "'delete_federation'.") + def get_monitor(self, context, cluster): """return the monitor with container data for this driver.""" diff -Nru magnum-6.0.1/magnum/drivers/common/image/heat-container-agent/Dockerfile magnum-6.1.0/magnum/drivers/common/image/heat-container-agent/Dockerfile --- magnum-6.0.1/magnum/drivers/common/image/heat-container-agent/Dockerfile 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/image/heat-container-agent/Dockerfile 2018-02-23 14:02:04.000000000 +0000 @@ -15,7 +15,7 @@ findutils os-collect-config os-apply-config \ os-refresh-config dib-utils python-pip python-docker-py \ python-yaml python-zaqarclient python2-oslo-log \ - python-psutil && dnf clean all + python-psutil kubernetes-client && dnf clean all # pip installing dpath as python-dpath is an older version of dpath # install docker-compose diff -Nru magnum-6.0.1/magnum/drivers/common/templates/environments/with_master_lb_octavia.yaml magnum-6.1.0/magnum/drivers/common/templates/environments/with_master_lb_octavia.yaml --- magnum-6.0.1/magnum/drivers/common/templates/environments/with_master_lb_octavia.yaml 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/templates/environments/with_master_lb_octavia.yaml 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,14 @@ +# Environment file to enable LBaaS in a cluster by mapping +# LBaaS-related resource types to the real Octavia resource types. 
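+#
+# The resource_registry below maps Magnum's optional LBaaS resource names
+# onto concrete Heat types, so the same cluster templates work whether the
+# cloud provides Neutron LBaaS or Octavia. Magnum's Heat driver is expected
+# to pick this environment file itself when Octavia is available; as a
+# rough sketch (template and stack names here are illustrative), the same
+# mapping could also be exercised by hand with:
+#   openstack stack create -t kubecluster.yaml \
+#     -e with_master_lb_octavia.yaml my-cluster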
+resource_registry: + "Magnum::ApiGatewaySwitcher": ../fragments/api_gateway_switcher_pool.yaml + + # Cluster template + "Magnum::Optional::Neutron::LBaaS::LoadBalancer": "OS::Octavia::LoadBalancer" + "Magnum::Optional::Neutron::LBaaS::Listener": "OS::Octavia::Listener" + "Magnum::Optional::Neutron::LBaaS::Pool": "OS::Octavia::Pool" + "Magnum::Optional::Neutron::LBaaS::HealthMonitor": "OS::Octavia::HealthMonitor" + "Magnum::Optional::Neutron::LBaaS::FloatingIP": "OS::Neutron::FloatingIP" + + # Master node template + "Magnum::Optional::Neutron::LBaaS::PoolMember": "OS::Octavia::PoolMember" diff -Nru magnum-6.0.1/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh magnum-6.1.0/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh --- magnum-6.0.1/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh 2018-02-23 14:02:04.000000000 +0000 @@ -7,10 +7,9 @@ clear_docker_storage () { # stop docker systemctl stop docker + systemctl disable docker-storage-setup # clear storage graph rm -rf /var/lib/docker/* - # remove current LVs - docker-storage-setup --reset if [ -f /etc/sysconfig/docker-storage ]; then sed -i "/^DOCKER_STORAGE_OPTIONS=/ s/=.*/=/" /etc/sysconfig/docker-storage @@ -27,11 +26,7 @@ mount -a fi - sed -i "/^DOCKER_STORAGE_OPTIONS=/ s/=.*/=-s $1/" /etc/sysconfig/docker-storage - - local lvname=$(lvdisplay | grep "LV\ Path" | awk '{print $3}') - local pvname=$(pvdisplay | grep "PV\ Name" | awk '{print $3}') - lvextend -r $lvname $pvname + echo "DOCKER_STORAGE_OPTIONS=\"--storage-driver $1\"" > /etc/sysconfig/docker-storage } # Configure docker storage with devicemapper using direct LVM @@ -39,7 +34,7 @@ clear_docker_storage echo "GROWROOT=True" > /etc/sysconfig/docker-storage-setup - echo "ROOT_SIZE=5GB" >> /etc/sysconfig/docker-storage-setup + echo "STORAGE_DRIVER=devicemapper" >> /etc/sysconfig/docker-storage-setup if [ -n "$DOCKER_VOLUME_SIZE" ] && [ "$DOCKER_VOLUME_SIZE" -gt 0 ]; then @@ -48,6 +43,7 @@ echo "VG=docker" >> /etc/sysconfig/docker-storage-setup else + echo "ROOT_SIZE=5GB" >> /etc/sysconfig/docker-storage-setup echo "DATA_SIZE=95%FREE" >> /etc/sysconfig/docker-storage-setup fi diff -Nru magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/calico-service.sh magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/calico-service.sh --- magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/calico-service.sh 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/calico-service.sh 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,450 @@ +#!/bin/sh + +. 
/etc/sysconfig/heat-params + +if [ "$NETWORK_DRIVER" != "calico" ]; then + exit 0 +fi + +_prefix=${CONTAINER_INFRA_PREFIX:-quay.io/calico/} +ETCD_SERVER_IP=${ETCD_LB_VIP:-$KUBE_NODE_IP} +CERT_DIR=/etc/kubernetes/certs +ETCD_CA=`cat ${CERT_DIR}/ca.crt | base64 | tr -d '\n'` +ETCD_CERT=`cat ${CERT_DIR}/server.crt | base64 | tr -d '\n'` +ETCD_KEY=`cat ${CERT_DIR}/server.key | base64 | tr -d '\n'` + +CALICO_DEPLOY=/srv/magnum/kubernetes/manifests/calico-deploy.yaml + +[ -f ${CALICO_DEPLOY} ] || { +echo "Writing File: $CALICO_DEPLOY" +mkdir -p $(dirname ${CALICO_DEPLOY}) +cat << EOF > ${CALICO_DEPLOY} +# Calico Version v2.6.7 +# https://docs.projectcalico.org/v2.6/releases#v2.6.7 +# This manifest includes the following component versions: +# calico/node:v2.6.7 +# calico/cni:v1.11.2 +# calico/kube-controllers:v1.0.3 + +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # Configure this with the location of your etcd cluster. + etcd_endpoints: "https://${ETCD_SERVER_IP}:2379" + + # Configure the Calico backend to use. + calico_backend: "bird" + + # The CNI network configuration to install on each node. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.1.0", + "type": "calico", + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "etcd_key_file": "__ETCD_KEY_FILE__", + "etcd_cert_file": "__ETCD_CERT_FILE__", + "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", + "log_level": "info", + "mtu": 1500, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s", + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + } + + # If you're using TLS enabled etcd uncomment the following. + # You must also populate the Secret below with these files. + etcd_ca: "/calico-secrets/etcd-ca" + etcd_cert: "/calico-secrets/etcd-cert" + etcd_key: "/calico-secrets/etcd-key" +--- +# The following contains k8s Secrets for use with a TLS enabled etcd cluster. +# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: calico-etcd-secrets + namespace: kube-system +data: + # Populate the following files with etcd TLS configuration if desired, but leave blank if + # not using TLS for etcd. + # This self-hosted install expects three files with the following names. The values + # should be base64 encoded strings of the entire contents of each file. + etcd-key: ${ETCD_KEY} + etcd-cert: ${ETCD_CERT} + etcd-ca: ${ETCD_CA} +--- +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. 
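+# (A DaemonSet is used so that exactly one calico-node pod runs on every
+# node, masters included via the tolerations below. Once the cluster is up,
+# the rollout can be checked with, for example:
+#   kubectl -n kube-system get daemonset calico-node
+# where DESIRED should match the number of nodes in the cluster.)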
+kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + labels: + k8s-app: calico-node + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: | + [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, + {"key":"CriticalAddonsOnly", "operator":"Exists"}] + spec: + hostNetwork: true + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: ${_prefix}node:${CALICO_TAG} + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Disable file logging so 'kubectl logs' works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Configure the IP Pool from which Pod IPs will be chosen. + - name: CALICO_IPV4POOL_CIDR + value: ${CALICO_IPV4POOL} + - name: CALICO_IPV4POOL_IPIP + value: "off" + - name: CALICO_IPV4POOL_NAT_OUTGOING + value: "true" + # Set noderef for node controller. + - name: CALICO_K8S_NODE_REF + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + value: "1440" + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + # Auto-detect the BGP IP address. + - name: IP + value: "" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + livenessProbe: + httpGet: + path: /liveness + port: 9099 + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + httpGet: + path: /readiness + port: 9099 + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /calico-secrets + name: etcd-certs + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: ${_prefix}cni:${CALICO_CNI_TAG} + command: ["/install-cni.sh"] + env: + # The location of the Calico etcd cluster. 
+ - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - mountPath: /calico-secrets + name: etcd-certs + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Mount in the etcd TLS secrets. + - name: etcd-certs + secret: + secretName: calico-etcd-secrets +--- +# This manifest deploys the Calico Kubernetes controllers. +# See https://github.com/projectcalico/kube-controllers +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: | + [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, + {"key":"CriticalAddonsOnly", "operator":"Exists"}] +spec: + # The controllers can only have a single active instance. + replicas: 1 + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + spec: + # The controllers must run in the host network namespace so that + # it isn't governed by policy that would prevent it from working. + hostNetwork: true + serviceAccountName: calico-kube-controllers + containers: + - name: calico-kube-controllers + image: ${_prefix}kube-controllers:${CALICO_KUBE_CONTROLLERS_TAG} + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + # Choose which controllers to run. + - name: ENABLED_CONTROLLERS + value: policy,profile,workloadendpoint,node + volumeMounts: + # Mount in the etcd TLS secrets. + - mountPath: /calico-secrets + name: etcd-certs + volumes: + # Mount in the etcd TLS secrets. + - name: etcd-certs + secret: + secretName: calico-etcd-secrets +--- +# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then +# be removed entirely once the new kube-controllers deployment has been deployed above. +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: calico-policy-controller + namespace: kube-system + labels: + k8s-app: calico-policy +spec: + # Turn this deployment off in favor of the kube-controllers deployment above. 
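+  # (Scaling to zero rather than deleting the object ensures any leftover
+  # policy-controller pods from a pre-upgrade install are torn down, while
+  # the Deployment itself stays in place until it can be dropped entirely.)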
+ replicas: 0 + strategy: + type: Recreate + template: + metadata: + name: calico-policy-controller + namespace: kube-system + labels: + k8s-app: calico-policy + spec: + hostNetwork: true + serviceAccountName: calico-kube-controllers + containers: + - name: calico-policy-controller + image: ${_prefix}kube-controllers:${CALICO_KUBE_CONTROLLERS_TAG} + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system + +# Calico Version v2.6.7 +# https://docs.projectcalico.org/v2.6/releases#v2.6.7 +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-kube-controllers +rules: + - apiGroups: + - "" + - extensions + resources: + - pods + - namespaces + - networkpolicies + - nodes + verbs: + - watch + - list +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-node +rules: + - apiGroups: [""] + resources: + - pods + - nodes + verbs: + - get + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system +EOF +} + +until curl -sf "http://127.0.0.1:8080/healthz" +do + echo "Waiting for Kubernetes API..." 
+    sleep 5
+done
+
+/usr/bin/kubectl apply -f ${CALICO_DEPLOY} --namespace=kube-system
diff -Nru magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh
--- magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh 2018-02-23 14:02:04.000000000 +0000
@@ -34,13 +34,20 @@
 fi

+_prefix=${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/}
+atomic install \
+--system-package no \
+--system \
+--storage ostree \
+--name=etcd ${_prefix}etcd:${ETCD_TAG}
+
 if [ -z "$KUBE_NODE_IP" ]; then
     # FIXME(yuanying): Set KUBE_NODE_IP correctly
     KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
 fi

 myip="${KUBE_NODE_IP}"
-cert_dir="/etc/kubernetes/certs"
+cert_dir="/etc/etcd/certs"
 protocol="https"

 if [ "$TLS_DISABLED" = "True" ]; then
diff -Nru magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh
--- magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh 2018-02-23 14:02:11.000000000 +0000
@@ -18,20 +18,21 @@

 KUBE_API_ARGS="--runtime-config=api/all=true"
 KUBE_API_ARGS="$KUBE_API_ARGS --kubelet-preferred-address-types=InternalIP,Hostname,ExternalIP"
+KUBE_API_ARGS="$KUBE_API_ARGS $KUBEAPI_OPTIONS"

 if [ "$TLS_DISABLED" == "True" ]; then
     KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0 --insecure-port=$KUBE_API_PORT"
 else
     KUBE_API_ADDRESS="--bind-address=0.0.0.0 --secure-port=$KUBE_API_PORT"
     # insecure port is used internally
-    KUBE_API_ADDRESS="$KUBE_API_ADDRESS --insecure-port=8080"
-    KUBE_API_ARGS="$KUBE_API_ARGS --tls-cert-file=$CERT_DIR/server.crt"
+    KUBE_API_ADDRESS="$KUBE_API_ADDRESS --insecure-bind-address=127.0.0.1 --insecure-port=8080"
+    KUBE_API_ARGS="$KUBE_API_ARGS --authorization-mode=Node,RBAC --tls-cert-file=$CERT_DIR/server.crt"
     KUBE_API_ARGS="$KUBE_API_ARGS --tls-private-key-file=$CERT_DIR/server.key"
     KUBE_API_ARGS="$KUBE_API_ARGS --client-ca-file=$CERT_DIR/ca.crt"
 fi

 KUBE_ADMISSION_CONTROL=""
 if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then
-    KUBE_ADMISSION_CONTROL="--admission-control=${ADMISSION_CONTROL_LIST}"
+    KUBE_ADMISSION_CONTROL="--admission-control=NodeRestriction,${ADMISSION_CONTROL_LIST}"
 fi

 if [ -n "$TRUST_ID" ]; then
@@ -49,6 +50,7 @@

 # Add controller manager args
 KUBE_CONTROLLER_MANAGER_ARGS="--leader-elect=true"
+KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS $KUBECONTROLLER_OPTIONS"
 if [ -n "${ADMISSION_CONTROL_LIST}" ] && [ "${TLS_DISABLED}" == "False" ]; then
     KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --service-account-private-key-file=$CERT_DIR/server.key --root-ca-file=$CERT_DIR/ca.crt"
 fi
@@ -57,6 +59,10 @@
     KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --cloud-config=/etc/kubernetes/kube_openstack_config --cloud-provider=openstack"
 fi

+if [ -n "$CERT_MANAGER_API" ]; then
+    KUBE_CONTROLLER_MANAGER_ARGS="$KUBE_CONTROLLER_MANAGER_ARGS --cluster-signing-cert-file=$CERT_DIR/ca.crt --cluster-signing-key-file=$CERT_DIR/ca.key"
+fi
+
 sed -i '
 /^KUBELET_ADDRESSES=/ s/=.*/="--machines='""'"/
/^KUBE_CONTROLLER_MANAGER_ARGS=/ s#\(KUBE_CONTROLLER_MANAGER_ARGS\).*#\1="'"${KUBE_CONTROLLER_MANAGER_ARGS}"'"# @@ -64,6 +70,11 @@ sed -i '/^KUBE_SCHEDULER_ARGS=/ s/=.*/="--leader-elect=true"/' /etc/kubernetes/scheduler +HOSTNAME_OVERRIDE=$(hostname --short | sed 's/\.novalocal//') +KUBELET_ARGS="--register-node=true --register-schedulable=false --pod-manifest-path=/etc/kubernetes/manifests --hostname-override=${HOSTNAME_OVERRIDE}" +KUBELET_ARGS="${KUBELET_ARGS} --cluster_dns=${DNS_SERVICE_IP} --cluster_domain=${DNS_CLUSTER_DOMAIN}" +KUBELET_ARGS="${KUBELET_ARGS} ${KUBELET_OPTIONS}" + # For using default log-driver, other options should be ignored sed -i 's/\-\-log\-driver\=journald//g' /etc/sysconfig/docker diff -Nru magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh --- magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh 2018-02-23 14:02:04.000000000 +0000 @@ -1,69 +1,89 @@ -#!/bin/sh +#!/bin/sh -x . /etc/sysconfig/heat-params echo "configuring kubernetes (minion)" _prefix=${CONTAINER_INFRA_PREFIX:-docker.io/openstackmagnum/} -atomic install --storage ostree --system --system-package=no --name=kubelet ${_prefix}kubernetes-kubelet:${KUBE_TAG} + +_addtl_mounts='' +if [ "$NETWORK_DRIVER" = "calico" ]; then + mkdir -p /opt/cni + _addtl_mounts=',{"type":"bind","source":"/opt/cni","destination":"/opt/cni","options":["bind","rw","slave","mode=777"]}' +fi + +atomic install --storage ostree --system --system-package=no --set=ADDTL_MOUNTS=${_addtl_mounts} --name=kubelet ${_prefix}kubernetes-kubelet:${KUBE_TAG} atomic install --storage ostree --system --system-package=no --name=kube-proxy ${_prefix}kubernetes-proxy:${KUBE_TAG} CERT_DIR=/etc/kubernetes/certs PROTOCOL=https -FLANNEL_OPTIONS="-etcd-cafile $CERT_DIR/ca.crt \ --etcd-certfile $CERT_DIR/client.crt \ --etcd-keyfile $CERT_DIR/client.key" -ETCD_CURL_OPTIONS="--cacert $CERT_DIR/ca.crt \ ---cert $CERT_DIR/client.crt --key $CERT_DIR/client.key" ETCD_SERVER_IP=${ETCD_SERVER_IP:-$KUBE_MASTER_IP} KUBE_PROTOCOL="https" -KUBECONFIG=/etc/kubernetes/kubeconfig.yaml -FLANNELD_CONFIG=/etc/sysconfig/flanneld +KUBELET_KUBECONFIG=/etc/kubernetes/kubelet-config.yaml +PROXY_KUBECONFIG=/etc/kubernetes/proxy-config.yaml if [ "$TLS_DISABLED" = "True" ]; then PROTOCOL=http - FLANNEL_OPTIONS="" - ETCD_CURL_OPTIONS="" KUBE_PROTOCOL="http" fi -sed -i '/FLANNEL_OPTIONS/'d $FLANNELD_CONFIG - -cat >> $FLANNELD_CONFIG <> ${KUBECONFIG} +HOSTNAME_OVERRIDE=$(hostname --short | sed 's/\.novalocal//') +cat << EOF >> ${KUBELET_KUBECONFIG} apiVersion: v1 +clusters: +- cluster: + certificate-authority: ${CERT_DIR}/ca.crt + server: ${KUBE_MASTER_URI} + name: kubernetes +contexts: +- context: + cluster: kubernetes + user: system:node:${HOSTNAME_OVERRIDE} + name: default +current-context: default kind: Config +preferences: {} users: -- name: kubeclient +- name: system:node:${HOSTNAME_OVERRIDE} user: - client-certificate: ${CERT_DIR}/client.crt - client-key: ${CERT_DIR}/client.key + as-user-extra: {} + client-certificate: ${CERT_DIR}/kubelet.crt + client-key: ${CERT_DIR}/kubelet.key +EOF +cat << EOF >> ${PROXY_KUBECONFIG} +apiVersion: v1 clusters: -- name: kubernetes - cluster: - server: ${KUBE_MASTER_URI} +- cluster: certificate-authority: ${CERT_DIR}/ca.crt + 
server: ${KUBE_MASTER_URI} + name: kubernetes contexts: - context: cluster: kubernetes - user: kubeclient - name: service-account-context -current-context: service-account-context + user: kube-proxy + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: kube-proxy + user: + as-user-extra: {} + client-certificate: ${CERT_DIR}/proxy.crt + client-key: ${CERT_DIR}/proxy.key EOF if [ "$TLS_DISABLED" = "True" ]; then - sed -i 's/^.*user:$//' ${KUBECONFIG} - sed -i 's/^.*client-certificate.*$//' ${KUBECONFIG} - sed -i 's/^.*client-key.*$//' ${KUBECONFIG} - sed -i 's/^.*certificate-authority.*$//' ${KUBECONFIG} + sed -i 's/^.*user:$//' ${KUBELET_KUBECONFIG} + sed -i 's/^.*client-certificate.*$//' ${KUBELET_KUBECONFIG} + sed -i 's/^.*client-key.*$//' ${KUBELET_KUBECONFIG} + sed -i 's/^.*certificate-authority.*$//' ${KUBELET_KUBECONFIG} fi -chmod 0644 ${KUBECONFIG} +chmod 0644 ${KUBELET_KUBECONFIG} +chmod 0644 ${PROXY_KUBECONFIG} sed -i ' /^KUBE_ALLOW_PRIV=/ s/=.*/="--allow-privileged='"$KUBE_ALLOW_PRIV"'"/ @@ -77,9 +97,10 @@ # The hostname of the node is set to be the Nova name of the instance, and # the option --hostname-override for kubelet uses the hostname to register the node. # Using any other name will break the load balancer and cinder volume features. -HOSTNAME_OVERRIDE=$(hostname --short | sed 's/\.novalocal//') -KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests --cadvisor-port=4194 --kubeconfig ${KUBECONFIG} --hostname-override=${HOSTNAME_OVERRIDE}" +mkdir -p /etc/kubernetes/manifests +KUBELET_ARGS="--pod-manifest-path=/etc/kubernetes/manifests --cadvisor-port=4194 --kubeconfig ${KUBELET_KUBECONFIG} --hostname-override=${HOSTNAME_OVERRIDE}" KUBELET_ARGS="${KUBELET_ARGS} --cluster_dns=${DNS_SERVICE_IP} --cluster_domain=${DNS_CLUSTER_DOMAIN}" +KUBELET_ARGS="${KUBELET_ARGS} ${KUBELET_OPTIONS}" if [ -n "$TRUST_ID" ]; then KUBELET_ARGS="$KUBELET_ARGS --cloud-provider=openstack --cloud-config=/etc/kubernetes/kube_openstack_config" @@ -99,23 +120,54 @@ fi # specified cgroup driver -KUBELET_ARGS="${KUBELET_ARGS} --cgroup-driver=systemd" +KUBELET_ARGS="${KUBELET_ARGS} --client-ca-file=${CERT_DIR}/ca.crt --tls-cert-file=${CERT_DIR}/kubelet.crt --tls-private-key-file=${CERT_DIR}/kubelet.key --cgroup-driver=systemd" + +cat > /etc/kubernetes/get_require_kubeconfig.sh <> $FLANNELD_CONFIG < ${CORE_DNS} apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system +--- +apiVersion: v1 kind: ConfigMap metadata: name: coredns @@ -19,7 +60,9 @@ errors log stdout health - kubernetes ${DNS_CLUSTER_DOMAIN} ${PORTAL_NETWORK_CIDR} + kubernetes ${DNS_CLUSTER_DOMAIN} ${PORTAL_NETWORK_CIDR} ${PODS_NETWORK_CIDR} { + pods verified + } proxy . 
/etc/resolv.conf cache 30 } @@ -31,7 +74,6 @@ namespace: kube-system labels: k8s-app: coredns - kubernetes.io/cluster-service: "true" kubernetes.io/name: "CoreDNS" spec: replicas: 1 @@ -42,13 +84,16 @@ metadata: labels: k8s-app: coredns - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' spec: + serviceAccountName: coredns + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: "CriticalAddonsOnly" + operator: "Exists" containers: - name: coredns - image: ${_prefix}coredns:011 + image: ${_prefix}coredns:1.0.1 imagePullPolicy: Always args: [ "-conf", "/etc/coredns/Corefile" ] volumeMounts: @@ -61,6 +106,9 @@ - containerPort: 53 name: dns-tcp protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP livenessProbe: httpGet: path: /health @@ -99,6 +147,9 @@ - name: dns-tcp port: 53 protocol: TCP + - name: metrics + port: 9153 + protocol: TCP EOF } diff -Nru magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/enable-cert-api-manager magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/enable-cert-api-manager --- magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/enable-cert-api-manager 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/enable-cert-api-manager 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,15 @@ +#!/bin/bash + +. /etc/sysconfig/heat-params + +if [ "$(echo $CERT_MANAGER_API | tr '[:upper:]' '[:lower:]')" = "false" ]; then + exit 0 +fi + +cert_dir=/etc/kubernetes/certs + +echo -e "$CA_KEY" > ${cert_dir}/ca.key + +chown kube.kube ${cert_dir}/ca.key +chmod 400 ${cert_dir}/ca.key + diff -Nru magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-controller magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-controller --- magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-controller 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-controller 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,23 @@ +#!/bin/bash + +# Enables the specified ingress controller. +# +# Currently there is only support for traefik. +. /etc/sysconfig/heat-params + +function writeFile { + # $1 is filename + # $2 is file content + + [ -f ${1} ] || { + echo "Writing File: $1" + mkdir -p $(dirname ${1}) + cat << EOF > ${1} +$2 +EOF + } +} + +if [ "$(echo $INGRESS_CONTROLLER | tr '[:upper:]' '[:lower:]')" = "traefik" ]; then + $enable-ingress-traefik +fi diff -Nru magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-traefik magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-traefik --- magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-traefik 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-traefik 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,147 @@ +INGRESS_TRAEFIK_MANIFEST=/srv/magnum/kubernetes/ingress-traefik.yaml +INGRESS_TRAEFIK_MANIFEST_CONTENT=$(cat < ${KUBE_DASH_DEPLOY} +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration to deploy release version of the Dashboard UI compatible with +# Kubernetes 1.8. +# +# Example usage: kubectl create -f + +# ------------------- Dashboard Secret ------------------- # + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kube-system +type: Opaque + +--- +# ------------------- Dashboard Service Account ------------------- # + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kube-system + +--- +# ------------------- Dashboard Role & Role Binding ------------------- # + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kubernetes-dashboard-minimal + namespace: kube-system +rules: + # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret. +- apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] + # Allow Dashboard to create 'kubernetes-dashboard-settings' config map. +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["create"] + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. +- apiGroups: [""] + resources: ["secrets"] + resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"] + verbs: ["get", "update", "delete"] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. +- apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["kubernetes-dashboard-settings"] + verbs: ["get", "update"] + # Allow Dashboard to get metrics from heapster. 
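+  # (The "proxy" verb on the heapster Service, together with "get" on the
+  # services/proxy subresource below, lets the dashboard reach heapster
+  # through the API server's service proxy rather than the pod directly.)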
+- apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster"] + verbs: ["proxy"] +- apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:"] + verbs: ["get"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: kubernetes-dashboard-minimal + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard-minimal +subjects: +- kind: ServiceAccount + name: kubernetes-dashboard + namespace: kube-system + +--- +# ------------------- Dashboard Deployment ------------------- # + kind: Deployment -apiVersion: extensions/v1beta1 +apiVersion: apps/v1beta2 metadata: labels: - app: kubernetes-dashboard + k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kube-system spec: @@ -30,116 +123,319 @@ revisionHistoryLimit: 10 selector: matchLabels: - app: kubernetes-dashboard + k8s-app: kubernetes-dashboard template: metadata: labels: - app: kubernetes-dashboard - # Comment the following annotation if Dashboard must not be deployed on master - annotations: - scheduler.alpha.kubernetes.io/tolerations: | - [ - { - "key": "dedicated", - "operator": "Equal", - "value": "master", - "effect": "NoSchedule" - } - ] + k8s-app: kubernetes-dashboard spec: containers: - name: kubernetes-dashboard + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP image: ${KUBE_DASH_IMAGE} - imagePullPolicy: Always ports: - - containerPort: 9090 + - containerPort: 8443 protocol: TCP args: + - --auto-generate-certificates + - --heapster-host=heapster:80 + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. 
+          # - --apiserver-host=http://my-address:port
+        volumeMounts:
+        - name: kubernetes-dashboard-certs
+          mountPath: /certs
+          # Create on-disk volume to store exec logs
+        - mountPath: /tmp
+          name: tmp-volume
         livenessProbe:
           httpGet:
+            scheme: HTTPS
             path: /
-            port: 9090
+            port: 8443
           initialDelaySeconds: 30
           timeoutSeconds: 30
-EOF
-}
+      volumes:
+      - name: kubernetes-dashboard-certs
+        secret:
+          secretName: kubernetes-dashboard-certs
+      - name: tmp-volume
+        emptyDir: {}
+      serviceAccountName: kubernetes-dashboard
+      # Comment the following tolerations if Dashboard must not be deployed on master
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+
+---
+# ------------------- Dashboard Service ------------------- #

-KUBE_DASH_SVC=/srv/kubernetes/manifests/kube-dash-svc.yaml
-[ -f ${KUBE_DASH_SVC} ] || {
-    echo "Writing File: $KUBE_DASH_SVC"
-    mkdir -p $(dirname ${KUBE_DASH_SVC})
-    cat << EOF > ${KUBE_DASH_SVC}
 kind: Service
 apiVersion: v1
 metadata:
   labels:
-    app: kubernetes-dashboard
+    k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
   namespace: kube-system
 spec:
-  type: NodePort
   ports:
-  - port: 80
-    targetPort: 9090
+  - port: 443
+    targetPort: 8443
   selector:
-    app: kubernetes-dashboard
+    k8s-app: kubernetes-dashboard
+---
+# Grant admin privileges to the dashboard service account
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: kubernetes-dashboard
+  labels:
+    k8s-app: kubernetes-dashboard
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+- kind: ServiceAccount
+  name: kubernetes-dashboard
+  namespace: kube-system
 EOF
 }

-KUBE_DASH_BIN=/usr/local/bin/kube-dash
-[ -f ${KUBE_DASH_BIN} ] || {
-    echo "Writing File: $KUBE_DASH_BIN"
-    mkdir -p $(dirname ${KUBE_DASH_BIN})
-    cat << EOF > ${KUBE_DASH_BIN}
-#!/bin/sh
-until curl -sf "http://127.0.0.1:8080/healthz"
-do
-    echo "Waiting for Kubernetes API..."
-    sleep 5
-done
-
-#echo check for existence of kubernetes-dashboard deployment
-/usr/bin/kubectl get deployment kubernetes-dashboard --namespace=kube-system
+INFLUX_SINK=""
+# Deploy INFLUX AND GRAFANA
+if [ "$(echo $INFLUX_GRAFANA_DASHBOARD_ENABLED | tr '[:upper:]' '[:lower:]')" == "true" ]; then
+    INFLUX_SINK=" - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086"
+    INFLUX_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-influxdb-amd64:v1.3.3"
+    GRAFANA_IMAGE="${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}heapster-grafana-amd64:v4.4.3"
+
+    INFLUX_DEPLOY=/srv/magnum/kubernetes/influxdb.yaml
+    GRAFANA_DEPLOY=/srv/magnum/kubernetes/grafana.yaml
+
+    [ -f ${INFLUX_DEPLOY} ] || {
+    echo "Writing File: $INFLUX_DEPLOY"
+    mkdir -p $(dirname ${INFLUX_DEPLOY})
+    cat << EOF > ${INFLUX_DEPLOY}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: monitoring-influxdb
+  namespace: kube-system
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        task: monitoring
+        k8s-app: influxdb
+    spec:
+      containers:
+      - name: influxdb
+        image: ${INFLUX_IMAGE}
+        volumeMounts:
+        - mountPath: /data
+          name: influxdb-storage
+      volumes:
+      - name: influxdb-storage
+        emptyDir: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    task: monitoring
+    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
+    # If you are NOT using this as an addon, you should comment out this line.
+ # kubernetes.io/cluster-service: 'true' + kubernetes.io/name: monitoring-influxdb + name: monitoring-influxdb + namespace: kube-system +spec: + ports: + - port: 8086 + targetPort: 8086 + selector: + k8s-app: influxdb +EOF + } -if [ "\$?" != "0" ]; then - /usr/bin/kubectl create -f /srv/kubernetes/manifests/kube-dash-deploy.yaml --namespace=kube-system -fi + [ -f ${GRAFANA_DEPLOY} ] || { + echo "Writing File: $GRAFANA_DEPLOY" + mkdir -p $(dirname ${GRAFANA_DEPLOY}) + cat << EOF > ${GRAFANA_DEPLOY} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: monitoring-grafana + namespace: kube-system +spec: + replicas: 1 + template: + metadata: + labels: + task: monitoring + k8s-app: grafana + spec: + containers: + - name: grafana + image: ${GRAFANA_IMAGE} + ports: + - containerPort: 3000 + protocol: TCP + volumeMounts: + - mountPath: /etc/ssl/certs + name: ca-certificates + readOnly: true + - mountPath: /var + name: grafana-storage + env: + - name: INFLUXDB_HOST + value: monitoring-influxdb + - name: GF_SERVER_HTTP_PORT + value: "3000" + # The following env variables are required to make Grafana accessible via + # the kubernetes api-server proxy. On production clusters, we recommend + # removing these env variables, setup auth for grafana, and expose the grafana + # service using a LoadBalancer or a public IP. + - name: GF_AUTH_BASIC_ENABLED + value: "false" + - name: GF_AUTH_ANONYMOUS_ENABLED + value: "true" + - name: GF_AUTH_ANONYMOUS_ORG_ROLE + value: Admin + - name: GF_SERVER_ROOT_URL + # If you're only using the API Server proxy, set this value instead: + # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy + value: / + volumes: + - name: ca-certificates + hostPath: + path: /etc/ssl/certs + - name: grafana-storage + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) + # If you are NOT using this as an addon, you should comment out this line. + # kubernetes.io/cluster-service: 'true' + kubernetes.io/name: monitoring-grafana + name: monitoring-grafana + namespace: kube-system +spec: + # In a production setup, we recommend accessing Grafana through an external Loadbalancer + # or through a public IP. + # type: LoadBalancer + # You could also use NodePort to expose the service at a randomly-generated port + # type: NodePort + ports: + - port: 80 + targetPort: 3000 + selector: + k8s-app: grafana +EOF + } -#echo check for existence of kubernetes-dashboard service -/usr/bin/kubectl get service kubernetes-dashboard --namespace=kube-system + echo "Waiting for Kubernetes API..." + until curl --silent "http://127.0.0.1:8080/version" + do + sleep 5 + done -if [ "\$?" 
!= "0" ]; then - /usr/bin/kubectl create -f /srv/kubernetes/manifests/kube-dash-svc.yaml --namespace=kube-system + kubectl apply --validate=false -f $INFLUX_DEPLOY + kubectl apply --validate=false -f $GRAFANA_DEPLOY fi -EOF -} -KUBE_DASH_SERVICE=/etc/systemd/system/kube-dash.service -[ -f ${KUBE_DASH_SERVICE} ] || { - echo "Writing File: $KUBE_DASH_SERVICE" - mkdir -p $(dirname ${KUBE_DASH_SERVICE}) - cat << EOF > ${KUBE_DASH_SERVICE} -[Unit] -After=kube-system-namespace.service -Requires=kubelet.service -Wants=kube-system-namespace.service - -[Service] -Type=oneshot -Environment=HOME=/root -EnvironmentFile=-/etc/kubernetes/config -ExecStart=${KUBE_DASH_BIN} +# Deploy Heapster +HEAPSTER_DEPLOY=/srv/magnum/kubernetes/heapster-controller.yaml -[Install] -WantedBy=multi-user.target +[ -f ${HEAPSTER_DEPLOY} ] || { + echo "Writing File: $HEAPSTER_DEPLOY" + mkdir -p $(dirname ${HEAPSTER_DEPLOY}) + cat << EOF > ${HEAPSTER_DEPLOY} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: heapster + namespace: kube-system +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: heapster + namespace: kube-system +spec: + replicas: 1 + template: + metadata: + labels: + task: monitoring + k8s-app: heapster + spec: + serviceAccountName: heapster + containers: + - name: heapster + image: ${HEAPSTER_IMAGE} + imagePullPolicy: IfNotPresent + command: + - /heapster + - --source=kubernetes:https://kubernetes.default +${INFLUX_SINK} +--- +apiVersion: v1 +kind: Service +metadata: + labels: + task: monitoring + # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) + # If you are NOT using this as an addon, you should comment out this line. + kubernetes.io/cluster-service: 'true' + kubernetes.io/name: Heapster + name: heapster + namespace: kube-system +spec: + ports: + - port: 80 + targetPort: 8082 + selector: + k8s-app: heapster +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: heapster +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:heapster +subjects: +- kind: ServiceAccount + name: heapster + namespace: kube-system EOF } -chown root:root ${KUBE_DASH_BIN} -chmod 0755 ${KUBE_DASH_BIN} - -chown root:root ${KUBE_DASH_SERVICE} -chmod 0644 ${KUBE_DASH_SERVICE} +echo "Waiting for Kubernetes API..." +until curl --silent "http://127.0.0.1:8080/version" +do + sleep 5 +done -systemctl enable kube-dash -systemctl start --no-block kube-dash +kubectl apply --validate=false -f $KUBE_DASH_DEPLOY +kubectl apply --validate=false -f $HEAPSTER_DEPLOY diff -Nru magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/kube-system-namespace-service.sh magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/kube-system-namespace-service.sh --- magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/kube-system-namespace-service.sh 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/kube-system-namespace-service.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,71 +0,0 @@ -#!/bin/sh - -# this service required because docker will start only after cloud init was finished -# due service dependencies at Fedora Atomic (docker <- docker-storage-setup <- cloud-final) - -. 
/etc/sysconfig/heat-params - -KUBE_SYSTEM_JSON=/srv/kubernetes/kube-system-namespace.json -[ -f ${KUBE_SYSTEM_JSON} ] || { - echo "Writing File: $KUBE_SYSTEM_JSON" - mkdir -p $(dirname ${KUBE_SYSTEM_JSON}) - cat << EOF > ${KUBE_SYSTEM_JSON} -{ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": { - "name": "kube-system" - } -} -EOF -} - -KUBE_SYSTEM_BIN=/usr/local/bin/kube-system-namespace -[ -f ${KUBE_SYSTEM_BIN} ] || { - echo "Writing File: $KUBE_SYSTEM_BIN" - mkdir -p $(dirname ${KUBE_SYSTEM_BIN}) - cat << EOF > ${KUBE_SYSTEM_BIN} -#!/bin/sh -until curl -sf "http://127.0.0.1:8080/healthz" -do - echo "Waiting for Kubernetes API..." - sleep 5 -done - -#check for existence of namespace -/usr/bin/kubectl get namespace kube-system - -if [ "\$?" != "0" ]; then - /usr/bin/kubectl create -f /srv/kubernetes/kube-system-namespace.json -fi -EOF -} - -KUBE_SYSTEM_SERVICE=/etc/systemd/system/kube-system-namespace.service -[ -f ${KUBE_SYSTEM_SERVICE} ] || { - echo "Writing File: $KUBE_SYSTEM_SERVICE" - mkdir -p $(dirname ${KUBE_SYSTEM_SERVICE}) - cat << EOF > ${KUBE_SYSTEM_SERVICE} -[Unit] -After=kubelet.service -Requires=kubelet.service - -[Service] -Type=oneshot -Environment=HOME=/root -EnvironmentFile=-/etc/kubernetes/config -ExecStart=${KUBE_SYSTEM_BIN} - -[Install] -WantedBy=multi-user.target -EOF -} - -chown root:root ${KUBE_SYSTEM_BIN} -chmod 0755 ${KUBE_SYSTEM_BIN} - -chown root:root ${KUBE_SYSTEM_SERVICE} -chmod 0644 ${KUBE_SYSTEM_SERVICE} - -systemctl enable kube-system-namespace -systemctl start --no-block kube-system-namespace diff -Nru magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/make-cert-client.sh magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/make-cert-client.sh --- magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/make-cert-client.sh 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/make-cert-client.sh 2018-02-23 14:02:04.000000000 +0000 @@ -35,12 +35,14 @@ mkdir -p "$cert_dir" CA_CERT=$cert_dir/ca.crt -CLIENT_CERT=$cert_dir/client.crt -CLIENT_CSR=$cert_dir/client.csr -CLIENT_KEY=$cert_dir/client.key -#Get a token by user credentials and trust -auth_json=$(cat << EOF +function generate_certificates { + _CERT=$cert_dir/${1}.crt + _CSR=$cert_dir/${1}.csr + _KEY=$cert_dir/${1}.key + _CONF=$2 + #Get a token by user credentials and trust + auth_json=$(cat << EOF { "auth": { "identity": { @@ -59,52 +61,76 @@ EOF ) -content_type='Content-Type: application/json' -url="$AUTH_URL/auth/tokens" -USER_TOKEN=`curl $VERIFY_CA -s -i -X POST -H "$content_type" -d "$auth_json" $url \ - | grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'` - -# Get CA certificate for this cluster -curl $VERIFY_CA -X GET \ - -H "X-Auth-Token: $USER_TOKEN" \ - -H "OpenStack-API-Version: container-infra latest" \ - $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT + content_type='Content-Type: application/json' + url="$AUTH_URL/auth/tokens" + USER_TOKEN=`curl $VERIFY_CA -s -i -X POST -H "$content_type" -d "$auth_json" $url \ + | grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'` + + # Get CA certificate for this cluster + curl $VERIFY_CA -X GET \ + -H "X-Auth-Token: $USER_TOKEN" \ + -H "OpenStack-API-Version: container-infra latest" \ + $MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT + + # Generate client's private key and csr + openssl genrsa -out 
"${_KEY}" 4096 + chmod 400 "${_KEY}" + openssl req -new -days 1000 \ + -key "${_KEY}" \ + -out "${_CSR}" \ + -reqexts req_ext \ + -config "${_CONF}" + + # Send csr to Magnum to have it signed + csr_req=$(python -c "import json; fp = open('${_CSR}'); print json.dumps({'cluster_uuid': '$CLUSTER_UUID', 'csr': fp.read()}); fp.close()") + curl $VERIFY_CA -X POST \ + -H "X-Auth-Token: $USER_TOKEN" \ + -H "OpenStack-API-Version: container-infra latest" \ + -H "Content-Type: application/json" \ + -d "$csr_req" \ + $MAGNUM_URL/certificates | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${_CERT} +} + +#Kubelet Certs +INSTANCE_NAME=$(hostname --short | sed 's/\.novalocal//') + +cat > ${cert_dir}/kubelet.conf < ${cert_dir}/client.conf < ${cert_dir}/proxy.conf < ${CLIENT_CERT} +generate_certificates kubelet ${cert_dir}/kubelet.conf +generate_certificates proxy ${cert_dir}/proxy.conf # Common certs and key are created for both etcd and kubernetes services. # Both etcd and kube user should have permission to access the certs and key. @@ -113,4 +139,7 @@ usermod -a -G kube_etcd kube chmod 550 "${cert_dir}" chown -R kube:kube_etcd "${cert_dir}" -chmod 440 $CLIENT_KEY +chmod 440 ${cert_dir}/kubelet.key +chmod 440 ${cert_dir}/proxy.key +mkdir -p /etc/flanneld/certs +cp ${cert_dir}/* /etc/flanneld/certs diff -Nru magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/make-cert.sh magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/make-cert.sh --- magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/make-cert.sh 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/make-cert.sh 2018-02-23 14:02:04.000000000 +0000 @@ -109,7 +109,7 @@ req_extensions = req_ext prompt = no [req_distinguished_name] -CN = kubernetes.default.svc +CN = kubernetes [req_ext] subjectAltName = ${sans} extendedKeyUsage = clientAuth,serverAuth @@ -141,3 +141,5 @@ chmod 550 "${cert_dir}" chown -R kube:kube_etcd "${cert_dir}" chmod 440 $SERVER_KEY +mkdir -p /etc/etcd/certs +cp ${cert_dir}/* /etc/etcd/certs diff -Nru magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/network-config-service.sh magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/network-config-service.sh --- magnum-6.0.1/magnum/drivers/common/templates/kubernetes/fragments/network-config-service.sh 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/drivers/common/templates/kubernetes/fragments/network-config-service.sh 2018-02-23 14:02:04.000000000 +0000 @@ -7,25 +7,15 @@ fi CERT_DIR=/etc/kubernetes/certs PROTOCOL=https -FLANNEL_OPTIONS="-etcd-cafile $CERT_DIR/ca.crt \ --etcd-certfile $CERT_DIR/server.crt \ --etcd-keyfile $CERT_DIR/server.key" ETCD_CURL_OPTIONS="--cacert $CERT_DIR/ca.crt \ --cert $CERT_DIR/server.crt --key $CERT_DIR/server.key" FLANNELD_CONFIG=/etc/sysconfig/flanneld if [ "$TLS_DISABLED" = "True" ]; then PROTOCOL=http - FLANNEL_OPTIONS="" ETCD_CURL_OPTIONS="" fi -sed -i '/FLANNEL_OPTIONS/'d $FLANNELD_CONFIG - -cat >> $FLANNELD_CONFIG < /etc/sysconfig/flanneld < affinity policy for nodes server group @@ -459,6 +464,7 @@ kubernetes_port: {get_param: kubernetes_port} tls_disabled: {get_param: tls_disabled} kube_dashboard_enabled: {get_param: kube_dashboard_enabled} + influx_grafana_dashboard_enabled: {get_param: influx_grafana_dashboard_enabled} verify_ca: {get_param: verify_ca} secgroup_kube_master_id: {get_resource: secgroup_master} http_proxy: {get_param: http_proxy} diff -Nru 
magnum-6.0.1/magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml
--- magnum-6.0.1/magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/drivers/k8s_coreos_v1/templates/kubemaster.yaml 2018-02-23 14:02:04.000000000 +0000
@@ -115,6 +115,10 @@
     type: boolean
     description: whether or not to disable kubernetes dashboard

+  influx_grafana_dashboard_enabled:
+    type: boolean
+    description: Enable influxdb with grafana dashboard for data from heapster
+
   verify_ca:
     type: boolean
     description: whether or not to validate certificate authority
@@ -293,6 +297,7 @@
             "$TLS_DISABLED": {get_param: tls_disabled}
             "$VERIFY_CA": {get_param: verify_ca}
             "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled}
+            "$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: influx_grafana_dashboard_enabled}
             "$KUBE_VERSION": {get_param: kube_version}
             "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version}
             "$CLUSTER_UUID": {get_param: cluster_uuid}
diff -Nru magnum-6.0.1/magnum/drivers/k8s_fedora_atomic_v1/driver.py magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/driver.py
--- magnum-6.0.1/magnum/drivers/k8s_fedora_atomic_v1/driver.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/driver.py 2018-02-23 14:02:04.000000000 +0000
@@ -13,7 +13,6 @@
 # under the License.

 from magnum.drivers.common import k8s_monitor
-from magnum.drivers.common.k8s_scale_manager import K8sScaleManager
 from magnum.drivers.heat import driver
 from magnum.drivers.k8s_fedora_atomic_v1 import template_def

@@ -35,4 +34,7 @@
         return k8s_monitor.K8sMonitor(context, cluster)

     def get_scale_manager(self, context, osclient, cluster):
-        return K8sScaleManager(context, osclient, cluster)
+        # FIXME: Until the kubernetes client is fixed, remove
+        # the scale_manager.
+        # https://bugs.launchpad.net/magnum/+bug/1746510
+        return None
diff -Nru magnum-6.0.1/magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml
--- magnum-6.0.1/magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml 2018-02-23 14:02:04.000000000 +0000
@@ -250,6 +250,11 @@
     description: whether or not to enable kubernetes dashboard
     default: True
 
+  influx_grafana_dashboard_enabled:
+    type: boolean
+    description: Enable influxdb with grafana dashboard for data from heapster
+    default: False
+
   verify_ca:
     type: boolean
     description: whether or not to validate certificate authority
@@ -313,17 +318,27 @@
   kube_tag:
     type: string
     description: tag of the k8s containers used to provision the kubernetes cluster
-    default: v1.7.4
+    default: v1.9.3
+
+  etcd_tag:
+    type: string
+    description: tag of the etcd system container
+    default: v3.2.7
+
+  flannel_tag:
+    type: string
+    description: tag of the flannel system containers
+    default: v0.9.0
 
   kube_version:
     type: string
     description: version of kubernetes used for kubernetes cluster
-    default: v1.7.4
+    default: v1.9.3
 
   kube_dashboard_version:
     type: string
     description: version of kubernetes dashboard used for kubernetes cluster
-    default: v1.5.1
+    default: v1.8.3
 
   insecure_registry_url:
     type: string
@@ -370,6 +385,83 @@
       availability zone for master and nodes
     default: ""
 
+  cert_manager_api:
+    type: boolean
+    description: true if the kubernetes cert api manager should be enabled
+    default: false
+
+  ca_key:
+    type: string
+    description: key of internal ca for the kube certificate api manager
+    default: ""
+    hidden: true
+
+  calico_tag:
+    type: string
+    description: tag of the calico containers used to provision the calico node
+    default: v2.6.7
+
+  calico_cni_tag:
+    type: string
+    description: tag of the cni used to provision the calico node
+    default: v1.11.2
+
+  calico_kube_controllers_tag:
+    type: string
+    description: tag of the kube_controllers used to provision the calico node
+    default: v1.0.3
+
+  calico_ipv4pool:
+    type: string
+    description: Configure the IP pool from which Pod IPs will be chosen
+    default: "192.168.0.0/16"
+
+  pods_network_cidr:
+    type: string
+    description: Configure the IP pool/range from which pod IPs will be chosen
+
+  ingress_controller:
+    type: string
+    description: >
+      ingress controller backend to use
+    default: ""
+
+  ingress_controller_role:
+    type: string
+    description: >
+      node role where the ingress controller backend should run
+    default: "ingress"
+
+  kubelet_options:
+    type: string
+    description: >
+      additional options to be passed to the kubelet
+    default: ""
+
+  kubeapi_options:
+    type: string
+    description: >
+      additional options to be passed to the api
+    default: ""
+
+  kubecontroller_options:
+    type: string
+    description: >
+      additional options to be passed to the controller manager
+    default: ""
+
+  kubeproxy_options:
+    type: string
+    description: >
+      additional options to be passed to the kube proxy
+    default: ""
+
+  kubescheduler_options:
+    type: string
+    description: >
+      additional options to be passed to the scheduler
+    default: ""
+
 resources:
 
   ######################################################################
@@ -544,6 +636,7 @@
           kubernetes_port: {get_param: kubernetes_port}
           tls_disabled: {get_param: tls_disabled}
           kube_dashboard_enabled: {get_param: kube_dashboard_enabled}
+          influx_grafana_dashboard_enabled: {get_param: influx_grafana_dashboard_enabled}
           verify_ca: {get_param: verify_ca}
           secgroup_kube_master_id: {get_resource: secgroup_kube_master}
           http_proxy: {get_param: http_proxy}
@@ -551,6 +644,7 @@
           no_proxy: {get_param: no_proxy}
           kube_tag: {get_param: kube_tag}
           kube_version: {get_param: kube_version}
+          etcd_tag: {get_param: etcd_tag}
           kube_dashboard_version: {get_param: kube_dashboard_version}
           trustee_user_id: {get_param: trustee_user_id}
           trustee_password: {get_param: trustee_password}
@@ -564,6 +658,20 @@
           openstack_ca: {get_param: openstack_ca}
           nodes_server_group_id: {get_resource: nodes_server_group}
           availability_zone: {get_param: availability_zone}
+          ca_key: {get_param: ca_key}
+          cert_manager_api: {get_param: cert_manager_api}
+          calico_tag: {get_param: calico_tag}
+          calico_cni_tag: {get_param: calico_cni_tag}
+          calico_kube_controllers_tag: {get_param: calico_kube_controllers_tag}
+          calico_ipv4pool: {get_param: calico_ipv4pool}
+          pods_network_cidr: {get_param: pods_network_cidr}
+          ingress_controller: {get_param: ingress_controller}
+          ingress_controller_role: {get_param: ingress_controller_role}
+          kubelet_options: {get_param: kubelet_options}
+          kubeapi_options: {get_param: kubeapi_options}
+          kubeproxy_options: {get_param: kubeproxy_options}
+          kubecontroller_options: {get_param: kubecontroller_options}
+          kubescheduler_options: {get_param: kubescheduler_options}
 
   ######################################################################
   #
@@ -623,6 +731,7 @@
           no_proxy: {get_param: no_proxy}
           kube_tag: {get_param: kube_tag}
           kube_version: {get_param: kube_version}
+          flannel_tag: {get_param: flannel_tag}
           trustee_user_id: {get_param: trustee_user_id}
           trustee_username: {get_param: trustee_username}
           trustee_password: {get_param: trustee_password}
@@ -635,6 +744,9 @@
           openstack_ca: {get_param: openstack_ca}
           nodes_server_group_id: {get_resource: nodes_server_group}
           availability_zone: {get_param: availability_zone}
+          pods_network_cidr: {get_param: pods_network_cidr}
+          kubelet_options: {get_param: kubelet_options}
+          kubeproxy_options: {get_param: kubeproxy_options}
 
 outputs:
 
diff -Nru magnum-6.0.1/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml
--- magnum-6.0.1/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml 2018-02-23 14:02:04.000000000 +0000
@@ -112,6 +112,10 @@
     type: boolean
     description: whether or not to disable kubernetes dashboard
 
+  influx_grafana_dashboard_enabled:
+    type: boolean
+    description: Enable influxdb with grafana dashboard for data from heapster
+
   verify_ca:
     type: boolean
     description: whether or not to validate certificate authority
@@ -211,6 +215,10 @@
     type: string
     description: tag of the k8s containers used to provision the kubernetes cluster
 
+  etcd_tag:
+    type: string
+    description: tag of the etcd system container
+
   kube_version:
     type: string
     description: version of kubernetes used for kubernetes cluster
@@ -273,6 +281,71 @@
       availability zone for master and nodes
     default: ""
 
+  ca_key:
+    type: string
+    description: key of internal ca for the kube certificate api manager
+    hidden: true
+
+  cert_manager_api:
+    type: boolean
+    description: true if the kubernetes cert api manager should be enabled
+    default: false
+
+  calico_tag:
+    type: string
+    description: tag of the calico containers used to provision the calico node
+
+  calico_cni_tag:
+    type: string
+    description: tag of the cni used to provision the calico node
+
+  calico_kube_controllers_tag:
+    type: string
+    description: tag of the kube_controllers used to provision the calico node
+
+  calico_ipv4pool:
+    type: string
+    description: Configure the IP pool from which Pod IPs will be chosen
+
+  pods_network_cidr:
+    type: string
+    description: Configure the IP pool/range from which pod IPs will be chosen
+
+  ingress_controller:
+    type: string
+    description: >
+      ingress controller backend to use
+
+  ingress_controller_role:
+    type: string
+    description: >
+      node role where the ingress controller should run
+
+  kubelet_options:
+    type: string
+    description: >
+      additional options to be passed to the kubelet
+
+  kubeapi_options:
+    type: string
+    description: >
+      additional options to be passed to the api
+
+  kubecontroller_options:
+    type: string
+    description: >
+      additional options to be passed to the controller manager
+
+  kubeproxy_options:
+    type: string
+    description: >
+      additional options to be passed to the kube proxy
+
+  kubescheduler_options:
+    type: string
+    description: >
+      additional options to be passed to the scheduler
+
 resources:
 
   master_wait_handle:
@@ -331,6 +404,7 @@
             "$FLANNEL_BACKEND": {get_param: flannel_backend}
             "$SYSTEM_PODS_INITIAL_DELAY": {get_param: system_pods_initial_delay}
             "$SYSTEM_PODS_TIMEOUT": {get_param: system_pods_timeout}
+            "$PODS_NETWORK_CIDR": {get_param: pods_network_cidr}
             "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr}
             "$ADMISSION_CONTROL_LIST": {get_param: admission_control_list}
             "$ETCD_DISCOVERY_URL": {get_param: discovery_url}
@@ -340,6 +414,7 @@
             "$CLUSTER_SUBNET": {get_param: fixed_subnet}
             "$TLS_DISABLED": {get_param: tls_disabled}
             "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled}
+            "$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: influx_grafana_dashboard_enabled}
             "$VERIFY_CA": {get_param: verify_ca}
             "$CLUSTER_UUID": {get_param: cluster_uuid}
             "$MAGNUM_URL": {get_param: magnum_url}
@@ -348,6 +423,7 @@
             "$HTTPS_PROXY": {get_param: https_proxy}
             "$NO_PROXY": {get_param: no_proxy}
             "$KUBE_TAG": {get_param: kube_tag}
+            "$ETCD_TAG": {get_param: etcd_tag}
             "$KUBE_VERSION": {get_param: kube_version}
             "$KUBE_DASHBOARD_VERSION": {get_param: kube_dashboard_version}
             "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]}
@@ -359,6 +435,19 @@
             "$ETCD_LB_VIP": {get_param: etcd_lb_vip}
             "$DNS_SERVICE_IP": {get_param: dns_service_ip}
             "$DNS_CLUSTER_DOMAIN": {get_param: dns_cluster_domain}
+            "$CERT_MANAGER_API": {get_param: cert_manager_api}
+            "$CA_KEY": {get_param: ca_key}
+            "$CALICO_TAG": {get_param: calico_tag}
+            "$CALICO_CNI_TAG": {get_param: calico_cni_tag}
+            "$CALICO_KUBE_CONTROLLERS_TAG": {get_param: calico_kube_controllers_tag}
+            "$CALICO_IPV4POOL": {get_param: calico_ipv4pool}
+            "$INGRESS_CONTROLLER": {get_param: ingress_controller}
+            "$INGRESS_CONTROLLER_ROLE": {get_param: ingress_controller_role}
+            "$KUBELET_OPTIONS": {get_param: kubelet_options}
+            "$KUBEAPI_OPTIONS": {get_param: kubeapi_options}
+            "$KUBECONTROLLER_OPTIONS": {get_param: kubecontroller_options}
+            "$KUBEPROXY_OPTIONS": {get_param: kubeproxy_options}
+            "$KUBESCHEDULER_OPTIONS": {get_param: kubescheduler_options}
 
   install_openstack_ca:
     type: OS::Heat::SoftwareConfig
@@ -422,23 +511,11 @@
       group: ungrouped
       config: {get_file: ../../common/templates/kubernetes/fragments/enable-services-master.sh}
 
-  network_service:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: ../../common/templates/kubernetes/fragments/network-service.sh}
-
-  kube_system_namespace_service:
+  kube_apiserver_to_kubelet_role:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: ../../common/templates/kubernetes/fragments/kube-system-namespace-service.sh}
-
-  kube_ui_service:
-    type: OS::Heat::SoftwareConfig
-    properties:
-      group: ungrouped
-      config: {get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh}
+      config: {get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh}
 
   core_dns_service:
     type: OS::Heat::SoftwareConfig
@@ -479,18 +556,16 @@
         - config: {get_resource: write_heat_params}
         - config: {get_resource: configure_etcd}
        - config: {get_resource: write_kube_os_config}
-        - config: {get_resource: make_cert}
         - config: {get_resource: configure_docker_storage}
         - config: {get_resource: configure_kubernetes}
+        - config: {get_resource: make_cert}
         - config: {get_resource: add_proxy}
+        - config: {get_resource: start_container_agent}
         - config: {get_resource: enable_services}
         - config: {get_resource: write_network_config}
         - config: {get_resource: network_config_service}
-        - config: {get_resource: network_service}
-        - config: {get_resource: kube_system_namespace_service}
+        - config: {get_resource: kube_apiserver_to_kubelet_role}
         - config: {get_resource: core_dns_service}
-        - config: {get_resource: kube_ui_service}
-        - config: {get_resource: start_container_agent}
        - config: {get_resource: master_wc_notify}
 
   enable_prometheus_monitoring:
@@ -511,6 +586,70 @@
       server: {get_resource: kube-master}
       actions: ['CREATE']
 
+  enable_cert_manager_api:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config:
+        str_replace:
+          template: {get_file: ../../common/templates/kubernetes/fragments/enable-cert-api-manager}
+          params:
+            "$CA_KEY": {get_param: ca_key}
+
+  enable_cert_manager_api_deployment:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      signal_transport: HEAT_SIGNAL
+      config: {get_resource: enable_cert_manager_api}
+      server: {get_resource: kube-master}
+      actions: ['CREATE']
+
+  calico_service:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: {get_file: ../../common/templates/kubernetes/fragments/calico-service.sh}
+
+  calico_service_deployment:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      signal_transport: HEAT_SIGNAL
+      config: {get_resource: calico_service}
+      server: {get_resource: kube-master}
+      actions: ['CREATE']
+
+  enable_ingress_controller:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config:
+        str_replace:
+          params:
+            $enable-ingress-traefik: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-traefik}
+          template: {get_file: ../../common/templates/kubernetes/fragments/enable-ingress-controller}
+
+  enable_ingress_controller_deployment:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      signal_transport: HEAT_SIGNAL
+      config: {get_resource: enable_ingress_controller}
+      server: {get_resource: kube-master}
+      actions: ['CREATE']
+
+  kubernetes_dashboard:
+    type: OS::Heat::SoftwareConfig
+    properties:
+      group: script
+      config: {get_file: ../../common/templates/kubernetes/fragments/kube-dashboard-service.sh}
+
+  kubernetes_dashboard_deployment:
+    type: OS::Heat::SoftwareDeployment
+    properties:
+      signal_transport: HEAT_SIGNAL
+      config: {get_resource: kubernetes_dashboard}
+      server: {get_resource: kube-master}
+      actions: ['CREATE']
+
   ######################################################################
   #
   # a single kubernetes master.
@@ -543,7 +682,7 @@
       fixed_ips:
         - subnet: {get_param: fixed_subnet}
       allowed_address_pairs:
-        - ip_address: {get_param: flannel_network_cidr}
+        - ip_address: {get_param: pods_network_cidr}
       replacement_policy: AUTO
 
   kube_master_floating:
diff -Nru magnum-6.0.1/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml
--- magnum-6.0.1/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml 2018-02-23 14:02:04.000000000 +0000
@@ -175,6 +175,10 @@
     type: string
     description: tag of the k8s containers used to provision the kubernetes cluster
 
+  flannel_tag:
+    type: string
+    description: tag of the flannel system containers
+
   kube_version:
     type: string
     description: version of kubernetes used for kubernetes cluster
@@ -241,6 +245,20 @@
       availability zone for master and nodes
     default: ""
 
+  pods_network_cidr:
+    type: string
+    description: Configure the IP pool/range from which pod IPs will be chosen
+
+  kubelet_options:
+    type: string
+    description: >
+      additional options to be passed to the kubelet
+
+  kubeproxy_options:
+    type: string
+    description: >
+      additional options to be passed to the kube proxy
+
 resources:
 
   minion_wait_handle:
@@ -296,6 +314,7 @@
             $HTTPS_PROXY: {get_param: https_proxy}
             $NO_PROXY: {get_param: no_proxy}
             $KUBE_TAG: {get_param: kube_tag}
+            $FLANNEL_TAG: {get_param: flannel_tag}
             $KUBE_VERSION: {get_param: kube_version}
             $WAIT_CURL: {get_attr: [minion_wait_handle, curl_cli]}
             $TRUSTEE_USER_ID: {get_param: trustee_user_id}
@@ -306,6 +325,8 @@
             $CONTAINER_INFRA_PREFIX: {get_param: container_infra_prefix}
             $DNS_SERVICE_IP: {get_param: dns_service_ip}
             $DNS_CLUSTER_DOMAIN: {get_param: dns_cluster_domain}
+            $KUBELET_OPTIONS: {get_param: kubelet_options}
+            $KUBEPROXY_OPTIONS: {get_param: kubeproxy_options}
 
   install_openstack_ca:
     type: OS::Heat::SoftwareConfig
@@ -455,7 +476,7 @@
       fixed_ips:
         - subnet: {get_param: fixed_subnet}
       allowed_address_pairs:
-        - ip_address: {get_param: flannel_network_cidr}
+        - ip_address: {get_param: pods_network_cidr}
       replacement_policy: AUTO
 
   kube_minion_floating:
diff -Nru magnum-6.0.1/magnum/drivers/k8s_fedora_ironic_v1/driver.py magnum-6.1.0/magnum/drivers/k8s_fedora_ironic_v1/driver.py
--- magnum-6.0.1/magnum/drivers/k8s_fedora_ironic_v1/driver.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/drivers/k8s_fedora_ironic_v1/driver.py 2018-02-23 14:02:04.000000000 +0000
@@ -13,7 +13,6 @@
 # under the License.
 
 from magnum.drivers.common import k8s_monitor
-from magnum.drivers.common.k8s_scale_manager import K8sScaleManager
 from magnum.drivers.heat import driver
 from magnum.drivers.k8s_fedora_ironic_v1 import template_def
 
@@ -35,4 +34,7 @@
         return k8s_monitor.K8sMonitor(context, cluster)
 
     def get_scale_manager(self, context, osclient, cluster):
-        return K8sScaleManager(context, osclient, cluster)
+        # FIXME: Until the kubernetes client is fixed, remove
+        # the scale_manager.
+        # https://bugs.launchpad.net/magnum/+bug/1746510
+        return None
diff -Nru magnum-6.0.1/magnum/drivers/k8s_fedora_ironic_v1/templates/kubecluster.yaml magnum-6.1.0/magnum/drivers/k8s_fedora_ironic_v1/templates/kubecluster.yaml
--- magnum-6.0.1/magnum/drivers/k8s_fedora_ironic_v1/templates/kubecluster.yaml 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/drivers/k8s_fedora_ironic_v1/templates/kubecluster.yaml 2018-02-23 14:02:04.000000000 +0000
@@ -248,6 +248,11 @@
     description: whether or not to disable kubernetes dashboard
     default: True
 
+  influx_grafana_dashboard_enabled:
+    type: boolean
+    description: Enable influxdb with grafana dashboard for data from heapster
+    default: False
+
   verify_ca:
     type: boolean
     description: whether or not to validate certificate authority
@@ -316,12 +321,12 @@
   kube_tag:
     type: string
     description: tag of the k8s containers used to provision the kubernetes cluster
-    default: v1.7.4
+    default: v1.9.3
 
   kube_version:
     type: string
     description: version of kubernetes used for kubernetes cluster
-    default: v1.7.4
+    default: v1.9.3
 
   kube_dashboard_version:
     type: string
@@ -510,6 +515,7 @@
           kubernetes_port: {get_param: kubernetes_port}
           tls_disabled: {get_param: tls_disabled}
           kube_dashboard_enabled: {get_param: kube_dashboard_enabled}
+          influx_grafana_dashboard_enabled: {get_param: influx_grafana_dashboard_enabled}
           verify_ca: {get_param: verify_ca}
           secgroup_base_id: {get_resource: secgroup_base}
           secgroup_kube_master_id: {get_resource: secgroup_kube_master}
diff -Nru magnum-6.0.1/magnum/drivers/k8s_fedora_ironic_v1/templates/kubemaster.yaml magnum-6.1.0/magnum/drivers/k8s_fedora_ironic_v1/templates/kubemaster.yaml
--- magnum-6.0.1/magnum/drivers/k8s_fedora_ironic_v1/templates/kubemaster.yaml 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/drivers/k8s_fedora_ironic_v1/templates/kubemaster.yaml 2018-02-23 14:02:04.000000000 +0000
@@ -97,6 +97,10 @@
     type: boolean
     description: whether or not to disable kubernetes dashboard
 
+  influx_grafana_dashboard_enabled:
+    type: boolean
+    description: Enable influxdb with grafana dashboard for data from heapster
+
   verify_ca:
     type: boolean
     description: whether or not to validate certificate authority
@@ -299,6 +303,7 @@
             "$CLUSTER_SUBNET": {get_param: fixed_subnet}
             "$TLS_DISABLED": {get_param: tls_disabled}
             "$KUBE_DASHBOARD_ENABLED": {get_param: kube_dashboard_enabled}
+            "$INFLUX_GRAFANA_DASHBOARD_ENABLED": {get_param: influx_grafana_dashboard_enabled}
             "$VERIFY_CA": {get_param: verify_ca}
             "$CLUSTER_UUID": {get_param: cluster_uuid}
             "$MAGNUM_URL": {get_param: magnum_url}
@@ -391,11 +396,11 @@
       group: ungrouped
       config: {get_file: ../../common/templates/kubernetes/fragments/enable-kube-controller-manager-scheduler.sh}
 
-  kube_system_namespace_service:
+  kube_apiserver_to_kubelet_role:
     type: OS::Heat::SoftwareConfig
     properties:
       group: ungrouped
-      config: {get_file: ../../common/templates/kubernetes/fragments/kube-system-namespace-service.sh}
+      config: {get_file: ../../common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh}
 
   kube_ui_service:
     type: OS::Heat::SoftwareConfig
@@ -450,7 +455,7 @@
         - config: {get_resource: write_network_config}
         - config: {get_resource: network_config_service}
         - config: {get_resource: network_service}
-        - config: {get_resource: kube_system_namespace_service}
+        - config: {get_resource: kube_apiserver_to_kubelet_role}
         - config: {get_resource: enable_kube_controller_manager_scheduler}
         - config: {get_resource: enable_kube_proxy}
         - config: {get_resource: kube_ui_service}
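The next file, magnum/objects/federation.py, introduces the Federation versioned object that backs the new federation API endpoints. A minimal usage sketch, assuming an existing Magnum request context and database; the field values, placeholder UUIDs, and the 'dns-zone' property key below are illustrative only:

    from magnum import objects

    # Instantiate the object with its declared fields and persist it;
    # create() inserts a row through dbapi.create_federation().
    federation = objects.Federation(
        context,
        name='federation-example',
        hostcluster_id='<host-cluster-uuid>',
        member_ids=[],
        status='CREATE_IN_PROGRESS',  # must be one of FederationStatus.ALL
        properties={'dns-zone': 'example.com.'})
    federation.create()

    # Later, reload it and record a newly joined member cluster.
    fed = objects.Federation.get_by_uuid(context, federation.uuid)
    fed.member_ids = fed.member_ids + ['<member-cluster-uuid>']
    fed.save()  # writes only the changed columns via update_federation()

Reassigning member_ids (rather than appending in place) keeps the change visible to obj_get_changes(), which save() uses to build its column-by-column update.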
diff -Nru magnum-6.0.1/magnum/objects/federation.py magnum-6.1.0/magnum/objects/federation.py
--- magnum-6.0.1/magnum/objects/federation.py 1970-01-01 00:00:00.000000000 +0000
+++ magnum-6.1.0/magnum/objects/federation.py 2018-02-23 14:02:04.000000000 +0000
@@ -0,0 +1,215 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import strutils
+from oslo_utils import uuidutils
+from oslo_versionedobjects import fields
+
+from magnum.common import exception
+from magnum.db import api as dbapi
+from magnum.objects import base
+from magnum.objects import fields as m_fields
+
+
+@base.MagnumObjectRegistry.register
+class Federation(base.MagnumPersistentObject, base.MagnumObject,
+                 base.MagnumObjectDictCompat):
+    """Represents a Federation object.
+
+    Version 1.0: Initial Version
+    """
+
+    VERSION = '1.0'
+
+    dbapi = dbapi.get_instance()
+
+    fields = {
+        'id': fields.IntegerField(),
+        'uuid': fields.UUIDField(nullable=True),
+        'name': fields.StringField(nullable=True),
+        'project_id': fields.StringField(nullable=True),
+        'hostcluster_id': fields.StringField(nullable=True),
+        'member_ids': fields.ListOfStringsField(nullable=True),
+        'status': m_fields.FederationStatusField(nullable=True),
+        'status_reason': fields.StringField(nullable=True),
+        'properties': fields.DictOfStringsField(nullable=True)
+    }
+
+    @staticmethod
+    def _from_db_object(federation, db_federation):
+        """Converts a database entity to a formal object."""
+        for field in federation.fields:
+            federation[field] = db_federation[field]
+
+        federation.obj_reset_changes()
+        return federation
+
+    @staticmethod
+    def _from_db_object_list(db_objects, cls, context):
+        """Converts a list of database entities to a list of formal objects."""
+        return [Federation._from_db_object(cls(context), obj)
+                for obj in db_objects]
+
+    @base.remotable_classmethod
+    def get(cls, context, federation_id):
+        """Find a federation based on its id or uuid and return it.
+
+        :param federation_id: the id *or* uuid of a federation.
+        :param context: Security context
+        :returns: a :class:`Federation` object.
+        """
+        if strutils.is_int_like(federation_id):
+            return cls.get_by_id(context, federation_id)
+        elif uuidutils.is_uuid_like(federation_id):
+            return cls.get_by_uuid(context, federation_id)
+        else:
+            raise exception.InvalidIdentity(identity=federation_id)
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, federation_id):
+        """Find a federation based on its integer id and return it.
+
+        :param federation_id: the id of a federation.
+        :param context: Security context
+        :returns: a :class:`Federation` object.
+        """
+        db_federation = cls.dbapi.get_federation_by_id(context, federation_id)
+        federation = Federation._from_db_object(cls(context), db_federation)
+        return federation
+
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, uuid):
+        """Find a federation based on uuid and return it.
+
+        :param uuid: the uuid of a federation.
+        :param context: Security context
+        :returns: a :class:`Federation` object.
+        """
+        db_federation = cls.dbapi.get_federation_by_uuid(context, uuid)
+        federation = Federation._from_db_object(cls(context), db_federation)
+        return federation
+
+    @base.remotable_classmethod
+    def get_count_all(cls, context, filters=None):
+        """Get count of matching federation.
+
+        :param context: The security context
+        :param filters: filter dict, can includes 'name', 'project_id',
+                        'hostcluster_id', 'member_ids', 'status' (should be a
+                        status list).
+        :returns: Count of matching federation.
+        """
+        return cls.dbapi.get_federation_count_all(context, filters=filters)
+
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        """Find a federation based on name and return a Federation object.
+
+        :param name: the logical name of a federation.
+        :param context: Security context
+        :returns: a :class:`Federation` object.
+        """
+        db_federation = cls.dbapi.get_federation_by_name(context, name)
+        federation = Federation._from_db_object(cls(context), db_federation)
+        return federation
+
+    @base.remotable_classmethod
+    def list(cls, context, limit=None, marker=None,
+             sort_key=None, sort_dir=None, filters=None):
+        """Return a list of Federation objects.
+
+        :param context: Security context.
+        :param limit: maximum number of resources to return in a single result.
+        :param marker: pagination marker for large data sets.
+        :param sort_key: column to sort results by.
+        :param sort_dir: direction to sort. "asc" or "desc".
+        :param filters: filter dict, can includes 'name', 'project_id',
+                        'hostcluster_id', 'member_ids', 'status' (should be a
+                        status list).
+        :returns: a list of :class:`Federation` object.
+
+        """
+        db_federation = cls.dbapi.get_federation_list(context, limit=limit,
+                                                      marker=marker,
+                                                      sort_key=sort_key,
+                                                      sort_dir=sort_dir,
+                                                      filters=filters)
+        return Federation._from_db_object_list(db_federation, cls, context)
+
+    @base.remotable
+    def create(self, context=None):
+        """Create a Federation record in the DB.
+
+        :param context: Security context. NOTE: This should only
+                        be used internally by the indirection_api.
+                        Unfortunately, RPC requires context as the first
+                        argument, even though we don't use it.
+                        A context should be set when instantiating the
+                        object, e.g.: Federation(context)
+
+        """
+        values = self.obj_get_changes()
+        db_federation = self.dbapi.create_federation(values)
+        self._from_db_object(self, db_federation)
+
+    @base.remotable
+    def destroy(self, context=None):
+        """Delete the Federation from the DB.
+
+        :param context: Security context. NOTE: This should only
+                        be used internally by the indirection_api.
+                        Unfortunately, RPC requires context as the first
+                        argument, even though we don't use it.
+                        A context should be set when instantiating the
+                        object, e.g.: Federation(context)
+        """
+        self.dbapi.destroy_federation(self.uuid)
+        self.obj_reset_changes()
+
+    @base.remotable
+    def save(self, context=None):
+        """Save updates to this Federation.
+
+        Updates will be made column by column based on the result
+        of self.what_changed().
+
+        :param context: Security context. NOTE: This should only
+                        be used internally by the indirection_api.
+                        Unfortunately, RPC requires context as the first
+                        argument, even though we don't use it.
+                        A context should be set when instantiating the
+                        object, e.g.: Federation(context)
+        """
+        updates = self.obj_get_changes()
+        self.dbapi.update_federation(self.uuid, updates)
+
+        self.obj_reset_changes()
+
+    @base.remotable
+    def refresh(self, context=None):
+        """Load updates for this Federation.
+
+        Loads a Federation with the same uuid from the database and
+        checks for updated attributes. Updates are applied from
+        the loaded Federation column by column, if there are any updates.
+
+        :param context: Security context. NOTE: This should only
+                        be used internally by the indirection_api.
+                        Unfortunately, RPC requires context as the first
+                        argument, even though we don't use it.
+                        A context should be set when instantiating the
+                        object, e.g.: Federation(context)
+        """
+        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
+        for field in self.fields:
+            if self.obj_attr_is_set(field) and self[field] != current[field]:
+                self[field] = current[field]
diff -Nru magnum-6.0.1/magnum/objects/fields.py magnum-6.1.0/magnum/objects/fields.py
--- magnum-6.0.1/magnum/objects/fields.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/objects/fields.py 2018-02-23 14:02:04.000000000 +0000
@@ -49,6 +49,28 @@
         super(ClusterStatus, self).__init__(valid_values=ClusterStatus.ALL)
 
 
+class FederationStatus(fields.Enum):
+    CREATE_IN_PROGRESS = 'CREATE_IN_PROGRESS'
+    CREATE_FAILED = 'CREATE_FAILED'
+    CREATE_COMPLETE = 'CREATE_COMPLETE'
+    UPDATE_IN_PROGRESS = 'UPDATE_IN_PROGRESS'
+    UPDATE_FAILED = 'UPDATE_FAILED'
+    UPDATE_COMPLETE = 'UPDATE_COMPLETE'
+    DELETE_IN_PROGRESS = 'DELETE_IN_PROGRESS'
+    DELETE_FAILED = 'DELETE_FAILED'
+    DELETE_COMPLETE = 'DELETE_COMPLETE'
+
+    ALL = (CREATE_IN_PROGRESS, CREATE_FAILED, CREATE_COMPLETE,
+           UPDATE_IN_PROGRESS, UPDATE_FAILED, UPDATE_COMPLETE,
+           DELETE_IN_PROGRESS, DELETE_FAILED, DELETE_COMPLETE)
+
+    STATUS_FAILED = (CREATE_FAILED, UPDATE_FAILED, DELETE_FAILED)
+
+    def __init__(self):
+        super(FederationStatus, self).__init__(
+            valid_values=FederationStatus.ALL)
+
+
 class ContainerStatus(fields.Enum):
     ALL = (
         ERROR, RUNNING, STOPPED, PAUSED, UNKNOWN,
@@ -146,3 +168,7 @@
 
 class ServerTypeField(fields.BaseEnumField):
     AUTO_TYPE = ServerType()
+
+
+class FederationStatusField(fields.BaseEnumField):
+    AUTO_TYPE = FederationStatus()
diff -Nru magnum-6.0.1/magnum/objects/__init__.py magnum-6.1.0/magnum/objects/__init__.py
--- magnum-6.0.1/magnum/objects/__init__.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/objects/__init__.py 2018-02-23 14:02:04.000000000 +0000
@@ -15,6 +15,7 @@
 from magnum.objects import certificate
 from magnum.objects import cluster
 from magnum.objects import cluster_template
+from magnum.objects import federation
 from magnum.objects import magnum_service
 from magnum.objects import quota
 from magnum.objects import stats
@@ -28,10 +29,13 @@
 X509KeyPair = x509keypair.X509KeyPair
 Certificate = certificate.Certificate
 Stats = stats.Stats
+Federation = federation.Federation
 
 __all__ = (Cluster,
            ClusterTemplate,
            MagnumService,
            X509KeyPair,
            Certificate,
            Stats,
-           Quota)
+           Quota,
+           Federation
+           )
diff -Nru magnum-6.0.1/magnum/tests/contrib/gate_hook.sh magnum-6.1.0/magnum/tests/contrib/gate_hook.sh
--- magnum-6.0.1/magnum/tests/contrib/gate_hook.sh 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/tests/contrib/gate_hook.sh 2018-02-23 14:02:04.000000000 +0000
@@ -85,8 +85,8 @@
     export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_DISK=10"
     export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_EPHEMERAL_DISK=5"
 else
-    export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_GUEST_IMAGE_URL='${NODEPOOL_ATOMIC_MIRROR}/atomic/stable/Fedora-Atomic-26-20170723.0/CloudImages/x86_64/images/Fedora-Atomic-26-20170723.0.x86_64.qcow2'"
-    export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_IMAGE_NAME='Fedora-Atomic-26-20170723.0.x86_64'"
+    export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_GUEST_IMAGE_URL='${NODEPOOL_ATOMIC_MIRROR}/atomic/stable/Fedora-Atomic-27-20180212.2/CloudImages/x86_64/images/Fedora-Atomic-27-20180212.2.x86_64.qcow2'"
+    export DEVSTACK_LOCAL_CONFIG+=$'\n'"MAGNUM_IMAGE_NAME='Fedora-Atomic-27-20180212.2.x86_64'"
 fi
 
 # Enable magnum plugin in the last step
diff -Nru magnum-6.0.1/magnum/tests/functional/python_client_base.py magnum-6.1.0/magnum/tests/functional/python_client_base.py
--- magnum-6.0.1/magnum/tests/functional/python_client_base.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/tests/functional/python_client_base.py 2018-02-23 14:02:04.000000000 +0000
@@ -272,7 +272,12 @@
 req_extensions = req_ext
 prompt = no
 [req_distinguished_name]
-CN = Your Name
+CN = admin
+O = system:masters
+OU=OpenStack/Magnum
+C=US
+ST=TX
+L=Austin
 [req_ext]
 extendedKeyUsage = clientAuth
 """
diff -Nru magnum-6.0.1/magnum/tests/unit/api/controllers/test_root.py magnum-6.1.0/magnum/tests/unit/api/controllers/test_root.py
--- magnum-6.0.1/magnum/tests/unit/api/controllers/test_root.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/tests/unit/api/controllers/test_root.py 2018-02-23 14:02:04.000000000 +0000
@@ -86,7 +86,11 @@
             u'mservices': [{u'href': u'http://localhost/v1/mservices/',
                             u'rel': u'self'},
                            {u'href': u'http://localhost/mservices/',
-                            u'rel': u'bookmark'}]}
+                            u'rel': u'bookmark'}],
+            u'federations': [{u'href': u'http://localhost/v1/federations/',
+                              u'rel': u'self'},
+                             {u'href': u'http://localhost/federations/',
+                              u'rel': u'bookmark'}]}
 
     def make_app(self, paste_file):
         file_name = self.get_path(paste_file)
diff -Nru magnum-6.0.1/magnum/tests/unit/api/controllers/v1/test_cluster.py magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_cluster.py
--- magnum-6.0.1/magnum/tests/unit/api/controllers/v1/test_cluster.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_cluster.py 2018-02-23 14:02:04.000000000 +0000
@@ -950,6 +950,16 @@
         self.assertEqual('application/json', response.content_type)
         self.assertTrue(response.json['errors'])
 
+    @mock.patch("magnum.common.policy.enforce")
+    @mock.patch("magnum.common.context.make_context")
+    def test_delete_cluster_as_admin(self, mock_context, mock_policy):
+        temp_uuid = uuidutils.generate_uuid()
+        obj_utils.create_test_cluster(self.context, uuid=temp_uuid)
+        self.context.is_admin = True
+        response = self.delete('/clusters/%s' % temp_uuid,
+                               expect_errors=True)
+        self.assertEqual(204, response.status_int)
+
 
 class TestClusterPolicyEnforcement(api_base.FunctionalTest):
     def setUp(self):
diff -Nru magnum-6.0.1/magnum/tests/unit/api/controllers/v1/test_cluster_template.py magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_cluster_template.py
--- magnum-6.0.1/magnum/tests/unit/api/controllers/v1/test_cluster_template.py 2018-02-09 15:27:51.000000000 +0000
+++ magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_cluster_template.py 2018-02-23 14:02:04.000000000 +0000
@@ -334,6 +334,19 @@
                                  self.cluster_template.uuid)
         self.assertEqual(response['public'], True)
 
+    def test_update_cluster_template_replace_labels_success(self):
+        cluster_template = obj_utils.create_test_cluster_template(self.context)
+        response = self.patch_json('/clustertemplates/%s' %
+                                   cluster_template.uuid,
+                                   [{'path': '/labels',
+                                     'value': '{\'etcd_volume_size\': \'1\'}',
+                                     'op': 'replace'}],
+                                   expect_errors=True)
+        self.assertEqual(200, response.status_int)
+        response = self.get_json('/clustertemplates/%s' %
+                                 self.cluster_template.uuid)
+        self.assertEqual(response['labels'], {'etcd_volume_size': '1'})
+
     def test_update_cluster_template_with_cluster_not_allow_update(self):
         cluster_template = obj_utils.create_test_cluster_template(self.context)
         obj_utils.create_test_cluster(
@@ -1004,6 +1017,14 @@
         self.assertEqual(201, resp.status_int)
         self.assertIsNotNone(resp.json['name'])
 
+    def test_create_cluster_with_disabled_driver(self):
+        cfg.CONF.set_override('disabled_drivers',
+                              ['mesos_ubuntu_v1'],
+                              group='drivers')
+        bdict = apiutils.cluster_template_post_data(coe="mesos")
+        self.assertRaises(AppError, self.post_json, '/clustertemplates',
+                          bdict)
+
 
 class TestDelete(api_base.FunctionalTest):
 
@@ -1064,6 +1085,17 @@
         self.assertEqual('application/json', response.content_type)
         self.assertTrue(response.json['errors'])
 
+    @mock.patch("magnum.common.policy.enforce")
+    @mock.patch("magnum.common.context.make_context")
+    def test_delete_cluster_template_as_admin(self, mock_context, mock_policy):
+        temp_uuid = uuidutils.generate_uuid()
+        obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid,
+                                               project_id=temp_uuid)
+        self.context.is_admin = True
+        response = self.delete('/clustertemplates/%s' % temp_uuid,
+                               expect_errors=True)
+        self.assertEqual(204, response.status_int)
+
 
 class TestClusterTemplatePolicyEnforcement(api_base.FunctionalTest):
diff -Nru magnum-6.0.1/magnum/tests/unit/api/controllers/v1/test_federation.py magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_federation.py
--- magnum-6.0.1/magnum/tests/unit/api/controllers/v1/test_federation.py 1970-01-01 00:00:00.000000000 +0000
+++ magnum-6.1.0/magnum/tests/unit/api/controllers/v1/test_federation.py 2018-02-23 14:02:04.000000000 +0000
@@ -0,0 +1,415 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import mock
+
+from oslo_config import cfg
+from oslo_utils import uuidutils
+
+from magnum.api.controllers.v1 import federation as api_federation
+from magnum.conductor import api as rpcapi
+import magnum.conf
+from magnum import objects
+from magnum.tests import base
+from magnum.tests.unit.api import base as api_base
+from magnum.tests.unit.api import utils as apiutils
+from magnum.tests.unit.objects import utils as obj_utils
+
+CONF = magnum.conf.CONF
+
+
+class TestFederationObject(base.TestCase):
+    def test_federation_init(self):
+        fed_dict = apiutils.federation_post_data()
+        fed_dict['uuid'] = uuidutils.generate_uuid()
+        federation = api_federation.Federation(**fed_dict)
+        self.assertEqual(fed_dict['uuid'], federation.uuid)
+
+
+class TestListFederation(api_base.FunctionalTest):
+    def setUp(self):
+        super(TestListFederation, self).setUp()
+
+    def test_empty(self):
+        response = self.get_json('/federations')
+        self.assertEqual(response['federations'], [])
+
+    def test_one(self):
+        federation = obj_utils.create_test_federation(
+            self.context, uuid=uuidutils.generate_uuid())
+        response = self.get_json('/federations')
+        self.assertEqual(federation.uuid, response['federations'][0]['uuid'])
+
+    def test_get_one(self):
+        federation = obj_utils.create_test_federation(
+            self.context, uuid=uuidutils.generate_uuid())
+        response = self.get_json('/federations/%s' % federation['uuid'])
+        self.assertTrue(response['uuid'], federation.uuid)
+
+    def test_get_one_by_name(self):
+        federation = obj_utils.create_test_federation(
+            self.context, uuid=uuidutils.generate_uuid())
+        response = self.get_json('/federations/%s' % federation['name'])
+        self.assertTrue(response['uuid'], federation.uuid)
+
+    def test_get_one_by_name_not_found(self):
+        response = self.get_json('/federations/not_found', expect_errors=True)
+        self.assertEqual(404, response.status_int)
+        self.assertEqual('application/json', response.content_type)
+        self.assertTrue(response.json['errors'])
+
+    def test_get_one_by_uuid(self):
+        temp_uuid = uuidutils.generate_uuid()
+        federation = obj_utils.create_test_federation(self.context,
+                                                      uuid=temp_uuid)
+        response = self.get_json('/federations/%s' % temp_uuid)
+        self.assertTrue(response['uuid'], federation.uuid)
+
+    def test_get_one_by_uuid_not_found(self):
+        temp_uuid = uuidutils.generate_uuid()
+        response = self.get_json('/federations/%s' % temp_uuid,
+                                 expect_errors=True)
+        self.assertEqual(404, response.status_int)
+        self.assertEqual('application/json', response.content_type)
+        self.assertTrue(response.json['errors'])
+
+    def test_get_one_by_name_multiple_federation(self):
+        obj_utils.create_test_federation(self.context, name='test_federation',
+                                         uuid=uuidutils.generate_uuid())
+        obj_utils.create_test_federation(self.context, name='test_federation',
+                                         uuid=uuidutils.generate_uuid())
+        response = self.get_json('/federations/test_federation',
+                                 expect_errors=True)
+        self.assertEqual(409, response.status_int)
+        self.assertEqual('application/json', response.content_type)
+        self.assertTrue(response.json['errors'])
+
+    def test_get_all_with_pagination_marker(self):
+        federation_list = []
+        for id_ in range(4):
+            federation = obj_utils.create_test_federation(
+                self.context, id=id_, uuid=uuidutils.generate_uuid())
+            federation_list.append(federation)
+
+        response = self.get_json(
+            '/federations?limit=3&marker=%s' % federation_list[2].uuid)
+        self.assertEqual(1, len(response['federations']))
+        self.assertEqual(federation_list[-1].uuid,
+                         response['federations'][0]['uuid'])
+
+    def test_detail(self):
+        federation = obj_utils.create_test_federation(
+            self.context, uuid=uuidutils.generate_uuid())
+        response = self.get_json('/federations/detail')
+        self.assertEqual(federation.uuid, response['federations'][0]["uuid"])
+
+    def test_detail_with_pagination_marker(self):
+        federation_list = []
+        for id_ in range(4):
+            federation = obj_utils.create_test_federation(
+                self.context, id=id_, uuid=uuidutils.generate_uuid())
+            federation_list.append(federation)
+
+        response = self.get_json(
+            '/federations/detail?limit=3&marker=%s' % federation_list[2].uuid)
+        self.assertEqual(1, len(response['federations']))
+        self.assertEqual(federation_list[-1].uuid,
+                         response['federations'][0]['uuid'])
+
+    def test_detail_against_single(self):
+        federation = obj_utils.create_test_federation(
+            self.context, uuid=uuidutils.generate_uuid())
+        response = self.get_json(
+            '/federations/%s/detail' % federation['uuid'], expect_errors=True)
+        self.assertEqual(404, response.status_int)
+        self.assertEqual('application/json', response.content_type)
+        self.assertTrue(response.json['errors'])
+
+    def test_many(self):
+        federation_list = []
+        for id_ in range(5):
+            temp_uuid = uuidutils.generate_uuid()
+            federation = obj_utils.create_test_federation(
+                self.context, id=id_, uuid=temp_uuid)
+            federation_list.append(federation.uuid)
+
+        response = self.get_json('/federations')
+        self.assertEqual(len(federation_list), len(response['federations']))
+        uuids = [f['uuid'] for f in response['federations']]
+        self.assertEqual(sorted(federation_list), sorted(uuids))
+
+    def test_links(self):
+        uuid = uuidutils.generate_uuid()
+        obj_utils.create_test_federation(self.context, id=1, uuid=uuid)
+        response = self.get_json('/federations/%s' % uuid)
+        self.assertIn('links', response.keys())
+        self.assertEqual(2, len(response['links']))
+        self.assertIn(uuid, response['links'][0]['href'])
+        for l in response['links']:
+            bookmark = l['rel'] == 'bookmark'
+            self.assertTrue(self.validate_link(l['href'],
+                                               bookmark=bookmark))
+
+    def test_collection_links(self):
+        for id_ in range(5):
+            obj_utils.create_test_federation(self.context, id=id_,
+                                             uuid=uuidutils.generate_uuid())
+        response = self.get_json('/federations/?limit=3')
+        next_marker = response['federations'][-1]['uuid']
+        self.assertIn(next_marker, response['next'])
+
+    def test_collection_links_default_limit(self):
+        cfg.CONF.set_override('max_limit', 3, 'api')
+        for id_ in range(5):
+            obj_utils.create_test_federation(self.context, id=id_,
+                                             uuid=uuidutils.generate_uuid())
+        response = self.get_json('/federations')
+        self.assertEqual(3, len(response['federations']))
+
+        next_marker = response['federations'][-1]['uuid']
+        self.assertIn(next_marker, response['next'])
+
+
+class TestPatch(api_base.FunctionalTest):
+    def setUp(self):
+        super(TestPatch, self).setUp()
+        p = mock.patch.object(rpcapi.API, 'federation_update_async')
+        self.mock_federation_update = p.start()
+        self.mock_federation_update.side_effect = \
+            self._sim_rpc_federation_update
+        self.addCleanup(p.stop)
+
+    def _sim_rpc_federation_update(self, federation, rollback=False):
+        federation.save()
+        return federation
+
+    def test_member_join(self):
+        f = obj_utils.create_test_federation(
+            self.context, name='federation-example',
+            uuid=uuidutils.generate_uuid(), member_ids=[])
+        new_member = obj_utils.create_test_cluster(self.context)
+
+        response = self.patch_json(
+            '/federations/%s' % f.uuid,
+            [{'path': '/member_ids', 'value': new_member.uuid, 'op': 'add'}])
+        self.assertEqual(202, response.status_int)
+
+        # make sure it was added:
+        fed = self.get_json('/federations/%s' % f.uuid)
+        self.assertTrue(new_member.uuid in fed['member_ids'])
+
+    def test_member_unjoin(self):
+        member = obj_utils.create_test_cluster(self.context)
+        federation = obj_utils.create_test_federation(
+            self.context, name='federation-example',
+            uuid=uuidutils.generate_uuid(), member_ids=[member.uuid])
+
+        response = self.patch_json(
+            '/federations/%s' % federation.uuid,
+            [{'path': '/member_ids', 'value': member.uuid, 'op': 'remove'}])
+        self.assertEqual(202, response.status_int)
+
+        # make sure it was deleted:
+        fed = self.get_json('/federations/%s' % federation.uuid)
+        self.assertFalse(member.uuid in fed['member_ids'])
+
+    def test_join_non_existent_cluster(self):
+        foo_uuid = uuidutils.generate_uuid()
+        f = obj_utils.create_test_federation(
+            self.context, name='federation-example',
+            uuid=uuidutils.generate_uuid(), member_ids=[])
+
+        response = self.patch_json(
+            '/federations/%s' % f.uuid,
+            [{'path': '/member_ids', 'value': foo_uuid, 'op': 'add'}],
+            expect_errors=True)
+        self.assertEqual(404, response.status_int)
+
+    def test_unjoin_non_existent_cluster(self):
+        foo_uuid = uuidutils.generate_uuid()
+        f = obj_utils.create_test_federation(
+            self.context, name='federation-example',
+            uuid=uuidutils.generate_uuid(), member_ids=[])
+
+        response = self.patch_json(
+            '/federations/%s' % f.uuid,
+            [{'path': '/member_ids', 'value': foo_uuid, 'op': 'remove'}],
+            expect_errors=True)
+        self.assertEqual(404, response.status_int)
+
+    def test_join_cluster_already_member(self):
+        cluster = obj_utils.create_test_cluster(self.context)
+        f = obj_utils.create_test_federation(
+            self.context, name='federation-example',
+            uuid=uuidutils.generate_uuid(), member_ids=[cluster.uuid])
+
+        response = self.patch_json(
+            '/federations/%s' % f.uuid,
+            [{'path': '/member_ids', 'value': cluster.uuid, 'op': 'add'}],
+            expect_errors=True)
+        self.assertEqual(409, response.status_int)
+
+    def test_unjoin_non_member_cluster(self):
+        cluster = obj_utils.create_test_cluster(self.context)
+        f = obj_utils.create_test_federation(
+            self.context, name='federation-example',
+            uuid=uuidutils.generate_uuid(), member_ids=[])
+
+        response = self.patch_json(
+            '/federations/%s' % f.uuid,
+            [{'path': '/member_ids', 'value': cluster.uuid, 'op': 'remove'}],
+            expect_errors=True)
+        self.assertEqual(404, response.status_int)
+
+
+class TestPost(api_base.FunctionalTest):
+    def setUp(self):
+        super(TestPost, self).setUp()
+        p = mock.patch.object(rpcapi.API, 'federation_create_async')
+        self.mock_fed_create = p.start()
+        self.mock_fed_create.side_effect = self._simulate_federation_create
+        self.addCleanup(p.stop)
+        self.hostcluster = obj_utils.create_test_cluster(self.context)
+
+    def _simulate_federation_create(self, federation, create_timeout):
+        federation.create()
+        return federation
+
+    @mock.patch('oslo_utils.timeutils.utcnow')
+    def test_create_federation(self, mock_utcnow):
+        bdict = apiutils.federation_post_data(
+            uuid=uuidutils.generate_uuid(),
+            hostcluster_id=self.hostcluster.uuid)
+        test_time = datetime.datetime(2000, 1, 1, 0, 0)
+        mock_utcnow.return_value = test_time
+
+        response = self.post_json('/federations', bdict)
+        self.assertEqual('application/json', response.content_type)
+        self.assertEqual(202, response.status_int)
+        self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))
+
+    def test_create_federation_no_hostcluster_id(self):
+        bdict = apiutils.federation_post_data(uuid=uuidutils.generate_uuid())
+        del bdict['hostcluster_id']
+        response = self.post_json('/federations', bdict, expect_errors=True)
+        self.assertEqual(400, response.status_int)
+        self.assertEqual('application/json', response.content_type)
+        self.assertTrue(response.json['errors'])
+
+    def test_create_federation_hostcluster_does_not_exist(self):
+        bdict = apiutils.federation_post_data(
+            uuid=uuidutils.generate_uuid(),
+            hostcluster_id=uuidutils.generate_uuid())
+        response = self.post_json('/federations', bdict, expect_errors=True)
+        self.assertEqual(404, response.status_int)
+        self.assertEqual('application/json', response.content_type)
+        self.assertTrue(response.json['errors'])
+
+    def test_create_federation_no_dns_zone_name(self):
+        bdict = apiutils.federation_post_data(
+            uuid=uuidutils.generate_uuid(),
+            hostcluster_id=self.hostcluster.uuid)
+        del bdict['properties']
+        response = self.post_json('/federations', bdict, expect_errors=True)
+        self.assertEqual(400, response.status_int)
+        self.assertEqual('application/json', response.content_type)
+        self.assertTrue(response.json['errors'])
+
+    def test_create_federation_generate_uuid(self):
+        bdict = apiutils.federation_post_data(
+            hostcluster_id=self.hostcluster.uuid)
+        del bdict['uuid']
+        response = self.post_json('/federations', bdict)
+        self.assertEqual(202, response.status_int)
+
+    def test_create_federation_with_invalid_name(self):
+        invalid_names = [
+            'x' * 243, '123456', '123456test_federation',
+            '-test_federation', '.test_federation', '_test_federation', ''
+        ]
+
+        for value in invalid_names:
+            bdict = apiutils.federation_post_data(
+                uuid=uuidutils.generate_uuid(), name=value,
+                hostcluster_id=self.hostcluster.uuid)
+            response = self.post_json('/federations', bdict,
+                                      expect_errors=True)
+            self.assertEqual('application/json', response.content_type)
+            self.assertEqual(400, response.status_int)
+            self.assertTrue(response.json['errors'])
+
+    def test_create_federation_with_valid_name(self):
+        valid_names = [
+            'test_federation123456', 'test-federation', 'test.federation',
+            'testfederation.', 'testfederation-', 'testfederation_',
+            'test.-_federation', 'Testfederation'
+        ]
+
+        for value in valid_names:
+            bdict = apiutils.federation_post_data(
+                name=value, hostcluster_id=self.hostcluster.uuid)
+            bdict['uuid'] = uuidutils.generate_uuid()
+            response = self.post_json('/federations', bdict)
+            self.assertEqual(202, response.status_int)
+
+    def test_create_federation_without_name(self):
+        bdict = apiutils.federation_post_data(
+            uuid=uuidutils.generate_uuid(),
+            hostcluster_id=self.hostcluster.uuid)
+        del bdict['name']
+        response = self.post_json('/federations', bdict)
+        self.assertEqual(202, response.status_int)
+
+
+class TestDelete(api_base.FunctionalTest):
+    def setUp(self):
+        super(TestDelete, self).setUp()
+        self.federation = obj_utils.create_test_federation(
+            self.context, name='federation-example',
+            uuid=uuidutils.generate_uuid())
+        p = mock.patch.object(rpcapi.API, 'federation_delete_async')
+        self.mock_federation_delete = p.start()
+        self.mock_federation_delete.side_effect = \
+            self._simulate_federation_delete
+        self.addCleanup(p.stop)
+
+    def _simulate_federation_delete(self, federation_uuid):
+        federation = objects.Federation.get_by_uuid(self.context,
+                                                    federation_uuid)
+        federation.destroy()
+
+    def test_delete_federation(self):
+        self.delete('/federations/%s' % self.federation.uuid)
+        response = self.get_json('/federations/%s' % self.federation.uuid,
+                                 expect_errors=True)
+        self.assertEqual(404, response.status_int)
+        self.assertEqual('application/json', response.content_type)
+        self.assertTrue(response.json['errors'])
+
+    def test_delete_federation_not_found(self):
+        delete = self.delete('/federations/%s' % uuidutils.generate_uuid(),
+                             expect_errors=True)
+        self.assertEqual(404, delete.status_int)
+        self.assertEqual('application/json', delete.content_type)
+        self.assertTrue(delete.json['errors'])
+
+    def test_delete_federation_with_name(self):
+        delete = self.delete('/federations/%s' % self.federation.name)
+        self.assertEqual(204, delete.status_int)
+
+    def test_delete_federation_with_name_not_found(self):
+        delete = self.delete('/federations/%s' % 'foo',
+                             expect_errors=True)
+        self.assertEqual(404, delete.status_int)
+        self.assertEqual('application/json', delete.content_type)
+        self.assertTrue(delete.json['errors'])
diff -Nru magnum-6.0.1/magnum/tests/unit/api/utils.py magnum-6.1.0/magnum/tests/unit/api/utils.py
--- magnum-6.0.1/magnum/tests/unit/api/utils.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/tests/unit/api/utils.py 2018-02-23 14:02:04.000000000 +0000
@@ -20,6 +20,7 @@
 from magnum.api.controllers.v1 import baymodel as baymodel_controller
 from magnum.api.controllers.v1 import cluster as cluster_controller
 from magnum.api.controllers.v1 import cluster_template as cluster_tmp_ctrl
+from magnum.api.controllers.v1 import federation as federation_controller
 from magnum.tests.unit.db import utils
 
 
@@ -86,3 +87,9 @@
         'created_at': kw.get('created_at', faketime),
         'updated_at': kw.get('updated_at', faketime),
     }
+
+
+def federation_post_data(**kw):
+    federation = utils.get_test_federation(**kw)
+    internal = federation_controller.FederationPatchType.internal_attrs()
+    return remove_internal(federation, internal)
diff -Nru magnum-6.0.1/magnum/tests/unit/common/x509/test_sign.py magnum-6.1.0/magnum/tests/unit/common/x509/test_sign.py
--- magnum-6.0.1/magnum/tests/unit/common/x509/test_sign.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/tests/unit/common/x509/test_sign.py 2018-02-23 14:02:04.000000000 +0000
@@ -32,6 +32,7 @@
         super(TestX509, self).setUp()
         self.issuer_name = six.u("fake-issuer")
         self.subject_name = six.u("fake-subject")
+        self.organization_name = six.u("fake-organization")
         self.ca_encryption_password = six.b("fake-ca-password")
         self.encryption_password = six.b("fake-password")
 
@@ -59,6 +60,7 @@
         keypairs = operations.generate_client_certificate(
             self.issuer_name,
             self.subject_name,
+            self.organization_name,
             ca['private_key'],
             encryption_password=self.encryption_password,
             ca_key_password=self.ca_encryption_password,
diff -Nru magnum-6.0.1/magnum/tests/unit/conductor/handlers/common/test_cert_manager.py magnum-6.1.0/magnum/tests/unit/conductor/handlers/common/test_cert_manager.py
--- magnum-6.0.1/magnum/tests/unit/conductor/handlers/common/test_cert_manager.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/tests/unit/conductor/handlers/common/test_cert_manager.py 2018-02-23 14:02:04.000000000 +0000
@@ -63,7 +63,8 @@
     @mock.patch('magnum.common.x509.operations.generate_client_certificate')
     @mock.patch('magnum.common.short_id.generate_id')
     def test_generate_client_cert(self, mock_generate_id, mock_generate_cert):
-        expected_name = cert_manager.CONDUCTOR_CLIENT_NAME
+        expected_name = 'admin'
+        expected_organization_name = 'system:masters'
         expected_ca_name = 'ca-name'
         expected_password = 'password'
         expected_ca_password = 'ca-password'
@@ -88,6 +89,7 @@
         mock_generate_cert.assert_called_once_with(
             expected_ca_name,
             expected_name,
+            expected_organization_name,
             expected_ca_cert['private_key'],
             encryption_password=expected_password,
             ca_key_password=expected_ca_password,
@@ -96,7 +98,7 @@
             certificate=expected_cert['certificate'],
             private_key=expected_cert['private_key'],
             private_key_passphrase=expected_password,
-            name=expected_name,
+            name=cert_manager.CONDUCTOR_CLIENT_NAME,
             context=None
         )
diff -Nru magnum-6.0.1/magnum/tests/unit/conductor/handlers/test_federation_conductor.py magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_federation_conductor.py
--- magnum-6.0.1/magnum/tests/unit/conductor/handlers/test_federation_conductor.py 1970-01-01 00:00:00.000000000 +0000
+++ magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_federation_conductor.py 2018-02-23 14:02:04.000000000 +0000
@@ -0,0 +1,38 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from magnum.conductor.handlers import federation_conductor
+from magnum import objects
+from magnum.tests.unit.db import base as db_base
+from magnum.tests.unit.db import utils
+
+
+class TestHandler(db_base.DbTestCase):
+
+    def setUp(self):
+        super(TestHandler, self).setUp()
+        self.handler = federation_conductor.Handler()
+        federation_dict = utils.get_test_federation()
+        self.federation = objects.Federation(self.context, **federation_dict)
+        self.federation.create()
+
+    def test_create_federation(self):
+        self.assertRaises(NotImplementedError, self.handler.federation_create,
+                          self.context, self.federation, create_timeout=15)
+
+    def test_update_federation(self):
+        self.assertRaises(NotImplementedError, self.handler.federation_update,
+                          self.context, self.federation, rollback=False)
+
+    def test_delete_federation(self):
+        self.assertRaises(NotImplementedError, self.handler.federation_delete,
+                          self.context, self.federation.uuid)
diff -Nru magnum-6.0.1/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py
--- magnum-6.0.1/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py 2018-02-23 14:02:04.000000000 +0000
@@ -56,6 +56,7 @@
             'prometheus_monitoring': 'False',
             'grafana_admin_passwd': 'fake_pwd',
             'kube_dashboard_enabled': 'True',
+            'influx_grafana_dashboard_enabled': 'True',
            'docker_volume_type': 'lvmdriver-1',
             'etcd_volume_size': 0,
             'availability_zone': 'az_1'},
@@ -97,7 +98,16 @@
             'grafana_admin_passwd': 'fake_pwd',
             'kube_dashboard_enabled': 'True',
             'docker_volume_type': 'lvmdriver-1',
-            'availability_zone': 'az_1'},
+            'availability_zone': 'az_1',
+            'cert_manager_api': 'False',
+            'ingress_controller': 'i-controller',
+            'ingress_controller_role': 'i-controller-role',
+            'kubelet_options': '--kubelet',
+            'kubeapi_options': '--kubeapi',
+            'kubecontroller_options': '--kubecontroller',
+            'kubescheduler_options': '--kubescheduler',
+            'kubeproxy_options': '--kubeproxy',
+            'influx_grafana_dashboard_enabled': 'True'},
         'master_flavor_id': 'master_flavor_id',
         'flavor_id': 'flavor_id',
     }
@@ -175,9 +185,19 @@
             'prometheus_monitoring': 'False',
             'grafana_admin_passwd': 'fake_pwd',
             'kube_dashboard_enabled': 'True',
+            'influx_grafana_dashboard_enabled': 'True',
             'docker_volume_type': 'lvmdriver-1',
             'etcd_volume_size': None,
-            'availability_zone': 'az_1'},
+            'availability_zone': 'az_1',
+            'cert_manager_api': 'False',
+            'ingress_controller': 'i-controller',
+            'ingress_controller_role': 'i-controller-role',
+            'kubelet_options': '--kubelet',
+            'kubeapi_options': '--kubeapi',
+            'kubecontroller_options': '--kubecontroller',
+            'kubescheduler_options': '--kubescheduler',
+            'kubeproxy_options': '--kubeproxy',
+            },
         'http_proxy': 'http_proxy',
         'https_proxy': 'https_proxy',
         'no_proxy': 'no_proxy',
@@ -213,6 +233,7 @@
             'prometheus_monitoring': 'False',
             'grafana_admin_passwd': 'fake_pwd',
             'kube_dashboard_enabled': 'True',
+            'influx_grafana_dashboard_enabled': 'True',
             'http_proxy': 'http_proxy',
             'https_proxy': 'https_proxy',
             'no_proxy': 'no_proxy',
@@ -233,7 +254,15 @@
             'verify_ca': True,
             'openstack_ca': '',
             "nodes_affinity_policy": "soft-anti-affinity",
-            'availability_zone': 'az_1'
+            'availability_zone': 'az_1',
+            'cert_manager_api': 'False',
+            'ingress_controller': 'i-controller',
+            'ingress_controller_role': 'i-controller-role',
+            'kubelet_options': '--kubelet',
+            'kubeapi_options': '--kubeapi',
+            'kubecontroller_options': '--kubecontroller',
+            'kubescheduler_options': '--kubescheduler',
+            'kubeproxy_options': '--kubeproxy',
         }
         if missing_attr is not None:
             expected.pop(mapping[missing_attr], None)
@@ -303,6 +332,7 @@
             'prometheus_monitoring': 'False',
             'grafana_admin_passwd': 'fake_pwd',
             'kube_dashboard_enabled': 'True',
+            'influx_grafana_dashboard_enabled': 'True',
             'http_proxy': 'http_proxy',
             'https_proxy': 'https_proxy',
             'magnum_url': 'http://127.0.0.1:9511/v1',
@@ -331,7 +361,15 @@
             'verify_ca': True,
             'openstack_ca': '',
             "nodes_affinity_policy": "soft-anti-affinity",
-            'availability_zone': 'az_1'
+            'availability_zone': 'az_1',
+            'cert_manager_api': 'False',
+            'ingress_controller': 'i-controller',
+            'ingress_controller_role': 'i-controller-role',
+            'kubelet_options': '--kubelet',
+            'kubeapi_options': '--kubeapi',
+            'kubecontroller_options': '--kubecontroller',
+            'kubescheduler_options': '--kubescheduler',
+            'kubeproxy_options': '--kubeproxy',
         }
         self.assertEqual(expected, definition)
         self.assertEqual(
@@ -396,6 +434,7 @@
             'prometheus_monitoring': 'False',
             'grafana_admin_passwd': 'fake_pwd',
             'kube_dashboard_enabled': 'True',
+            'influx_grafana_dashboard_enabled': 'True',
             'docker_volume_type': 'lvmdriver-1',
             'etcd_volume_size': None,
             'insecure_registry_url': '10.0.0.1:5000',
@@ -416,7 +455,15 @@
             'verify_ca': True,
             'openstack_ca': '',
             "nodes_affinity_policy": "soft-anti-affinity",
-            'availability_zone': 'az_1'
+            'availability_zone': 'az_1',
+            'cert_manager_api': 'False',
+            'ingress_controller': 'i-controller',
+            'ingress_controller_role': 'i-controller-role',
+            'kubelet_options': '--kubelet',
+            'kubeapi_options': '--kubeapi',
+            'kubecontroller_options': '--kubecontroller',
+            'kubescheduler_options': '--kubescheduler',
+            'kubeproxy_options': '--kubeproxy',
         }
         self.assertEqual(expected, definition)
         self.assertEqual(
@@ -481,6 +528,7 @@
             'prometheus_monitoring': 'False',
             'grafana_admin_passwd': 'fake_pwd',
             'kube_dashboard_enabled': 'True',
+            'influx_grafana_dashboard_enabled': 'True',
             'tls_disabled': False,
             'registry_enabled': False,
             'trustee_domain_id': self.mock_keystone.trustee_domain_id,
@@ -494,7 +542,15 @@
             'insecure_registry_url': '10.0.0.1:5000',
             'kube_version': 'fake-version',
             'verify_ca': True,
-            'openstack_ca': ''
+            'openstack_ca': '',
+            'cert_manager_api': 'False',
+            'ingress_controller': 'i-controller',
+            'ingress_controller_role': 'i-controller-role',
+            'kubelet_options': '--kubelet',
+            'kubeapi_options': '--kubeapi',
+            'kubecontroller_options': '--kubecontroller',
+            'kubescheduler_options': '--kubescheduler',
+            'kubeproxy_options': '--kubeproxy',
         }
         self.assertEqual(expected, definition)
         self.assertEqual(
@@ -554,6 +610,7 @@
             'prometheus_monitoring': 'False',
             'grafana_admin_passwd': 'fake_pwd',
             'kube_dashboard_enabled': 'True',
+            'influx_grafana_dashboard_enabled': 'True',
             'tls_disabled': False,
             'registry_enabled': False,
             'trustee_domain_id': self.mock_keystone.trustee_domain_id,
@@ -567,7 +624,15 @@
             'insecure_registry_url': '10.0.0.1:5000',
             'kube_version': 'fake-version',
             'verify_ca': True,
-            'openstack_ca': ''
+            'openstack_ca': '',
+            'cert_manager_api': 'False',
+            'ingress_controller': 'i-controller',
+            'ingress_controller_role': 'i-controller-role',
+            'kubelet_options': '--kubelet',
+            'kubeapi_options': '--kubeapi',
+            'kubecontroller_options': '--kubecontroller',
+            'kubescheduler_options': '--kubescheduler',
+            'kubeproxy_options': '--kubeproxy',
         }
         self.assertEqual(expected, definition)
         self.assertEqual(
@@ -724,6 +789,7 @@
             'prometheus_monitoring': 'False',
             'grafana_admin_passwd': 'fake_pwd',
             'kube_dashboard_enabled': 'True',
+            'influx_grafana_dashboard_enabled': 'True',
             'username': 'fake_user',
             'cluster_uuid': self.cluster_dict['uuid'],
             'magnum_url': self.mock_osc.magnum_url.return_value,
@@ -741,7 +807,15 @@
             'verify_ca': True,
             'openstack_ca': '',
             "nodes_affinity_policy": "soft-anti-affinity",
-            'availability_zone': 'az_1'
+            'availability_zone': 'az_1',
+            'cert_manager_api': 'False',
+            'ingress_controller': 'i-controller',
+            'ingress_controller_role': 'i-controller-role',
+            'kubelet_options': '--kubelet',
+            'kubeapi_options': '--kubeapi',
+            'kubecontroller_options': '--kubecontroller',
+            'kubescheduler_options': '--kubescheduler',
+            'kubeproxy_options': '--kubeproxy',
         }
         self.assertEqual(expected, definition)
         self.assertEqual(
diff -Nru magnum-6.0.1/magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py
--- magnum-6.0.1/magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py 2018-02-23 14:02:04.000000000 +0000
@@ -207,8 +207,10 @@
 
     @patch('magnum.objects.ClusterTemplate.get_by_uuid')
     @patch('magnum.drivers.common.driver.Driver.get_driver')
-    def test_extract_template_definition_with_lb(
+    @patch('magnum.common.keystone.KeystoneClientV3')
+    def test_extract_template_definition_with_lb_neutron(
             self,
+            mock_kc,
             mock_driver,
             mock_objects_cluster_template_get_by_uuid):
         self.cluster_template_dict['master_lb_enabled'] = True
@@ -219,6 +221,8 @@
         cluster = objects.Cluster(self.context, **self.cluster_dict)
         mock_driver.return_value = mesos_dr.Driver()
 
+        mock_kc.return_value.client.services.list.return_value = []
+
         (template_path,
          definition,
          env_files) = mock_driver()._extract_template_definition(self.context,
@@ -266,8 +270,78 @@
 
     @patch('magnum.objects.ClusterTemplate.get_by_uuid')
     @patch('magnum.drivers.common.driver.Driver.get_driver')
+    @patch('magnum.common.keystone.KeystoneClientV3')
+    def test_extract_template_definition_with_lb_octavia(
+            self,
+            mock_kc,
+            mock_driver,
+            mock_objects_cluster_template_get_by_uuid):
+        self.cluster_template_dict['master_lb_enabled'] = True
+        cluster_template = objects.ClusterTemplate(
+            self.context, **self.cluster_template_dict)
+        mock_objects_cluster_template_get_by_uuid.return_value = \
+            cluster_template
+        cluster = objects.Cluster(self.context, **self.cluster_dict)
+        mock_driver.return_value = mesos_dr.Driver()
+
+        class Service(object):
+            def __init__(self):
+                self.enabled = True
+
+        mock_kc.return_value.client.services.list.return_value = [Service()]
+
+        (template_path,
+         definition,
+         env_files) = mock_driver()._extract_template_definition(self.context,
+                                                                 cluster)
+
+        expected = {
+            'ssh_key_name': 'keypair_id',
+            'external_network': 'external_network_id',
+            'fixed_network': 'fixed_network',
+            'fixed_subnet': 'fixed_subnet',
+            'dns_nameserver': 'dns_nameserver',
+            'server_image': 'image_id',
+            'master_flavor': 'master_flavor_id',
+            'slave_flavor': 'flavor_id',
+            'number_of_slaves': 1,
+            'number_of_masters': 1,
+            'http_proxy': 'http_proxy',
+            'https_proxy': 'https_proxy',
+            'no_proxy': 'no_proxy',
+            'cluster_name': 'cluster1',
+            'trustee_domain_id': self.mock_keystone.trustee_domain_id,
+            'trustee_username': 'fake_trustee',
+            'trustee_password': 'fake_trustee_password',
+            'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656',
+            'trust_id': '',
+            'volume_driver': 'volume_driver',
+            'auth_url': 'http://192.168.10.10:5000/v3',
+            'region_name': self.mock_osc.cinder_region_name.return_value,
+            'username': 'mesos_user',
+            'tenant_name': 'admin',
+            'domain_name': 'domainname',
+            'rexray_preempt': 'False',
+            'mesos_slave_executor_env_variables': '{}',
+            'mesos_slave_isolation': 'docker/runtime,filesystem/linux',
+            'mesos_slave_work_dir': '/tmp/mesos/slave',
+            'mesos_slave_image_providers': 'docker',
+            'verify_ca': True,
+            'openstack_ca': '',
+        }
+        self.assertEqual(expected, definition)
+        self.assertEqual(
+            ['../../common/templates/environments/no_private_network.yaml',
+             '../../common/templates/environments/with_master_lb_octavia.yaml'
+             ],
+            env_files)
+
+    @patch('magnum.objects.ClusterTemplate.get_by_uuid')
+    @patch('magnum.drivers.common.driver.Driver.get_driver')
+    @patch('magnum.common.keystone.KeystoneClientV3')
     def test_extract_template_definition_multi_master(
             self,
+            mock_kc,
             mock_driver,
             mock_objects_cluster_template_get_by_uuid):
         self.cluster_template_dict['master_lb_enabled'] = True
@@ -279,6 +353,8 @@
         cluster = objects.Cluster(self.context, **self.cluster_dict)
         mock_driver.return_value = mesos_dr.Driver()
 
+        mock_kc.return_value.client.services.list.return_value = []
+
         (template_path,
          definition,
          env_files) = mock_driver()._extract_template_definition(self.context,
diff -Nru magnum-6.0.1/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py
--- magnum-6.0.1/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py 2018-02-09 15:24:26.000000000 +0000
+++ magnum-6.1.0/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py 2018-02-23 14:02:04.000000000 +0000
@@ -330,8 +330,10 @@
     @patch('requests.get')
     @patch('magnum.objects.ClusterTemplate.get_by_uuid')
     @patch('magnum.drivers.common.driver.Driver.get_driver')
-    def test_extract_template_definition_with_lb(
+    @patch('magnum.common.keystone.KeystoneClientV3')
+    def test_extract_template_definition_with_lb_neutron(
             self,
+            mock_kc,
             mock_driver,
             mock_objects_cluster_template_get_by_uuid,
             mock_get):
@@ -348,6 +350,8 @@
         mock_driver.return_value = swarm_dr.Driver()
         cluster = objects.Cluster(self.context, **self.cluster_dict)
 
+        mock_kc.return_value.client.services.list.return_value = []
+
         (template_path,
          definition,
          env_files) = mock_driver()._extract_template_definition(self.context,
@@ -403,8 +407,92 @@
     @patch('requests.get')
     @patch('magnum.objects.ClusterTemplate.get_by_uuid')
     @patch('magnum.drivers.common.driver.Driver.get_driver')
+    @patch('magnum.common.keystone.KeystoneClientV3')
+    def test_extract_template_definition_with_lb_octavia(
+            self,
+            mock_kc,
+            mock_driver,
+            mock_objects_cluster_template_get_by_uuid,
+            mock_get):
+        self.cluster_template_dict['master_lb_enabled'] = True
+        cluster_template = objects.ClusterTemplate(
+            self.context, **self.cluster_template_dict)
+        mock_objects_cluster_template_get_by_uuid.return_value = \
+            cluster_template
+        expected_result = str('{"action":"get","node":{"key":"test","value":'
+                              '"1","modifiedIndex":10,"createdIndex":10}}')
+        mock_resp = mock.MagicMock()
+        mock_resp.text = expected_result
+        mock_get.return_value = mock_resp
+        mock_driver.return_value = swarm_dr.Driver()
+        cluster = objects.Cluster(self.context, **self.cluster_dict)
+
+        class Service(object):
+            def __init__(self):
+                self.enabled = True
+
+        mock_kc.return_value.client.services.list.return_value = [Service()]
+
+        (template_path,
+         definition,
+         env_files) = mock_driver()._extract_template_definition(self.context,
+                                                                 cluster)
+
+        expected = {
+            'ssh_key_name': 'keypair_id',
+            'external_network': 'external_network_id',
+            'fixed_network': 'fixed_network',
+            'fixed_subnet': 'fixed_subnet',
+            'dns_nameserver': 'dns_nameserver',
+            'server_image': 'image_id',
+            'master_flavor': 'master_flavor_id',
+            'node_flavor': 'flavor_id',
+            'number_of_masters': 1,
+            'number_of_nodes': 1,
+            'docker_volume_size': 20,
+            'docker_storage_driver': 'devicemapper',
+            'discovery_url': 'https://discovery.test.io/123456789',
+            'http_proxy': 'http_proxy',
+            'https_proxy': 'https_proxy',
+            'no_proxy': 'no_proxy',
+            'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
+            'magnum_url': self.mock_osc.magnum_url.return_value,
+            'tls_disabled': False,
+            'registry_enabled': False,
+            'network_driver': 'network_driver',
+            'flannel_network_cidr': '10.101.0.0/16',
+            'flannel_network_subnetlen': '26',
+            'flannel_backend': 'vxlan',
+            'trustee_domain_id': self.mock_keystone.trustee_domain_id,
+            'trustee_username': 'fake_trustee',
+            'trustee_password': 'fake_trustee_password',
+            'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656',
+            'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de',
+            'auth_url': 'http://192.168.10.10:5000/v3',
+            'swarm_version': 'fake-version',
+            'swarm_strategy': u'spread',
+            'volume_driver': 'rexray',
+            'rexray_preempt': 'False',
+            'docker_volume_type': 'lvmdriver-1',
+            'verify_ca': True,
+            'openstack_ca': '',
+            'nodes_affinity_policy': 'soft-anti-affinity'
+        }
+        self.assertEqual(expected, definition)
+        self.assertEqual(
+            ['../../common/templates/environments/no_private_network.yaml',
+             '../../common/templates/environments/with_volume.yaml',
+             '../../common/templates/environments/with_master_lb_octavia.yaml'
+             ],
+            env_files)
+
+    @patch('requests.get')
+    @patch('magnum.objects.ClusterTemplate.get_by_uuid')
+    @patch('magnum.drivers.common.driver.Driver.get_driver')
+    @patch('magnum.common.keystone.KeystoneClientV3')
     def test_extract_template_definition_multi_master(
             self,
+            mock_kc,
             mock_driver,
             mock_objects_cluster_template_get_by_uuid,
             mock_get):
@@ -422,6 +510,8 @@
         mock_driver.return_value = swarm_dr.Driver()
         cluster = objects.Cluster(self.context, **self.cluster_dict)
 
+
mock_kc.return_value.client.services.list.return_value = [] + (template_path, definition, env_files) = mock_driver()._extract_template_definition(self.context, diff -Nru magnum-6.0.1/magnum/tests/unit/drivers/test_template_definition.py magnum-6.1.0/magnum/tests/unit/drivers/test_template_definition.py --- magnum-6.0.1/magnum/tests/unit/drivers/test_template_definition.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/tests/unit/drivers/test_template_definition.py 2018-02-23 14:02:04.000000000 +0000 @@ -233,6 +233,7 @@ mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = False mock_cluster_template.registry_enabled = False + mock_cluster_template.network_driver = 'flannel' mock_cluster = mock.MagicMock() mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' del mock_cluster.stack_id @@ -265,15 +266,46 @@ 'grafana_admin_passwd') kube_dashboard_enabled = mock_cluster.labels.get( 'kube_dashboard_enabled') + influx_grafana_dashboard_enabled = mock_cluster.labels.get( + 'influx_grafana_dashboard_enabled') docker_volume_type = mock_cluster.labels.get( 'docker_volume_type') etcd_volume_size = mock_cluster.labels.get( 'etcd_volume_size') kube_tag = mock_cluster.labels.get('kube_tag') + etcd_tag = mock_cluster.labels.get('etcd_tag') + flannel_tag = mock_cluster.labels.get('flannel_tag') container_infra_prefix = mock_cluster.labels.get( 'container_infra_prefix') availability_zone = mock_cluster.labels.get( 'availability_zone') + cert_manager_api = mock_cluster.labels.get('cert_manager_api') + calico_tag = mock_cluster.labels.get( + 'calico_tag') + calico_cni_tag = mock_cluster.labels.get( + 'calico_cni_tag') + calico_kube_controllers_tag = mock_cluster.labels.get( + 'calico_kube_controllers_tag') + calico_ipv4pool = mock_cluster.labels.get( + 'calico_ipv4pool') + if mock_cluster_template.network_driver == 'flannel': + pods_network_cidr = flannel_cidr + elif mock_cluster_template.network_driver == 'calico': + pods_network_cidr = calico_ipv4pool + ingress_controller = mock_cluster.labels.get( + 'ingress_controller') + ingress_controller_role = mock_cluster.labels.get( + 'ingress_controller_role') + kubelet_options = mock_cluster.labels.get( + 'kubelet_options') + kubeapi_options = mock_cluster.labels.get( + 'kubeapi_options') + kubecontroller_options = mock_cluster.labels.get( + 'kubecontroller_options') + kubescheduler_options = mock_cluster.labels.get( + 'kubescheduler_options') + kubeproxy_options = mock_cluster.labels.get( + 'kubeproxy_options') k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() @@ -292,16 +324,32 @@ 'prometheus_monitoring': prometheus_monitoring, 'grafana_admin_passwd': grafana_admin_passwd, 'kube_dashboard_enabled': kube_dashboard_enabled, + 'influx_grafana_dashboard_enabled': + influx_grafana_dashboard_enabled, 'docker_volume_type': docker_volume_type, 'etcd_volume_size': etcd_volume_size, + 'kubelet_options': kubelet_options, + 'kubeapi_options': kubeapi_options, + 'kubecontroller_options': kubecontroller_options, + 'kubescheduler_options': kubescheduler_options, + 'kubeproxy_options': kubeproxy_options, 'username': 'fake_user', 'magnum_url': mock_osc.magnum_url.return_value, 'region_name': mock_osc.cinder_region_name.return_value, 'kube_tag': kube_tag, + 'etcd_tag': etcd_tag, + 'flannel_tag': flannel_tag, 'container_infra_prefix': container_infra_prefix, 'nodes_affinity_policy': 'soft-anti-affinity', 'availability_zone': availability_zone, - }} + 'cert_manager_api': cert_manager_api, + 'calico_tag': calico_tag, + 'calico_cni_tag': 
calico_cni_tag, + 'calico_kube_controllers_tag': calico_kube_controllers_tag, + 'calico_ipv4pool': calico_ipv4pool, + 'pods_network_cidr': pods_network_cidr, + 'ingress_controller': ingress_controller, + 'ingress_controller_role': ingress_controller_role}} mock_get_params.assert_called_once_with(mock_context, mock_cluster_template, mock_cluster, @@ -321,6 +369,7 @@ mock_cluster_template = mock.MagicMock() mock_cluster_template.tls_disabled = True mock_cluster_template.registry_enabled = False + mock_cluster_template.network_driver = 'calico' mock_cluster = mock.MagicMock() mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52' del mock_cluster.stack_id @@ -353,15 +402,46 @@ 'grafana_admin_passwd') kube_dashboard_enabled = mock_cluster.labels.get( 'kube_dashboard_enabled') + influx_grafana_dashboard_enabled = mock_cluster.labels.get( + 'influx_grafana_dashboard_enabled') docker_volume_type = mock_cluster.labels.get( 'docker_volume_type') etcd_volume_size = mock_cluster.labels.get( 'etcd_volume_size') kube_tag = mock_cluster.labels.get('kube_tag') + etcd_tag = mock_cluster.labels.get('etcd_tag') + flannel_tag = mock_cluster.labels.get('flannel_tag') container_infra_prefix = mock_cluster.labels.get( 'container_infra_prefix') availability_zone = mock_cluster.labels.get( 'availability_zone') + cert_manager_api = mock_cluster.labels.get('cert_manager_api') + calico_tag = mock_cluster.labels.get( + 'calico_tag') + calico_cni_tag = mock_cluster.labels.get( + 'calico_cni_tag') + calico_kube_controllers_tag = mock_cluster.labels.get( + 'calico_kube_controllers_tag') + calico_ipv4pool = mock_cluster.labels.get( + 'calico_ipv4pool') + if mock_cluster_template.network_driver == 'flannel': + pods_network_cidr = flannel_cidr + elif mock_cluster_template.network_driver == 'calico': + pods_network_cidr = calico_ipv4pool + ingress_controller = mock_cluster.labels.get( + 'ingress_controller') + ingress_controller_role = mock_cluster.labels.get( + 'ingress_controller_role') + kubelet_options = mock_cluster.labels.get( + 'kubelet_options') + kubeapi_options = mock_cluster.labels.get( + 'kubeapi_options') + kubecontroller_options = mock_cluster.labels.get( + 'kubecontroller_options') + kubescheduler_options = mock_cluster.labels.get( + 'kubescheduler_options') + kubeproxy_options = mock_cluster.labels.get( + 'kubeproxy_options') k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() @@ -380,18 +460,34 @@ 'prometheus_monitoring': prometheus_monitoring, 'grafana_admin_passwd': grafana_admin_passwd, 'kube_dashboard_enabled': kube_dashboard_enabled, + 'influx_grafana_dashboard_enabled': + influx_grafana_dashboard_enabled, 'docker_volume_type': docker_volume_type, 'etcd_volume_size': etcd_volume_size, + 'kubelet_options': kubelet_options, + 'kubeapi_options': kubeapi_options, + 'kubecontroller_options': kubecontroller_options, + 'kubescheduler_options': kubescheduler_options, + 'kubeproxy_options': kubeproxy_options, 'username': 'fake_user', 'magnum_url': mock_osc.magnum_url.return_value, 'region_name': mock_osc.cinder_region_name.return_value, 'loadbalancing_protocol': 'HTTP', 'kubernetes_port': 8080, 'kube_tag': kube_tag, + 'etcd_tag': etcd_tag, + 'flannel_tag': flannel_tag, 'container_infra_prefix': container_infra_prefix, 'nodes_affinity_policy': 'soft-anti-affinity', 'availability_zone': availability_zone, - }} + 'cert_manager_api': cert_manager_api, + 'calico_tag': calico_tag, + 'calico_cni_tag': calico_cni_tag, + 'calico_kube_controllers_tag': calico_kube_controllers_tag, + 'calico_ipv4pool': calico_ipv4pool, 
+ 'pods_network_cidr': pods_network_cidr, + 'ingress_controller': ingress_controller, + 'ingress_controller_role': ingress_controller_role}} mock_get_params.assert_called_once_with(mock_context, mock_cluster_template, mock_cluster, diff -Nru magnum-6.0.1/magnum/tests/unit/objects/test_federation.py magnum-6.1.0/magnum/tests/unit/objects/test_federation.py --- magnum-6.0.1/magnum/tests/unit/objects/test_federation.py 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/magnum/tests/unit/objects/test_federation.py 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,161 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_utils import uuidutils +from testtools.matchers import HasLength + +from magnum.common import exception +from magnum import objects +from magnum.tests.unit.db import base +from magnum.tests.unit.db import utils + + +class TestFederationObject(base.DbTestCase): + def setUp(self): + super(TestFederationObject, self).setUp() + self.fake_federation = utils.get_test_federation( + uuid=uuidutils.generate_uuid(), + hostcluster_id=uuidutils.generate_uuid(), + member_ids=[] + ) + + def test_get_by_id(self): + federation_id = self.fake_federation['id'] + with mock.patch.object(self.dbapi, 'get_federation_by_id', + autospec=True) as mock_get_federation: + mock_get_federation.return_value = self.fake_federation + federation = objects.Federation.get(self.context, federation_id) + mock_get_federation.assert_called_once_with(self.context, + federation_id) + self.assertEqual(self.context, federation._context) + + def test_get_by_uuid(self): + federation_uuid = self.fake_federation['uuid'] + with mock.patch.object(self.dbapi, 'get_federation_by_uuid', + autospec=True) as mock_get_federation: + mock_get_federation.return_value = self.fake_federation + federation = objects.Federation.get(self.context, federation_uuid) + mock_get_federation.assert_called_once_with(self.context, + federation_uuid) + self.assertEqual(self.context, federation._context) + + def test_get_by_name(self): + name = self.fake_federation['name'] + with mock.patch.object(self.dbapi, 'get_federation_by_name', + autospec=True) as mock_get_federation: + mock_get_federation.return_value = self.fake_federation + federation = objects.Federation.get_by_name(self.context, name) + mock_get_federation.assert_called_once_with(self.context, name) + self.assertEqual(self.context, federation._context) + + def test_get_bad_id_and_uuid(self): + self.assertRaises(exception.InvalidIdentity, + objects.Federation.get, self.context, 'not-a-uuid') + + def test_list(self): + with mock.patch.object(self.dbapi, 'get_federation_list', + autospec=True) as mock_get_list: + mock_get_list.return_value = [self.fake_federation] + federations = objects.Federation.list(self.context) + self.assertEqual(1, mock_get_list.call_count) + self.assertThat(federations, HasLength(1)) + self.assertIsInstance(federations[0], objects.Federation) + self.assertEqual(self.context, federations[0]._context) + + def test_list_all(self): + with 
mock.patch.object(self.dbapi, 'get_federation_list', + autospec=True) as mock_get_list: + mock_get_list.return_value = [self.fake_federation] + self.context.all_tenants = True + federations = objects.Federation.list(self.context) + mock_get_list.assert_called_once_with( + self.context, limit=None, marker=None, filters=None, + sort_dir=None, sort_key=None) + self.assertEqual(1, mock_get_list.call_count) + self.assertThat(federations, HasLength(1)) + self.assertIsInstance(federations[0], objects.Federation) + self.assertEqual(self.context, federations[0]._context) + + def test_list_with_filters(self): + with mock.patch.object(self.dbapi, 'get_federation_list', + autospec=True) as mock_get_list: + mock_get_list.return_value = [self.fake_federation] + filters = {'name': 'federation1'} + federations = objects.Federation.list(self.context, + filters=filters) + + mock_get_list.assert_called_once_with(self.context, sort_key=None, + sort_dir=None, + filters=filters, limit=None, + marker=None) + self.assertEqual(1, mock_get_list.call_count) + self.assertThat(federations, HasLength(1)) + self.assertIsInstance(federations[0], objects.Federation) + self.assertEqual(self.context, federations[0]._context) + + def test_create(self): + with mock.patch.object(self.dbapi, 'create_federation', + autospec=True) as mock_create_federation: + mock_create_federation.return_value = self.fake_federation + federation = objects.Federation(self.context, + **self.fake_federation) + federation.create() + mock_create_federation.assert_called_once_with( + self.fake_federation) + self.assertEqual(self.context, federation._context) + + def test_destroy(self): + uuid = self.fake_federation['uuid'] + with mock.patch.object(self.dbapi, 'get_federation_by_uuid', + autospec=True) as mock_get_federation: + mock_get_federation.return_value = self.fake_federation + with mock.patch.object(self.dbapi, 'destroy_federation', + autospec=True) as mock_destroy_federation: + federation = objects.Federation.get_by_uuid(self.context, uuid) + federation.destroy() + mock_get_federation.assert_called_once_with(self.context, uuid) + mock_destroy_federation.assert_called_once_with(uuid) + self.assertEqual(self.context, federation._context) + + def test_save(self): + uuid = self.fake_federation['uuid'] + with mock.patch.object(self.dbapi, 'get_federation_by_uuid', + autospec=True) as mock_get_federation: + mock_get_federation.return_value = self.fake_federation + with mock.patch.object(self.dbapi, 'update_federation', + autospec=True) as mock_update_federation: + federation = objects.Federation.get_by_uuid(self.context, uuid) + federation.member_ids = ['new-member'] + federation.save() + + mock_get_federation.assert_called_once_with(self.context, uuid) + mock_update_federation.assert_called_once_with( + uuid, {'member_ids': ['new-member']}) + self.assertEqual(self.context, federation._context) + + def test_refresh(self): + uuid = self.fake_federation['uuid'] + new_uuid = uuidutils.generate_uuid() + returns = [dict(self.fake_federation, uuid=uuid), + dict(self.fake_federation, uuid=new_uuid)] + expected = [mock.call(self.context, uuid), + mock.call(self.context, uuid)] + with mock.patch.object(self.dbapi, 'get_federation_by_uuid', + side_effect=returns, + autospec=True) as mock_get_federation: + federation = objects.Federation.get_by_uuid(self.context, uuid) + self.assertEqual(uuid, federation.uuid) + federation.refresh() + self.assertEqual(new_uuid, federation.uuid) + self.assertEqual(expected, mock_get_federation.call_args_list) + 
self.assertEqual(self.context, federation._context) diff -Nru magnum-6.0.1/magnum/tests/unit/objects/test_objects.py magnum-6.1.0/magnum/tests/unit/objects/test_objects.py --- magnum-6.0.1/magnum/tests/unit/objects/test_objects.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/tests/unit/objects/test_objects.py 2018-02-23 14:02:04.000000000 +0000 @@ -363,6 +363,7 @@ 'MagnumService': '1.0-2d397ec59b0046bd5ec35cd3e06efeca', 'Stats': '1.0-73a1cd6e3c0294c932a66547faba216c', 'Quota': '1.0-94e100aebfa88f7d8428e007f2049c18', + 'Federation': '1.0-166da281432b083f0e4b851336e12e20' } diff -Nru magnum-6.0.1/magnum/tests/unit/objects/utils.py magnum-6.1.0/magnum/tests/unit/objects/utils.py --- magnum-6.0.1/magnum/tests/unit/objects/utils.py 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/magnum/tests/unit/objects/utils.py 2018-02-23 14:02:04.000000000 +0000 @@ -157,6 +157,33 @@ return magnum_service +def get_test_federation(context, **kw): + """Return a Federation object with appropriate attributes. + + NOTE: The object leaves the attributes marked as changed, such + that a create() could be used to commit it to the DB. + """ + db_federation = db_utils.get_test_federation(**kw) + # Let DB generate ID if it isn't specified explicitly + if 'id' not in kw: + del db_federation['id'] + federation = objects.Federation(context) + for key in db_federation: + setattr(federation, key, db_federation[key]) + return federation + + +def create_test_federation(context, **kw): + """Create and return a test Federation object. + + Create a Federation in the DB and return a Federation object with + appropriate attributes. + """ + federation = get_test_federation(context, **kw) + federation.create() + return federation + + def datetime_or_none(dt): """Validate a datetime or None value.""" if dt is None: diff -Nru magnum-6.0.1/magnum.egg-info/pbr.json magnum-6.1.0/magnum.egg-info/pbr.json --- magnum-6.0.1/magnum.egg-info/pbr.json 2018-02-09 15:27:51.000000000 +0000 +++ magnum-6.1.0/magnum.egg-info/pbr.json 2018-02-23 14:05:48.000000000 +0000 @@ -1 +1 @@ -{"git_version": "710192a", "is_release": true} \ No newline at end of file +{"git_version": "dd1a2aa", "is_release": true} \ No newline at end of file diff -Nru magnum-6.0.1/magnum.egg-info/PKG-INFO magnum-6.1.0/magnum.egg-info/PKG-INFO --- magnum-6.0.1/magnum.egg-info/PKG-INFO 2018-02-09 15:27:51.000000000 +0000 +++ magnum-6.1.0/magnum.egg-info/PKG-INFO 2018-02-23 14:05:48.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: magnum -Version: 6.0.1 +Version: 6.1.0 Summary: Container Management project for OpenStack Home-page: http://docs.openstack.org/magnum/latest/ Author: OpenStack diff -Nru magnum-6.0.1/magnum.egg-info/requires.txt magnum-6.1.0/magnum.egg-info/requires.txt --- magnum-6.0.1/magnum.egg-info/requires.txt 2018-02-09 15:27:51.000000000 +0000 +++ magnum-6.1.0/magnum.egg-info/requires.txt 2018-02-23 14:05:48.000000000 +0000 @@ -16,19 +16,19 @@ kubernetes>=4.0.0 marathon!=0.9.1,>=0.8.6 netaddr>=0.7.18 -oslo.concurrency>=3.20.0 +oslo.concurrency>=3.25.0 oslo.config>=5.1.0 oslo.context>=2.19.2 oslo.db>=4.27.0 oslo.i18n>=3.15.3 -oslo.log>=3.30.0 +oslo.log>=3.36.0 oslo.messaging>=5.29.0 oslo.middleware>=3.31.0 oslo.policy>=1.30.0 oslo.serialization!=2.19.1,>=2.18.0 oslo.service!=1.28.1,>=1.24.0 oslo.utils>=3.33.0 -oslo.versionedobjects>=1.28.0 +oslo.versionedobjects>=1.31.2 oslo.reports>=1.18.0 pbr!=2.1.0,>=2.0.0 pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 @@ -43,7 +43,7 @@ 
setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=16.0 six>=1.10.0 stevedore>=1.20.0 -taskflow>=2.7.0 +taskflow>=2.16.0 cryptography!=2.0,>=1.9 Werkzeug>=0.7 diff -Nru magnum-6.0.1/magnum.egg-info/SOURCES.txt magnum-6.1.0/magnum.egg-info/SOURCES.txt --- magnum-6.0.1/magnum.egg-info/SOURCES.txt 2018-02-09 15:27:53.000000000 +0000 +++ magnum-6.1.0/magnum.egg-info/SOURCES.txt 2018-02-23 14:05:49.000000000 +0000 @@ -215,6 +215,7 @@ magnum/api/controllers/v1/cluster.py magnum/api/controllers/v1/cluster_template.py magnum/api/controllers/v1/collection.py +magnum/api/controllers/v1/federation.py magnum/api/controllers/v1/magnum_services.py magnum/api/controllers/v1/quota.py magnum/api/controllers/v1/stats.py @@ -255,6 +256,7 @@ magnum/common/policies/certificate.py magnum/common/policies/cluster.py magnum/common/policies/cluster_template.py +magnum/common/policies/federation.py magnum/common/policies/magnum_service.py magnum/common/policies/quota.py magnum/common/policies/stats.py @@ -272,6 +274,7 @@ magnum/conductor/handlers/ca_conductor.py magnum/conductor/handlers/cluster_conductor.py magnum/conductor/handlers/conductor_listener.py +magnum/conductor/handlers/federation_conductor.py magnum/conductor/handlers/indirection_api.py magnum/conductor/handlers/common/__init__.py magnum/conductor/handlers/common/cert_manager.py @@ -418,6 +421,7 @@ magnum/drivers/common/templates/environments/no_volume.yaml magnum/drivers/common/templates/environments/with_etcd_volume.yaml magnum/drivers/common/templates/environments/with_master_lb.yaml +magnum/drivers/common/templates/environments/with_master_lb_octavia.yaml magnum/drivers/common/templates/environments/with_private_network.yaml magnum/drivers/common/templates/environments/with_volume.yaml magnum/drivers/common/templates/fragments/api_gateway_switcher_master.yaml @@ -432,17 +436,21 @@ magnum/drivers/common/templates/fragments/network_switcher_existing.yaml magnum/drivers/common/templates/fragments/network_switcher_private.yaml magnum/drivers/common/templates/kubernetes/fragments/add-proxy.sh +magnum/drivers/common/templates/kubernetes/fragments/calico-service.sh magnum/drivers/common/templates/kubernetes/fragments/configure-etcd.sh magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-master.sh magnum/drivers/common/templates/kubernetes/fragments/configure-kubernetes-minion.sh magnum/drivers/common/templates/kubernetes/fragments/core-dns-service.sh magnum/drivers/common/templates/kubernetes/fragments/disable-selinux.sh +magnum/drivers/common/templates/kubernetes/fragments/enable-cert-api-manager +magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-controller +magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-traefik magnum/drivers/common/templates/kubernetes/fragments/enable-node-exporter.sh magnum/drivers/common/templates/kubernetes/fragments/enable-prometheus-monitoring magnum/drivers/common/templates/kubernetes/fragments/enable-services-master.sh magnum/drivers/common/templates/kubernetes/fragments/enable-services-minion.sh +magnum/drivers/common/templates/kubernetes/fragments/kube-apiserver-to-kubelet-role.sh magnum/drivers/common/templates/kubernetes/fragments/kube-dashboard-service.sh -magnum/drivers/common/templates/kubernetes/fragments/kube-system-namespace-service.sh magnum/drivers/common/templates/kubernetes/fragments/make-cert-client.sh magnum/drivers/common/templates/kubernetes/fragments/make-cert.sh 
magnum/drivers/common/templates/kubernetes/fragments/network-config-service.sh @@ -592,6 +600,7 @@ magnum/objects/certificate.py magnum/objects/cluster.py magnum/objects/cluster_template.py +magnum/objects/federation.py magnum/objects/fields.py magnum/objects/magnum_service.py magnum/objects/quota.py @@ -680,6 +689,7 @@ magnum/tests/unit/api/controllers/v1/test_certificate.py magnum/tests/unit/api/controllers/v1/test_cluster.py magnum/tests/unit/api/controllers/v1/test_cluster_template.py +magnum/tests/unit/api/controllers/v1/test_federation.py magnum/tests/unit/api/controllers/v1/test_magnum_service.py magnum/tests/unit/api/controllers/v1/test_quota.py magnum/tests/unit/api/controllers/v1/test_stats.py @@ -722,6 +732,7 @@ magnum/tests/unit/conductor/handlers/test_ca_conductor.py magnum/tests/unit/conductor/handlers/test_cluster_conductor.py magnum/tests/unit/conductor/handlers/test_conductor_listener.py +magnum/tests/unit/conductor/handlers/test_federation_conductor.py magnum/tests/unit/conductor/handlers/test_indirection_api.py magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py @@ -750,6 +761,7 @@ magnum/tests/unit/objects/__init__.py magnum/tests/unit/objects/test_cluster.py magnum/tests/unit/objects/test_cluster_template.py +magnum/tests/unit/objects/test_federation.py magnum/tests/unit/objects/test_fields.py magnum/tests/unit/objects/test_magnum_service.py magnum/tests/unit/objects/test_objects.py @@ -771,6 +783,7 @@ releasenotes/notes/CVE-2016-7404-f53e62a4a40e4d30.yaml releasenotes/notes/add-container_infra_prefix-516cc43fbc5a0617.yaml releasenotes/notes/add-docker-storage-driver-to-baymodel-1ed9ba8d43ecfea1.yaml +releasenotes/notes/add-federation-api-cf55d04f96772b0f.yaml releasenotes/notes/add-hostgw-backend-option-1d1f9d8d95ec374f.yaml releasenotes/notes/add-opensuse-driver-f69b6d346ca82b87.yaml releasenotes/notes/add-overlay-networks-to-swarm-4467986d7853fcd8.yaml @@ -792,10 +805,14 @@ releasenotes/notes/bug-1697655-add-etcd-volume-size-label-abde0060595bbbeb.yaml releasenotes/notes/bug-1718947-0d4e67529e2817d7.yaml releasenotes/notes/bug-1722522-d94743c6362a5e48.yaml +releasenotes/notes/calico-network-driver-0199c2459041ae81.yaml +releasenotes/notes/cert-manager-api-ee0cf7f3b767bb5d.yaml releasenotes/notes/change-bay-to-cluster-in-config-1f2b95d1176d7231.yaml releasenotes/notes/change-service-name-ce5c72642fe1d3d1.yaml +releasenotes/notes/cluster_template_update_labels-10ce66c87795f11c.yaml releasenotes/notes/docker-volume-type-46044734f5a27661.yaml releasenotes/notes/fix-global-stack-list-7a3a66169f5c4aa8.yaml +releasenotes/notes/ingress-controller-552ea956ceabdd25.yaml releasenotes/notes/integrate-osprofiler-79bdf2d0cd8a39fb.yaml releasenotes/notes/keystone_trustee_interface-6d63b74616dda1d4.yaml releasenotes/notes/no-cinder-volume-87b9339e066c30a0.yaml @@ -808,7 +825,9 @@ releasenotes/notes/support-policy-and-doc-in-code-0c19e479dbd953c9.yaml releasenotes/notes/support_nodes_affinity_policy-22253fb9cf6739ec.yaml releasenotes/notes/swarm-integration-with-cinder-e3068138a3f75dbe.yaml +releasenotes/notes/update-kubernetes-dashboard-5196831c32d55aee.yaml releasenotes/notes/update-swarm-73d4340a881bff2f.yaml +releasenotes/notes/update-to-f27-cc8aa873cdf111bc.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst diff -Nru magnum-6.0.1/PKG-INFO magnum-6.1.0/PKG-INFO --- magnum-6.0.1/PKG-INFO 2018-02-09 15:27:53.000000000 +0000 +++ magnum-6.1.0/PKG-INFO 
2018-02-23 14:05:49.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: magnum -Version: 6.0.1 +Version: 6.1.0 Summary: Container Management project for OpenStack Home-page: http://docs.openstack.org/magnum/latest/ Author: OpenStack diff -Nru magnum-6.0.1/releasenotes/notes/add-federation-api-cf55d04f96772b0f.yaml magnum-6.1.0/releasenotes/notes/add-federation-api-cf55d04f96772b0f.yaml --- magnum-6.0.1/releasenotes/notes/add-federation-api-cf55d04f96772b0f.yaml 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/releasenotes/notes/add-federation-api-cf55d04f96772b0f.yaml 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,11 @@ +--- +features: + - | + This release introduces a 'federations' endpoint + in the Magnum API, which allows an admin to create + and manage federations of clusters through Magnum. + As the feature is still under development, + the endpoints are not bound to any driver yet. + For more details, please refer to bp/federation-api [1]. + + [1] https://review.openstack.org/#/q/topic:bp/federation-api diff -Nru magnum-6.0.1/releasenotes/notes/broken-kuberenetes-client-d2d1da6029825208.yaml magnum-6.1.0/releasenotes/notes/broken-kuberenetes-client-d2d1da6029825208.yaml --- magnum-6.0.1/releasenotes/notes/broken-kuberenetes-client-d2d1da6029825208.yaml 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/releasenotes/notes/broken-kuberenetes-client-d2d1da6029825208.yaml 2018-02-23 14:02:04.000000000 +0000 @@ -10,6 +10,10 @@ task uses the kubernetes client for kubernetes clusters and it crashes the sync_cluster_status and send_cluster_metrics tasks. https://bugs.launchpad.net/magnum/+bug/1746510 + Additionally, the kubernetes scale manager needs to be disabled + so that it does not break the scale-down command completely. Note that, + when magnum scales down a cluster, it picks the nodes to remove at random. + upgrade: - | In magnum configuration, in [drivers] set send_cluster_metrics = False to diff -Nru magnum-6.0.1/releasenotes/notes/calico-network-driver-0199c2459041ae81.yaml magnum-6.1.0/releasenotes/notes/calico-network-driver-0199c2459041ae81.yaml --- magnum-6.0.1/releasenotes/notes/calico-network-driver-0199c2459041ae81.yaml 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/releasenotes/notes/calico-network-driver-0199c2459041ae81.yaml 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,5 @@ +--- +issues: + - | + Add 'calico' as a network driver for Kubernetes so as to support network + isolation between namespaces with k8s network policy. diff -Nru magnum-6.0.1/releasenotes/notes/cert-manager-api-ee0cf7f3b767bb5d.yaml magnum-6.1.0/releasenotes/notes/cert-manager-api-ee0cf7f3b767bb5d.yaml --- magnum-6.0.1/releasenotes/notes/cert-manager-api-ee0cf7f3b767bb5d.yaml 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/releasenotes/notes/cert-manager-api-ee0cf7f3b767bb5d.yaml 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,5 @@ +--- +features: + - | + Add the new label 'cert_manager_api' enabling the Kubernetes certificate + manager API. diff -Nru magnum-6.0.1/releasenotes/notes/cluster_template_update_labels-10ce66c87795f11c.yaml magnum-6.1.0/releasenotes/notes/cluster_template_update_labels-10ce66c87795f11c.yaml --- magnum-6.0.1/releasenotes/notes/cluster_template_update_labels-10ce66c87795f11c.yaml 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/releasenotes/notes/cluster_template_update_labels-10ce66c87795f11c.yaml 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,7 @@ +--- +fixes: + - | + Users can now update labels in a cluster-template. Previously a string was + passed as the value of labels, but labels can only hold + dictionary values. The string is now parsed and stored as a + dictionary for labels in the cluster-template.
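The cluster_template_update_labels fix above converts the label string a user supplies into the dictionary form stored on the cluster-template. A minimal sketch of such a conversion, assuming a 'key1=value1,key2=value2' input format (the helper name and the format are illustrative, not Magnum's actual parser):

    # Hypothetical sketch: parse a "key1=value1,key2=value2" label string
    # into the dictionary that cluster-template labels must hold.
    def parse_labels(label_string):
        labels = {}
        if not label_string:
            return labels
        for pair in label_string.split(','):
            key, sep, value = pair.partition('=')
            if sep:  # skip malformed entries that have no '=' separator
                labels[key.strip()] = value.strip()
        return labels

    # Example:
    # parse_labels('cert_manager_api=true,ingress_controller=traefik')
    # => {'cert_manager_api': 'true', 'ingress_controller': 'traefik'}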
diff -Nru magnum-6.0.1/releasenotes/notes/ingress-controller-552ea956ceabdd25.yaml magnum-6.1.0/releasenotes/notes/ingress-controller-552ea956ceabdd25.yaml --- magnum-6.0.1/releasenotes/notes/ingress-controller-552ea956ceabdd25.yaml 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/releasenotes/notes/ingress-controller-552ea956ceabdd25.yaml 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,8 @@ +--- +features: + - | + Add new labels 'ingress_controller' and 'ingress_controller_role' enabling + the deployment of a Kubernetes Ingress Controller backend for clusters. + The default for 'ingress_controller' is '' (meaning no controller is + deployed); the only other supported value is currently 'traefik'. + The default for 'ingress_controller_role' is 'ingress'. diff -Nru magnum-6.0.1/releasenotes/notes/update-kubernetes-dashboard-5196831c32d55aee.yaml magnum-6.1.0/releasenotes/notes/update-kubernetes-dashboard-5196831c32d55aee.yaml --- magnum-6.0.1/releasenotes/notes/update-kubernetes-dashboard-5196831c32d55aee.yaml 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/releasenotes/notes/update-kubernetes-dashboard-5196831c32d55aee.yaml 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,8 @@ +--- +features: + - | + Update the kubernetes dashboard to `v1.8.3`, which can be accessed via + kubectl proxy. Additionally, heapster is deployed as a standalone + deployment and the user can enable a grafana-influx stack with the + `influx_grafana_dashboard_enabled` label. See the kubernetes dashboard + documentation for more details. https://github.com/kubernetes/dashboard/wiki diff -Nru magnum-6.0.1/releasenotes/notes/update-to-f27-cc8aa873cdf111bc.yaml magnum-6.1.0/releasenotes/notes/update-to-f27-cc8aa873cdf111bc.yaml --- magnum-6.0.1/releasenotes/notes/update-to-f27-cc8aa873cdf111bc.yaml 1970-01-01 00:00:00.000000000 +0000 +++ magnum-6.1.0/releasenotes/notes/update-to-f27-cc8aa873cdf111bc.yaml 2018-02-23 14:02:04.000000000 +0000 @@ -0,0 +1,10 @@ +--- +features: + - | + Update the k8s_fedora_atomic driver to the latest Fedora Atomic 27 release + and run etcd and flanneld in system containers, which have been removed + from the base OS. +upgrade: + - | + New clusters should be created with kube_tag=v1.9.3 or later. v1.9.3 is + the default version in the Queens release.
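Several of the release notes above add labels ('cert_manager_api', 'ingress_controller', 'ingress_controller_role', 'influx_grafana_dashboard_enabled') that the updated test_template_definition.py expects to flow straight from cluster labels into Heat parameters, alongside the new kube daemon '*_options' labels. A minimal sketch of that flow, mirroring the lookups the tests assert (the function name is illustrative, and the 'flannel_network_cidr' key in the flannel branch is an assumption based on the tests' flannel_cidr variable):

    # Sketch: copy the 6.1.0 labels through to the Heat parameters and derive
    # pods_network_cidr from the template's network driver, as the updated
    # unit tests assert.
    NEW_LABELS = (
        'cert_manager_api', 'ingress_controller', 'ingress_controller_role',
        'kubelet_options', 'kubeapi_options', 'kubecontroller_options',
        'kubescheduler_options', 'kubeproxy_options',
        'influx_grafana_dashboard_enabled', 'etcd_tag', 'flannel_tag',
        'calico_tag', 'calico_cni_tag', 'calico_kube_controllers_tag',
        'calico_ipv4pool',
    )

    def collect_label_params(cluster, cluster_template):
        # Labels are copied through unchanged; defaults are applied elsewhere.
        params = {name: cluster.labels.get(name) for name in NEW_LABELS}
        # pods_network_cidr follows the template's network driver.
        if cluster_template.network_driver == 'flannel':
            params['pods_network_cidr'] = cluster.labels.get(
                'flannel_network_cidr')
        elif cluster_template.network_driver == 'calico':
            params['pods_network_cidr'] = params['calico_ipv4pool']
        return params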
diff -Nru magnum-6.0.1/requirements.txt magnum-6.1.0/requirements.txt --- magnum-6.0.1/requirements.txt 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/requirements.txt 2018-02-23 14:02:11.000000000 +0000 @@ -23,19 +23,19 @@ kubernetes>=4.0.0 # Apache-2.0 marathon!=0.9.1,>=0.8.6 # MIT netaddr>=0.7.18 # BSD -oslo.concurrency>=3.20.0 # Apache-2.0 +oslo.concurrency>=3.25.0 # Apache-2.0 oslo.config>=5.1.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 oslo.db>=4.27.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 -oslo.log>=3.30.0 # Apache-2.0 +oslo.log>=3.36.0 # Apache-2.0 oslo.messaging>=5.29.0 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.policy>=1.30.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 -oslo.versionedobjects>=1.28.0 # Apache-2.0 +oslo.versionedobjects>=1.31.2 # Apache-2.0 oslo.reports>=1.18.0 # Apache-2.0 pbr!=2.1.0,>=2.0.0 # Apache-2.0 pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD @@ -50,6 +50,6 @@ setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=16.0 # PSF/ZPL six>=1.10.0 # MIT stevedore>=1.20.0 # Apache-2.0 -taskflow>=2.7.0 # Apache-2.0 +taskflow>=2.16.0 # Apache-2.0 cryptography!=2.0,>=1.9 # BSD/Apache-2.0 Werkzeug>=0.7 # BSD License diff -Nru magnum-6.0.1/specs/containers-service.rst magnum-6.1.0/specs/containers-service.rst --- magnum-6.0.1/specs/containers-service.rst 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/specs/containers-service.rst 2018-02-23 14:02:04.000000000 +0000 @@ -175,7 +175,7 @@ |           +-------+ |  | +-----+                   | |                     |  |                           | +-----------+---------+  +---------------+-----------+ -             |                            |             +             |                            | +-----------+----+ Compute Host ---------|-----------+ |                                    +---+---+       | |                               +----+ Relay +---+   | diff -Nru magnum-6.0.1/test-requirements.txt magnum-6.1.0/test-requirements.txt --- magnum-6.0.1/test-requirements.txt 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/test-requirements.txt 2018-02-23 14:02:11.000000000 +0000 @@ -12,14 +12,14 @@ fixtures>=3.0.0 # Apache-2.0/BSD hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0 mock>=2.0.0 # BSD -openstackdocstheme>=1.17.0 # Apache-2.0 -oslotest>=1.10.0 # Apache-2.0 +openstackdocstheme>=1.18.1 # Apache-2.0 +oslotest>=3.2.0 # Apache-2.0 osprofiler>=1.4.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 os-testr>=1.0.0 # Apache-2.0 python-subunit>=1.0.0 # Apache-2.0/BSD pytz>=2013.6 # MIT -sphinx>=1.6.2 # BSD +sphinx!=1.6.6,>=1.6.2 # BSD testrepository>=0.0.18 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD testtools>=2.2.0 # MIT diff -Nru magnum-6.0.1/tox.ini magnum-6.1.0/tox.ini --- magnum-6.0.1/tox.ini 2018-02-09 15:24:26.000000000 +0000 +++ magnum-6.1.0/tox.ini 2018-02-23 14:02:11.000000000 +0000 @@ -6,7 +6,7 @@ [testenv] usedevelop = True install_command = - pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} -U {opts} {packages} + pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/queens} -U {opts} {packages} whitelist_externals = bash find rm
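The new test_extract_template_definition_with_lb_octavia tests above stub KeystoneClientV3 so that services.list returns either an empty list (plain Neutron) or a service object with enabled = True (Octavia), and then assert that the with_master_lb_octavia.yaml environment file is chosen. A minimal sketch of the selection logic those tests imply (the helper names and the 'load-balancer' type filter are assumptions, not Magnum's exact code):

    # Sketch: choose the master load balancer environment file based on
    # whether an enabled Octavia service is registered in Keystone.
    def octavia_enabled(keystone):
        # The tests mock keystone.client.services.list(...) to return either
        # [] or a list of service objects carrying an 'enabled' attribute.
        services = keystone.client.services.list(type='load-balancer')
        return any(service.enabled for service in services)

    def master_lb_env(keystone):
        if octavia_enabled(keystone):
            return ('../../common/templates/environments/'
                    'with_master_lb_octavia.yaml')
        return '../../common/templates/environments/with_master_lb.yaml'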