diff -Nru ceilometer-10.0.1/AUTHORS ceilometer-11.0.0/AUTHORS --- ceilometer-10.0.1/AUTHORS 2018-06-14 13:58:01.000000000 +0000 +++ ceilometer-11.0.0/AUTHORS 2018-07-30 18:10:26.000000000 +0000 @@ -44,6 +44,7 @@ ChangBo Guo(gcb) Chaozhe.Chen Charles Bitter +Chen Hanxiao ChenZheng Chinmaya Bharadwaj Chmouel Boudjnah @@ -52,6 +53,7 @@ Christian Berendt Christian Martinez Christian Schwede +Christian Zunker Chuck Short Clark Boylan Claudiu Belu @@ -91,6 +93,7 @@ Fabio Giannetti Fei Long Wang Feilong Wang +Felix Walter Feng Xi Yan Fengqian Gao Flavio Percoco @@ -227,6 +230,7 @@ Patrick East Paul Belanger Paul Bourke +Pavlo Shchelokovskyy Peter Nordquist Peter Portante Petr Kovar @@ -315,6 +319,7 @@ Vu Cong Tuan WenyanZhang Wenzhi Yu +Witold Bedyk Wu Wenxiang Xia Linjuan XiaBing Yao @@ -372,6 +377,7 @@ guillaume pernot hanxi.liu hgangwx +inspurericzhang jiaxi jimmygc jing.liuqing @@ -418,6 +424,8 @@ venkatamahesh vivek.nandavanam vivek.nandavanam +wangqi +wangqiangbj wbluo0907 xialinjuan xianbin @@ -426,6 +434,7 @@ xiaozhuangqing xiexianbin xingzhou +xqk xugang xuqiankun yanghuichan diff -Nru ceilometer-10.0.1/bindep.txt ceilometer-11.0.0/bindep.txt --- ceilometer-10.0.1/bindep.txt 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/bindep.txt 2018-07-30 18:08:00.000000000 +0000 @@ -4,3 +4,4 @@ build-essential [platform:dpkg] libffi-dev [platform:dpkg] gettext [platform:dpkg] +python37 [platform:rpm py37] diff -Nru ceilometer-10.0.1/ceilometer/cmd/polling.py ceilometer-11.0.0/ceilometer/cmd/polling.py --- ceilometer-10.0.1/ceilometer/cmd/polling.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/cmd/polling.py 2018-07-30 18:08:00.000000000 +0000 @@ -62,7 +62,6 @@ CLI_OPTS = [ MultiChoicesOpt('polling-namespaces', default=['compute', 'central'], - choices=['compute', 'central', 'ipmi'], dest='polling_namespaces', help='Polling namespace(s) to be used while ' 'resource polling'), diff -Nru ceilometer-10.0.1/ceilometer/compute/virt/hyperv/inspector.py ceilometer-11.0.0/ceilometer/compute/virt/hyperv/inspector.py --- ceilometer-10.0.1/ceilometer/compute/virt/hyperv/inspector.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/compute/virt/hyperv/inspector.py 2018-07-30 18:08:00.000000000 +0000 @@ -26,39 +26,45 @@ from ceilometer.compute.virt import inspector as virt_inspector -def convert_exceptions(function, exception_map): +def convert_exceptions(exception_map, yields=True): expected_exceptions = tuple(exception_map.keys()) - @functools.wraps(function) - def wrapper(*args, **kwargs): - try: - return function(*args, **kwargs) - except expected_exceptions as ex: - # exception might be a subclass of an expected exception. - for expected in expected_exceptions: - if isinstance(ex, expected): - raised_exception = exception_map[expected] - break - - exc_info = sys.exc_info() - # NOTE(claudiub): Python 3 raises the exception object given as - # the second argument in six.reraise. - # The original message will be maintained by passing the original - # exception. - exc = raised_exception(six.text_type(exc_info[1])) - six.reraise(raised_exception, exc, exc_info[2]) - return wrapper - - -def decorate_all_methods(decorator, *args, **kwargs): - def decorate(cls): - for attr in cls.__dict__: - class_member = getattr(cls, attr) - if callable(class_member): - setattr(cls, attr, decorator(class_member, *args, **kwargs)) - return cls + def _reraise_exception(exc): + # exception might be a subclass of an expected exception. 
+ for expected in expected_exceptions: + if isinstance(exc, expected): + raised_exception = exception_map[expected] + break + + exc_info = sys.exc_info() + # NOTE(claudiub): Python 3 raises the exception object given as + # the second argument in six.reraise. + # The original message will be maintained by passing the + # original exception. + exc = raised_exception(six.text_type(exc_info[1])) + six.reraise(raised_exception, exc, exc_info[2]) + + def decorator(function): + if yields: + @functools.wraps(function) + def wrapper(*args, **kwargs): + try: + # NOTE(claudiub): We're consuming the function's yield in + # order to avoid yielding a generator. + for item in function(*args, **kwargs): + yield item + except expected_exceptions as ex: + _reraise_exception(ex) + else: + @functools.wraps(function) + def wrapper(*args, **kwargs): + try: + return function(*args, **kwargs) + except expected_exceptions as ex: + _reraise_exception(ex) - return decorate + return wrapper + return decorator exception_conversion_map = collections.OrderedDict([ @@ -69,12 +75,11 @@ (os_win_exc.OSWinException, virt_inspector.InspectorException), ]) -# NOTE(claudiub): the purpose of the decorator below is to prevent any +# NOTE(claudiub): the purpose of the decorators below is to prevent any # os_win exceptions (subclasses of OSWinException) to leak outside of the # HyperVInspector. -@decorate_all_methods(convert_exceptions, exception_conversion_map) class HyperVInspector(virt_inspector.Inspector): def __init__(self, conf): @@ -91,6 +96,7 @@ return float(host_cpu_clock * host_cpu_count) + @convert_exceptions(exception_conversion_map, yields=False) def inspect_instance(self, instance, duration): instance_name = util.instance_name(instance) (cpu_clock_used, @@ -105,6 +111,7 @@ cpu_time=cpu_time, memory_usage=memory_usage) + @convert_exceptions(exception_conversion_map) def inspect_vnics(self, instance, duration): instance_name = util.instance_name(instance) for vnic_metrics in self._utils.get_vnic_metrics(instance_name): @@ -122,6 +129,7 @@ tx_drop=0, tx_errors=0) + @convert_exceptions(exception_conversion_map) def inspect_disks(self, instance, duration): instance_name = util.instance_name(instance) for disk_metrics in self._utils.get_disk_metrics(instance_name): @@ -134,6 +142,7 @@ write_bytes=disk_metrics['write_mb'] * units.Mi, errors=0, wr_total_times=0, rd_total_times=0) + @convert_exceptions(exception_conversion_map) def inspect_disk_latency(self, instance, duration): instance_name = util.instance_name(instance) for disk_metrics in self._utils.get_disk_latency_metrics( @@ -142,6 +151,7 @@ device=disk_metrics['instance_id'], disk_latency=disk_metrics['disk_latency'] / 1000) + @convert_exceptions(exception_conversion_map) def inspect_disk_iops(self, instance, duration): instance_name = util.instance_name(instance) for disk_metrics in self._utils.get_disk_iops_count(instance_name): diff -Nru ceilometer-10.0.1/ceilometer/compute/virt/libvirt/inspector.py ceilometer-11.0.0/ceilometer/compute/virt/libvirt/inspector.py --- ceilometer-10.0.1/ceilometer/compute/virt/libvirt/inspector.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/compute/virt/libvirt/inspector.py 2018-07-30 18:08:00.000000000 +0000 @@ -151,7 +151,10 @@ memory_swap_in = memory_swap_out = None memory_stats = domain.memoryStats() # Stat provided from libvirt is in KB, converting it to MB. 
- if 'available' in memory_stats and 'unused' in memory_stats: + if 'usable' in memory_stats and 'available' in memory_stats: + memory_used = (memory_stats['available'] - + memory_stats['usable']) / units.Ki + elif 'available' in memory_stats and 'unused' in memory_stats: memory_used = (memory_stats['available'] - memory_stats['unused']) / units.Ki if 'rss' in memory_stats: diff -Nru ceilometer-10.0.1/ceilometer/data/meters.d/meters.yaml ceilometer-11.0.0/ceilometer/data/meters.d/meters.yaml --- ceilometer-10.0.1/ceilometer/data/meters.d/meters.yaml 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/data/meters.d/meters.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -128,6 +128,8 @@ metadata: display_name: $.payload.display_name volume_type: $.payload.volume_type + image_id: $.payload.glance_metadata[?key=image_id][0].value + instance_id: $.payload.volume_attachment[0].server_id - name: 'snapshot.size' event_type: @@ -195,6 +197,9 @@ flavor_name: $.payload.instance_type display_name: $.payload.display_name image_ref: $.payload.image_meta.base_image_ref + launched_at: $.payload.launched_at + created_at: $.payload.created_at + deleted_at: $.payload.deleted_at - name: 'vcpus' event_type: *instance_events diff -Nru ceilometer-10.0.1/ceilometer/gnocchi_client.py ceilometer-11.0.0/ceilometer/gnocchi_client.py --- ceilometer-10.0.1/ceilometer/gnocchi_client.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/gnocchi_client.py 2018-07-30 18:08:00.000000000 +0000 @@ -197,11 +197,38 @@ "attributes": {"provider": {"type": "string", "min_length": 0, "max_length": 255, "required": True}} }]}, + {"desc": "add ipmi sensor resource type", + "type": "create_resource_type", + "resource_type": "ipmi_sensor", + "data": [{ + "attributes": {"node": {"type": "string", "min_length": 0, + "max_length": 255, "required": True}} + }]}, + {"desc": "add launched_at to instance", + "type": "update_attribute_type", + "resource_type": "instance", + "data": [ + {"op": "add", "path": "/attributes/launched_at", + "value": {"type": "datetime", "required": False}}, + {"op": "add", "path": "/attributes/created_at", + "value": {"type": "datetime", "required": False}}, + {"op": "add", "path": "/attributes/deleted_at", + "value": {"type": "datetime", "required": False}}, + ]}, + {"desc": "add instance_id/image_id to volume", + "type": "update_attribute_type", + "resource_type": "volume", + "data": [ + {"op": "add", "path": "/attributes/image_id", + "value": {"type": "uuid", "required": False}}, + {"op": "add", "path": "/attributes/instance_id", + "value": {"type": "uuid", "required": False}}, + ]}, ] # NOTE(sileht): We use LooseVersion because pbr can generate invalid # StrictVersion like 9.0.1.dev226 -REQUIRED_VERSION = version.LooseVersion("4.0.0") +REQUIRED_VERSION = version.LooseVersion("4.2.0") def upgrade_resource_types(conf): diff -Nru ceilometer-10.0.1/ceilometer/hardware/inspector/snmp.py ceilometer-11.0.0/ceilometer/hardware/inspector/snmp.py --- ceilometer-10.0.1/ceilometer/hardware/inspector/snmp.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/hardware/inspector/snmp.py 2018-07-30 18:08:00.000000000 +0000 @@ -195,7 +195,7 @@ def get_oid_value(oid_cache, oid_def, suffix='', host=None): oid, converter = oid_def value = oid_cache[oid + suffix] - if isinstance(value, rfc1905.NoSuchObject): + if isinstance(value, (rfc1905.NoSuchObject, rfc1905.NoSuchInstance)): LOG.debug("OID %s%s has no value" % ( oid, " on %s" % host.hostname if host else "")) return None diff 
-Nru ceilometer-10.0.1/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer.po ceilometer-11.0.0/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer.po --- ceilometer-10.0.1/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer.po 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/locale/en_GB/LC_MESSAGES/ceilometer.po 2018-07-30 18:08:00.000000000 +0000 @@ -10,7 +10,7 @@ msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-02-26 14:33+0000\n" +"POT-Creation-Date: 2018-03-01 19:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff -Nru ceilometer-10.0.1/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po ceilometer-11.0.0/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po --- ceilometer-10.0.1/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/locale/ko_KR/LC_MESSAGES/ceilometer.po 2018-07-30 18:08:00.000000000 +0000 @@ -13,7 +13,7 @@ msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-02-26 14:33+0000\n" +"POT-Creation-Date: 2018-03-01 19:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff -Nru ceilometer-10.0.1/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po ceilometer-11.0.0/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po --- ceilometer-10.0.1/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/locale/pt_BR/LC_MESSAGES/ceilometer.po 2018-07-30 18:08:00.000000000 +0000 @@ -10,7 +10,7 @@ msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-02-26 14:33+0000\n" +"POT-Creation-Date: 2018-03-01 19:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff -Nru ceilometer-10.0.1/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po ceilometer-11.0.0/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po --- ceilometer-10.0.1/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/locale/zh_CN/LC_MESSAGES/ceilometer.po 2018-07-30 18:08:00.000000000 +0000 @@ -17,7 +17,7 @@ msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-02-26 14:33+0000\n" +"POT-Creation-Date: 2018-03-01 19:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff -Nru ceilometer-10.0.1/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po ceilometer-11.0.0/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po --- ceilometer-10.0.1/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/locale/zh_TW/LC_MESSAGES/ceilometer.po 2018-07-30 18:08:00.000000000 +0000 @@ -9,7 +9,7 @@ msgstr "" "Project-Id-Version: ceilometer VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" -"POT-Creation-Date: 2018-02-26 14:33+0000\n" +"POT-Creation-Date: 2018-03-01 19:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" diff -Nru ceilometer-10.0.1/ceilometer/meter/notifications.py 
ceilometer-11.0.0/ceilometer/meter/notifications.py --- ceilometer-10.0.1/ceilometer/meter/notifications.py 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/meter/notifications.py 2018-07-30 18:08:00.000000000 +0000 @@ -153,7 +153,7 @@ nb_samples = len(sample['name']) # skip if no meters in payload if nb_samples <= 0: - raise StopIteration + return attributes = self.SAMPLE_ATTRIBUTES + ["message", "metadata"] @@ -172,7 +172,7 @@ '"%(name)s" field instead of %(total)d.' % dict(name=name, nb=nb, total=nb_samples)) - raise StopIteration + return # NOTE(sileht): Transform the sample with multiple values per # attribute into multiple samples with one value per attribute. diff -Nru ceilometer-10.0.1/ceilometer/notification.py ceilometer-11.0.0/ceilometer/notification.py --- ceilometer-10.0.1/ceilometer/notification.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/notification.py 2018-07-30 18:08:00.000000000 +0000 @@ -37,6 +37,7 @@ OPTS = [ cfg.IntOpt('pipeline_processing_queues', + deprecated_for_removal=True, default=10, min=1, help='Number of queues to parallelize workload across. This ' @@ -47,6 +48,7 @@ default=True, help='Acknowledge message when event persistence fails.'), cfg.BoolOpt('workload_partitioning', + deprecated_for_removal=True, default=False, help='Enable workload partitioning, allowing multiple ' 'notification agents to be run simultaneously.'), @@ -206,7 +208,7 @@ # NOTE(gordc): ignore batching as we want pull # to maintain sequencing as much as possible. listener = messaging.get_batch_notification_listener( - transport, targets, endpoints) + transport, targets, endpoints, allow_requeue=True) listener.start( override_pool_size=self.conf.max_parallel_requests ) @@ -238,7 +240,7 @@ self.kill_listeners([self.pipeline_listener]) self.pipeline_listener = messaging.get_batch_notification_listener( - self.transport, targets, endpoints, + self.transport, targets, endpoints, allow_requeue=True, batch_size=self.conf.notification.batch_size, batch_timeout=self.conf.notification.batch_timeout) # NOTE(gordc): set single thread to process data sequentially diff -Nru ceilometer-10.0.1/ceilometer/objectstore/rgw_client.py ceilometer-11.0.0/ceilometer/objectstore/rgw_client.py --- ceilometer-10.0.1/ceilometer/objectstore/rgw_client.py 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/objectstore/rgw_client.py 2018-07-30 18:08:11.000000000 +0000 @@ -30,11 +30,12 @@ class RGWAdminClient(object): Bucket = namedtuple('Bucket', 'name, num_objects, size') - def __init__(self, endpoint, access_key, secret_key): + def __init__(self, endpoint, access_key, secret_key, implicit_tenants): self.access_key = access_key self.secret = secret_key self.endpoint = endpoint self.hostname = urlparse.urlparse(endpoint).netloc + self.implicit_tenants = implicit_tenants def _make_request(self, path, req_params): uri = "{0}/{1}".format(self.endpoint, path) @@ -51,8 +52,12 @@ return r.json() def get_bucket(self, tenant_id): + if self.implicit_tenants: + rgw_uid = tenant_id + "$" + tenant_id + else: + rgw_uid = tenant_id path = "bucket" - req_params = {"uid": tenant_id, "stats": "true"} + req_params = {"uid": rgw_uid, "stats": "true"} json_data = self._make_request(path, req_params) stats = {'num_buckets': 0, 'buckets': [], 'size': 0, 'num_objects': 0} stats['num_buckets'] = len(json_data) @@ -66,8 +71,12 @@ return stats def get_usage(self, tenant_id): + if self.implicit_tenants: + rgw_uid = tenant_id + "$" + tenant_id + else: + rgw_uid = tenant_id path = 
"usage" - req_params = {"uid": tenant_id} + req_params = {"uid": rgw_uid} json_data = self._make_request(path, req_params) usage_data = json_data["summary"] return sum((it["total"]["ops"] for it in usage_data)) diff -Nru ceilometer-10.0.1/ceilometer/objectstore/rgw.py ceilometer-11.0.0/ceilometer/objectstore/rgw.py --- ceilometer-10.0.1/ceilometer/objectstore/rgw.py 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/objectstore/rgw.py 2018-07-30 18:08:11.000000000 +0000 @@ -40,6 +40,12 @@ help='Secret key for Radosgw Admin.') ] +CLIENT_OPTS = [ + cfg.BoolOpt('implicit_tenants', + default=False, + help='Whether RGW uses implicit tenants or not.'), +] + class _Base(plugin_base.PollsterBase): METHOD = 'bucket' @@ -49,6 +55,7 @@ super(_Base, self).__init__(conf) self.access_key = self.conf.rgw_admin_credentials.access_key self.secret = self.conf.rgw_admin_credentials.secret_key + self.implicit_tenants = self.conf.rgw_client.implicit_tenants @property def default_discovery(self): @@ -85,13 +92,14 @@ def _get_account_info(self, ksclient, tenants): endpoint = self._get_endpoint(self.conf, ksclient) if not endpoint: - raise StopIteration() + return try: from ceilometer.objectstore import rgw_client as c_rgw_client rgw_client = c_rgw_client.RGWAdminClient(endpoint, self.access_key, - self.secret) + self.secret, + self.implicit_tenants) except ImportError: raise plugin_base.PollsterPermanentError(tenants) diff -Nru ceilometer-10.0.1/ceilometer/objectstore/swift.py ceilometer-11.0.0/ceilometer/objectstore/swift.py --- ceilometer-10.0.1/ceilometer/objectstore/swift.py 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/objectstore/swift.py 2018-07-30 18:08:00.000000000 +0000 @@ -83,7 +83,7 @@ def _get_account_info(self, ksclient, tenants): endpoint = self._get_endpoint(self.conf, ksclient) if not endpoint: - raise StopIteration() + return swift_api_method = getattr(swift, '%s_account' % self.METHOD) for t in tenants: diff -Nru ceilometer-10.0.1/ceilometer/opts.py ceilometer-11.0.0/ceilometer/opts.py --- ceilometer-10.0.1/ceilometer/opts.py 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/opts.py 2018-07-30 18:08:11.000000000 +0000 @@ -93,28 +93,6 @@ help='Number of seconds between checks to see if group ' 'membership has changed'), ]), - ('dispatcher_gnocchi', ( - cfg.StrOpt( - 'filter_project', - deprecated_for_removal=True, - default='gnocchi', - help='Gnocchi project used to filter out samples ' - 'generated by Gnocchi service activity'), - cfg.StrOpt( - 'archive_policy', - deprecated_for_removal=True, - help='The archive policy to use when the dispatcher ' - 'create a new metric.'), - cfg.StrOpt( - 'resources_definition_file', - deprecated_for_removal=True, - default='gnocchi_resources.yaml', - help=('The Yaml file that defines mapping between samples ' - 'and gnocchi resources/metrics')), - cfg.FloatOpt( - 'request_timeout', default=6.05, min=0.0, - deprecated_for_removal=True, - help='Number of seconds before request to gnocchi times out'))), ('event', ceilometer.event.converter.OPTS), ('hardware', itertools.chain( ceilometer.hardware.discovery.OPTS, @@ -130,6 +108,7 @@ ('publisher', ceilometer.publisher.utils.OPTS), ('publisher_notifier', ceilometer.publisher.messaging.NOTIFIER_OPTS), ('rgw_admin_credentials', ceilometer.objectstore.rgw.CREDENTIAL_OPTS), + ('rgw_client', ceilometer.objectstore.rgw.CLIENT_OPTS), ('service_types', itertools.chain(ceilometer.image.discovery.SERVICE_OPTS, ceilometer.neutron_client.SERVICE_OPTS, diff -Nru 
ceilometer-10.0.1/ceilometer/pipeline/data/event_definitions.yaml ceilometer-11.0.0/ceilometer/pipeline/data/event_definitions.yaml --- ceilometer-10.0.1/ceilometer/pipeline/data/event_definitions.yaml 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/pipeline/data/event_definitions.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -11,6 +11,8 @@ fields: payload.display_name resource_id: fields: payload.instance_id + cell_name: + fields: payload.cell_name host: fields: publisher_id.`split(., 1, 1)` service: @@ -79,6 +81,10 @@ fields: payload.status created_at: fields: payload.created_at + image_id: + fields: payload.glance_metadata[?key=image_id][0].value + instance_id: + fields: payload.volume_attachment[0].server_id - event_type: ['volume.exists', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*'] traits: <<: *cinder_traits diff -Nru ceilometer-10.0.1/ceilometer/pipeline/data/event_pipeline.yaml ceilometer-11.0.0/ceilometer/pipeline/data/event_pipeline.yaml --- ceilometer-10.0.1/ceilometer/pipeline/data/event_pipeline.yaml 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/pipeline/data/event_pipeline.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -7,6 +7,5 @@ - event_sink sinks: - name: event_sink - transformers: publishers: - gnocchi:// diff -Nru ceilometer-10.0.1/ceilometer/pipeline/data/pipeline.yaml ceilometer-11.0.0/ceilometer/pipeline/data/pipeline.yaml --- ceilometer-10.0.1/ceilometer/pipeline/data/pipeline.yaml 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/pipeline/data/pipeline.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -33,9 +33,10 @@ - network_sink sinks: - name: meter_sink - transformers: publishers: - gnocchi:// + + # All these transformers are deprecated, and will be removed in the future, don't use them. - name: cpu_sink transformers: - name: "rate_of_change" @@ -48,6 +49,8 @@ scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" publishers: - gnocchi:// + + # All these transformers are deprecated, and will be removed in the future, don't use them. - name: cpu_delta_sink transformers: - name: "delta" @@ -57,6 +60,8 @@ growth_only: True publishers: - gnocchi:// + + # All these transformers are deprecated, and will be removed in the future, don't use them. - name: disk_sink transformers: - name: "rate_of_change" @@ -72,6 +77,8 @@ type: "gauge" publishers: - gnocchi:// + + # All these transformers are deprecated, and will be removed in the future, don't use them. - name: network_sink transformers: - name: "rate_of_change" diff -Nru ceilometer-10.0.1/ceilometer/polling/manager.py ceilometer-11.0.0/ceilometer/polling/manager.py --- ceilometer-10.0.1/ceilometer/polling/manager.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/polling/manager.py 2018-07-30 18:08:11.000000000 +0000 @@ -45,9 +45,13 @@ OPTS = [ cfg.BoolOpt('batch_polled_samples', default=True, + deprecated_for_removal=True, help='To reduce polling agent load, samples are sent to the ' 'notification agent in a batch. To gain higher ' - 'throughput at the cost of load set this to False.'), + 'throughput at the cost of load set this to False. ' + 'This option is deprecated, to disable batching set ' + 'batch_size = 0 in the polling group.' + ), ] POLLING_OPTS = [ @@ -62,6 +66,10 @@ 'config files. 
For each sub-group of the agent ' 'pool with the same partitioning_group_prefix a disjoint ' 'subset of pollsters should be loaded.'), + cfg.IntOpt('batch_size', + default=50, + help='Batch size of samples to send to notification agent, ' + 'Set to 0 to disable'), ] @@ -132,6 +140,12 @@ self.resources = collections.defaultdict(resource_factory) self._batch = self.manager.conf.batch_polled_samples + self._batch_size = self.manager.conf.polling.batch_size + + if not self._batch: + # Support deprecated way of disabling baching + self._batch_size = 0 + self._telemetry_secret = self.manager.conf.publisher.telemetry_secret def add(self, pollster, source): @@ -194,7 +208,10 @@ publisher_utils.meter_message_from_counter( sample, self._telemetry_secret )) - if self._batch: + if self._batch_size: + if len(sample_batch) >= self._batch_size: + self._send_notification(sample_batch) + sample_batch = [] sample_batch.append(sample_dict) else: self._send_notification([sample_dict]) @@ -248,7 +265,7 @@ self.extensions = list(itertools.chain(*list(extensions))) + list( itertools.chain(*list(extensions_fb))) - if self.extensions == []: + if not self.extensions: LOG.warning('No valid pollsters can be loaded from %s ' 'namespaces', namespaces) diff -Nru ceilometer-10.0.1/ceilometer/polling/plugin_base.py ceilometer-11.0.0/ceilometer/polling/plugin_base.py --- ceilometer-10.0.1/ceilometer/polling/plugin_base.py 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/polling/plugin_base.py 2018-07-30 18:08:00.000000000 +0000 @@ -21,10 +21,6 @@ from stevedore import extension -class PluginBase(object): - """Base class for all plugins.""" - - class ExtensionLoadError(Exception): """Error of loading pollster plugin. @@ -50,7 +46,7 @@ @six.add_metaclass(abc.ABCMeta) -class PollsterBase(PluginBase): +class PollsterBase(object): """Base class for plugins that support the polling API.""" def setup_environment(self): diff -Nru ceilometer-10.0.1/ceilometer/publisher/data/gnocchi_resources.yaml ceilometer-11.0.0/ceilometer/publisher/data/gnocchi_resources.yaml --- ceilometer-10.0.1/ceilometer/publisher/data/gnocchi_resources.yaml 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/publisher/data/gnocchi_resources.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -1,74 +1,101 @@ --- +archive_policy_default: ceilometer-low +archive_policies: + # NOTE(sileht): We keep "mean" for now to not break all gating that + # use the current tempest scenario. 
+ - name: ceilometer-low + aggregation_methods: + - mean + back_window: 0 + definition: + - granularity: 5 minutes + timespan: 30 days + - name: ceilometer-low-rate + aggregation_methods: + - mean + - rate:mean + back_window: 0 + definition: + - granularity: 5 minutes + timespan: 30 days resources: - resource_type: identity metrics: - - 'identity.authenticate.success' - - 'identity.authenticate.pending' - - 'identity.authenticate.failure' - - 'identity.user.created' - - 'identity.user.deleted' - - 'identity.user.updated' - - 'identity.group.created' - - 'identity.group.deleted' - - 'identity.group.updated' - - 'identity.role.created' - - 'identity.role.deleted' - - 'identity.role.updated' - - 'identity.project.created' - - 'identity.project.deleted' - - 'identity.project.updated' - - 'identity.trust.created' - - 'identity.trust.deleted' - - 'identity.role_assignment.created' - - 'identity.role_assignment.deleted' + identity.authenticate.success: + identity.authenticate.pending: + identity.authenticate.failure: + identity.user.created: + identity.user.deleted: + identity.user.updated: + identity.group.created: + identity.group.deleted: + identity.group.updated: + identity.role.created: + identity.role.deleted: + identity.role.updated: + identity.project.created: + identity.project.deleted: + identity.project.updated: + identity.trust.created: + identity.trust.deleted: + identity.role_assignment.created: + identity.role_assignment.deleted: - resource_type: ceph_account metrics: - - 'radosgw.objects' - - 'radosgw.objects.size' - - 'radosgw.objects.containers' - - 'radosgw.api.request' - - 'radosgw.containers.objects' - - 'radosgw.containers.objects.size' + radosgw.objects: + radosgw.objects.size: + radosgw.objects.containers: + radosgw.api.request: + radosgw.containers.objects: + radosgw.containers.objects.size: - resource_type: instance metrics: - - 'memory' - - 'memory.usage' - - 'memory.resident' - - 'memory.swap.in' - - 'memory.swap.out' - - 'memory.bandwidth.total' - - 'memory.bandwidth.local' - - 'vcpus' - - 'cpu' - - 'cpu.delta' - - 'cpu_util' - - 'cpu_l3_cache' - - 'disk.root.size' - - 'disk.ephemeral.size' - - 'disk.read.requests' - - 'disk.read.requests.rate' - - 'disk.write.requests' - - 'disk.write.requests.rate' - - 'disk.read.bytes' - - 'disk.read.bytes.rate' - - 'disk.write.bytes' - - 'disk.write.bytes.rate' - - 'disk.latency' - - 'disk.iops' - - 'disk.capacity' - - 'disk.allocation' - - 'disk.usage' - - 'compute.instance.booting.time' - - 'perf.cpu.cycles' - - 'perf.instructions' - - 'perf.cache.references' - - 'perf.cache.misses' + memory: + memory.usage: + memory.resident: + memory.swap.in: + memory.swap.out: + memory.bandwidth.total: + memory.bandwidth.local: + vcpus: + cpu: + archive_policy_name: ceilometer-low-rate + cpu.delta: + cpu_util: + cpu_l3_cache: + disk.root.size: + disk.ephemeral.size: + disk.read.requests: + archive_policy_name: ceilometer-low-rate + disk.read.requests.rate: + disk.write.requests: + archive_policy_name: ceilometer-low-rate + disk.write.requests.rate: + disk.read.bytes: + archive_policy_name: ceilometer-low-rate + disk.read.bytes.rate: + disk.write.bytes: + archive_policy_name: ceilometer-low-rate + disk.write.bytes.rate: + disk.latency: + disk.iops: + disk.capacity: + disk.allocation: + disk.usage: + compute.instance.booting.time: + perf.cpu.cycles: + perf.instructions: + perf.cache.references: + perf.cache.misses: attributes: host: resource_metadata.(instance_host|host) image_ref: resource_metadata.image_ref + launched_at: 
resource_metadata.launched_at + created_at: resource_metadata.created_at + deleted_at: resource_metadata.deleted_at display_name: resource_metadata.display_name flavor_id: resource_metadata.(instance_flavor_id|(flavor.id)|flavor_id) flavor_name: resource_metadata.(instance_type|(flavor.name)|flavor_name) @@ -82,48 +109,60 @@ - resource_type: instance_network_interface metrics: - - 'network.outgoing.packets.rate' - - 'network.incoming.packets.rate' - - 'network.outgoing.packets' - - 'network.incoming.packets' - - 'network.outgoing.packets.drop' - - 'network.incoming.packets.drop' - - 'network.outgoing.packets.error' - - 'network.incoming.packets.error' - - 'network.outgoing.bytes.rate' - - 'network.incoming.bytes.rate' - - 'network.outgoing.bytes' - - 'network.incoming.bytes' + network.outgoing.packets.rate: + network.incoming.packets.rate: + network.outgoing.packets: + archive_policy_name: ceilometer-low-rate + network.incoming.packets: + archive_policy_name: ceilometer-low-rate + network.outgoing.packets.drop: + archive_policy_name: ceilometer-low-rate + network.incoming.packets.drop: + archive_policy_name: ceilometer-low-rate + network.outgoing.packets.error: + archive_policy_name: ceilometer-low-rate + network.incoming.packets.error: + archive_policy_name: ceilometer-low-rate + network.outgoing.bytes.rate: + network.incoming.bytes.rate: + network.outgoing.bytes: + archive_policy_name: ceilometer-low-rate + network.incoming.bytes: + archive_policy_name: ceilometer-low-rate attributes: name: resource_metadata.vnic_name instance_id: resource_metadata.instance_id - resource_type: instance_disk metrics: - - 'disk.device.read.requests' - - 'disk.device.read.requests.rate' - - 'disk.device.write.requests' - - 'disk.device.write.requests.rate' - - 'disk.device.read.bytes' - - 'disk.device.read.bytes.rate' - - 'disk.device.write.bytes' - - 'disk.device.write.bytes.rate' - - 'disk.device.latency' - - 'disk.device.read.latency' - - 'disk.device.write.latency' - - 'disk.device.iops' - - 'disk.device.capacity' - - 'disk.device.allocation' - - 'disk.device.usage' + disk.device.read.requests: + archive_policy_name: ceilometer-low-rate + disk.device.read.requests.rate: + disk.device.write.requests: + archive_policy_name: ceilometer-low-rate + disk.device.write.requests.rate: + disk.device.read.bytes: + archive_policy_name: ceilometer-low-rate + disk.device.read.bytes.rate: + disk.device.write.bytes: + archive_policy_name: ceilometer-low-rate + disk.device.write.bytes.rate: + disk.device.latency: + disk.device.read.latency: + disk.device.write.latency: + disk.device.iops: + disk.device.capacity: + disk.device.allocation: + disk.device.usage: attributes: name: resource_metadata.disk_name instance_id: resource_metadata.instance_id - resource_type: image metrics: - - 'image.size' - - 'image.download' - - 'image.serve' + image.size: + image.download: + image.serve: attributes: name: resource_metadata.name container_format: resource_metadata.container_format @@ -134,137 +173,147 @@ - resource_type: ipmi metrics: - - 'hardware.ipmi.node.power' - - 'hardware.ipmi.node.temperature' - - 'hardware.ipmi.node.inlet_temperature' - - 'hardware.ipmi.node.outlet_temperature' - - 'hardware.ipmi.node.fan' - - 'hardware.ipmi.node.current' - - 'hardware.ipmi.node.voltage' - - 'hardware.ipmi.node.airflow' - - 'hardware.ipmi.node.cups' - - 'hardware.ipmi.node.cpu_util' - - 'hardware.ipmi.node.mem_util' - - 'hardware.ipmi.node.io_util' + hardware.ipmi.node.power: + hardware.ipmi.node.temperature: + 
hardware.ipmi.node.inlet_temperature: + hardware.ipmi.node.outlet_temperature: + hardware.ipmi.node.fan: + hardware.ipmi.node.current: + hardware.ipmi.node.voltage: + hardware.ipmi.node.airflow: + hardware.ipmi.node.cups: + hardware.ipmi.node.cpu_util: + hardware.ipmi.node.mem_util: + hardware.ipmi.node.io_util: + + - resource_type: ipmi_sensor + metrics: + - 'hardware.ipmi.power' + - 'hardware.ipmi.temperature' + - 'hardware.ipmi.current' + - 'hardware.ipmi.voltage' + attributes: + node: resource_metadata.node - resource_type: network metrics: - - 'bandwidth' - - 'ip.floating' + bandwidth: + ip.floating: event_delete: floatingip.delete.end event_attributes: id: resource_id - resource_type: stack metrics: - - 'stack.create' - - 'stack.update' - - 'stack.delete' - - 'stack.resume' - - 'stack.suspend' + stack.create: + stack.update: + stack.delete: + stack.resume: + stack.suspend: - resource_type: swift_account metrics: - - 'storage.objects.incoming.bytes' - - 'storage.objects.outgoing.bytes' - - 'storage.api.request' - - 'storage.objects.size' - - 'storage.objects' - - 'storage.objects.containers' - - 'storage.containers.objects' - - 'storage.containers.objects.size' + storage.objects.incoming.bytes: + storage.objects.outgoing.bytes: + storage.objects.size: + storage.objects: + storage.objects.containers: + storage.containers.objects: + storage.containers.objects.size: - resource_type: volume metrics: - - 'volume' - - 'volume.size' - - 'snapshot.size' - - 'volume.snapshot.size' - - 'volume.backup.size' + volume: + volume.size: + snapshot.size: + volume.snapshot.size: + volume.backup.size: attributes: display_name: resource_metadata.(display_name|name) volume_type: resource_metadata.volume_type + image_id: resource_metadata.image_id + instance_id: resource_metadata.instance_id event_delete: volume.delete.start event_attributes: id: resource_id - resource_type: volume_provider metrics: - - 'volume.provider.capacity.total' - - 'volume.provider.capacity.free' - - 'volume.provider.capacity.allocated' - - 'volume.provider.capacity.provisioned' - - 'volume.provider.capacity.virtual_free' + volume.provider.capacity.total: + volume.provider.capacity.free: + volume.provider.capacity.allocated: + volume.provider.capacity.provisioned: + volume.provider.capacity.virtual_free: - resource_type: volume_provider_pool metrics: - - 'volume.provider.pool.capacity.total' - - 'volume.provider.pool.capacity.free' - - 'volume.provider.pool.capacity.allocated' - - 'volume.provider.pool.capacity.provisioned' - - 'volume.provider.pool.capacity.virtual_free' + volume.provider.pool.capacity.total: + volume.provider.pool.capacity.free: + volume.provider.pool.capacity.allocated: + volume.provider.pool.capacity.provisioned: + volume.provider.pool.capacity.virtual_free: attributes: provider: resource_metadata.provider - resource_type: host metrics: - - 'hardware.cpu.load.1min' - - 'hardware.cpu.load.5min' - - 'hardware.cpu.load.15min' - - 'hardware.cpu.util' - - 'hardware.memory.total' - - 'hardware.memory.used' - - 'hardware.memory.swap.total' - - 'hardware.memory.swap.avail' - - 'hardware.memory.buffer' - - 'hardware.memory.cached' - - 'hardware.network.ip.outgoing.datagrams' - - 'hardware.network.ip.incoming.datagrams' - - 'hardware.system_stats.cpu.idle' - - 'hardware.system_stats.io.outgoing.blocks' - - 'hardware.system_stats.io.incoming.blocks' + hardware.cpu.load.1min: + hardware.cpu.load.5min: + hardware.cpu.load.15min: + hardware.cpu.util: + hardware.memory.total: + hardware.memory.used: + 
hardware.memory.swap.total: + hardware.memory.swap.avail: + hardware.memory.buffer: + hardware.memory.cached: + hardware.network.ip.outgoing.datagrams: + hardware.network.ip.incoming.datagrams: + hardware.system_stats.cpu.idle: + hardware.system_stats.io.outgoing.blocks: + hardware.system_stats.io.incoming.blocks: attributes: host_name: resource_metadata.resource_url - resource_type: host_disk metrics: - - 'hardware.disk.size.total' - - 'hardware.disk.size.used' - - 'hardware.disk.read.bytes' - - 'hardware.disk.write.bytes' - - 'hardware.disk.read.requests' - - 'hardware.disk.write.requests' + hardware.disk.size.total: + hardware.disk.size.used: + hardware.disk.read.bytes: + hardware.disk.write.bytes: + hardware.disk.read.requests: + hardware.disk.write.requests: attributes: host_name: resource_metadata.resource_url device_name: resource_metadata.device - resource_type: host_network_interface metrics: - - 'hardware.network.incoming.bytes' - - 'hardware.network.outgoing.bytes' - - 'hardware.network.outgoing.errors' + hardware.network.incoming.bytes: + hardware.network.outgoing.bytes: + hardware.network.outgoing.errors: attributes: host_name: resource_metadata.resource_url device_name: resource_metadata.name - resource_type: nova_compute metrics: - - 'compute.node.cpu.frequency' - - 'compute.node.cpu.idle.percent' - - 'compute.node.cpu.idle.time' - - 'compute.node.cpu.iowait.percent' - - 'compute.node.cpu.iowait.time' - - 'compute.node.cpu.kernel.percent' - - 'compute.node.cpu.kernel.time' - - 'compute.node.cpu.percent' - - 'compute.node.cpu.user.percent' - - 'compute.node.cpu.user.time' + compute.node.cpu.frequency: + compute.node.cpu.idle.percent: + compute.node.cpu.idle.time: + compute.node.cpu.iowait.percent: + compute.node.cpu.iowait.time: + compute.node.cpu.kernel.percent: + compute.node.cpu.kernel.time: + compute.node.cpu.percent: + compute.node.cpu.user.percent: + compute.node.cpu.user.time: attributes: host_name: resource_metadata.host - resource_type: manila_share metrics: - - 'manila.share.size' + manila.share.size: attributes: name: resource_metadata.name host: resource_metadata.host @@ -274,27 +323,27 @@ - resource_type: switch metrics: - - 'switch' - - 'switch.ports' + switch: + switch.ports: attributes: controller: resource_metadata.controller - resource_type: switch_port metrics: - - 'switch.port' - - 'switch.port.uptime' - - 'switch.port.receive.packets' - - 'switch.port.transmit.packets' - - 'switch.port.receive.bytes' - - 'switch.port.transmit.bytes' - - 'switch.port.receive.drops' - - 'switch.port.transmit.drops' - - 'switch.port.receive.errors' - - 'switch.port.transmit.errors' - - 'switch.port.receive.frame_error' - - 'switch.port.receive.overrun_error' - - 'switch.port.receive.crc_error' - - 'switch.port.collision.count' + switch.port: + switch.port.uptime: + switch.port.receive.packets: + switch.port.transmit.packets: + switch.port.receive.bytes: + switch.port.transmit.bytes: + switch.port.receive.drops: + switch.port.transmit.drops: + switch.port.receive.errors: + switch.port.transmit.errors: + switch.port.receive.frame_error: + switch.port.receive.overrun_error: + switch.port.receive.crc_error: + switch.port.collision.count: attributes: switch: resource_metadata.switch port_number_on_switch: resource_metadata.port_number_on_switch @@ -303,20 +352,20 @@ - resource_type: port metrics: - - 'port' - - 'port.uptime' - - 'port.receive.packets' - - 'port.transmit.packets' - - 'port.receive.bytes' - - 'port.transmit.bytes' - - 'port.receive.drops' - - 'port.receive.errors' 
+ port: + port.uptime: + port.receive.packets: + port.transmit.packets: + port.receive.bytes: + port.transmit.bytes: + port.receive.drops: + port.receive.errors: attributes: controller: resource_metadata.controller - resource_type: switch_table metrics: - - 'switch.table.active.entries' + switch.table.active.entries: attributes: controller: resource_metadata.controller switch: resource_metadata.switch diff -Nru ceilometer-10.0.1/ceilometer/publisher/gnocchi.py ceilometer-11.0.0/ceilometer/publisher/gnocchi.py --- ceilometer-10.0.1/ceilometer/publisher/gnocchi.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/publisher/gnocchi.py 2018-07-30 18:08:00.000000000 +0000 @@ -54,12 +54,12 @@ class ResourcesDefinition(object): MANDATORY_FIELDS = {'resource_type': six.string_types, - 'metrics': list} + 'metrics': (dict, list)} MANDATORY_EVENT_FIELDS = {'id': six.string_types} - def __init__(self, definition_cfg, default_archive_policy, plugin_manager): - self._default_archive_policy = default_archive_policy + def __init__(self, definition_cfg, archive_policy_default, + archive_policy_override, plugin_manager): self.cfg = definition_cfg self._check_required_and_types(self.MANDATORY_FIELDS, self.cfg) @@ -79,24 +79,44 @@ name, attr_cfg, plugin_manager) self.metrics = {} - for t in self.cfg['metrics']: - archive_policy = self.cfg.get('archive_policy', - self._default_archive_policy) - if archive_policy is None: - self.metrics[t] = {} - else: - self.metrics[t] = dict(archive_policy_name=archive_policy) + + # NOTE(sileht): Convert old list to new dict format + if isinstance(self.cfg['metrics'], list): + values = [None] * len(self.cfg['metrics']) + self.cfg['metrics'] = dict(zip(self.cfg['metrics'], values)) + + for m, extra in self.cfg['metrics'].items(): + if not extra: + extra = {} + + if not extra.get("archive_policy_name"): + extra["archive_policy_name"] = archive_policy_default + + if archive_policy_override: + extra["archive_policy_name"] = archive_policy_override + + # NOTE(sileht): For backward compat, this is after the override to + # preserve the wierd previous behavior. We don't really care as we + # deprecate it. 
+ if 'archive_policy' in self.cfg: + LOG.warning("archive_policy '%s' for a resource-type (%s) is " + "deprecated, set it for each metric instead.", + self.cfg["archive_policy"], + self.cfg["resource_type"]) + extra["archive_policy_name"] = self.cfg['archive_policy'] + + self.metrics[m] = extra @staticmethod def _check_required_and_types(expected, definition): - for field, field_type in expected.items(): + for field, field_types in expected.items(): if field not in definition: raise declarative.ResourceDefinitionException( _("Required field %s not specified") % field, definition) - if not isinstance(definition[field], field_type): + if not isinstance(definition[field], field_types): raise declarative.ResourceDefinitionException( _("Required field %(field)s should be a %(type)s") % - {'field': field, 'type': field_type}, definition) + {'field': field, 'type': field_types}, definition) @staticmethod def _ensure_list(value): @@ -178,23 +198,19 @@ # TODO(jd) allow to override Gnocchi endpoint via the host in the URL options = urlparse.parse_qs(parsed_url.query) - self.filter_project = options.get( - 'filter_project', - [conf.dispatcher_gnocchi.filter_project])[-1] + self.filter_project = options.get('filter_project', [True])[-1] resources_definition_file = options.get( - 'resources_definition_file', - [conf.dispatcher_gnocchi.resources_definition_file])[-1] - archive_policy = options.get( - 'archive_policy', - [conf.dispatcher_gnocchi.archive_policy])[-1] - self.resources_definition = self._load_resources_definitions( - conf, archive_policy, resources_definition_file) + 'resources_definition_file', ['gnocchi_resources.yaml'])[-1] + + archive_policy_override = options.get('archive_policy', [None])[-1] + self.resources_definition, self.archive_policies_definition = ( + self._load_definitions(conf, archive_policy_override, + resources_definition_file)) self.metric_map = dict((metric, rd) for rd in self.resources_definition for metric in rd.metrics) - timeout = options.get('timeout', - [conf.dispatcher_gnocchi.request_timeout])[-1] + timeout = options.get('timeout', [6.05])[-1] self._ks_client = keystone_client.get_client(conf) self.cache = None @@ -224,25 +240,40 @@ self._already_logged_event_types = set() self._already_logged_metric_names = set() + self._already_configured_archive_policies = False + @staticmethod - def _load_resources_definitions(conf, archive_policy, - resources_definition_file): + def _load_definitions(conf, archive_policy_override, + resources_definition_file): plugin_manager = extension.ExtensionManager( namespace='ceilometer.event.trait_plugin') data = declarative.load_definitions( conf, {}, resources_definition_file, pkg_resources.resource_filename(__name__, "data/gnocchi_resources.yaml")) + + archive_policy_default = data.get("archive_policy_default", "low") resource_defs = [] for resource in data.get('resources', []): try: resource_defs.append(ResourcesDefinition( resource, - archive_policy, plugin_manager)) + archive_policy_default, + archive_policy_override, + plugin_manager)) except Exception as exc: LOG.error("Failed to load resource due to error %s" % exc) - return resource_defs + return resource_defs, data.get("archive_policies", []) + + def ensures_archives_policies(self): + if not self._already_configured_archive_policies: + for ap in self.archive_policies_definition: + try: + self._gnocchi.archive_policy.get(ap["name"]) + except gnocchi_exc.ArchivePolicyNotFound: + self._gnocchi.archive_policy.create(ap) + self._already_configured_archive_policies = True 
@property def gnocchi_project_id(self): @@ -290,6 +321,8 @@ return rd, operation def publish_samples(self, data): + self.ensures_archives_policies() + # NOTE(sileht): skip sample generated by gnocchi itself data = [s for s in data if not self._is_gnocchi_activity(s)] data.sort(key=operator.attrgetter('resource_id')) @@ -317,17 +350,20 @@ 'resource_type': rd.cfg['resource_type'], 'resource': {"id": resource_id, "user_id": sample.user_id, - "project_id": sample.project_id, - "metrics": rd.metrics}} + "project_id": sample.project_id}} gnocchi_data[resource_id].setdefault( "resource_extra", {}).update(rd.sample_attributes(sample)) measures.setdefault(resource_id, {}).setdefault( - metric_name, []).append({'timestamp': sample.timestamp, - 'value': sample.volume}) - # TODO(gordc): unit should really be part of metric definition - gnocchi_data[resource_id]['resource']['metrics'][ - metric_name]['unit'] = sample.unit + metric_name, + {"measures": [], + "archive_policy_name": + rd.metrics[metric_name]["archive_policy_name"], + "unit": sample.unit} + )["measures"].append( + {'timestamp': sample.timestamp, + 'value': sample.volume} + ) try: self.batch_measures(measures, gnocchi_data) @@ -343,8 +379,8 @@ if not resource_extra: continue try: - self._if_not_cached("update", resource_type, resource, - self._update_resource, resource_extra) + self._if_not_cached(resource_type, resource['id'], + resource_extra) except gnocchi_exc.ClientException as e: LOG.error(six.text_type(e)) except Exception as e: @@ -375,8 +411,7 @@ for resource_type, resource, resource_extra in resources: try: resource.update(resource_extra) - self._if_not_cached("create", resource_type, resource, - self._create_resource) + self._create_resource(resource_type, resource) except gnocchi_exc.ResourceAlreadyExists: # NOTE(sileht): resource created in the meantime pass @@ -387,6 +422,10 @@ # and we can't patch it later del measures[resource['id']] del resource_infos[resource['id']] + else: + if self.cache and resource_extra: + self.cache.set(resource['id'], + self._hash_resource(resource_extra)) # NOTE(sileht): we have created missing resources/metrics, # now retry to post measures @@ -395,56 +434,46 @@ LOG.debug( "%d measures posted against %d metrics through %d resources", - sum(len(m) for rid in measures for m in measures[rid].values()), + sum(len(m["measures"]) + for rid in measures + for m in measures[rid].values()), sum(len(m) for m in measures.values()), len(resource_infos)) def _create_resource(self, resource_type, resource): self._gnocchi.resource.create(resource_type, resource) LOG.debug('Resource %s created', resource["id"]) - def _update_resource(self, resource_type, resource, resource_extra): - self._gnocchi.resource.update(resource_type, - resource["id"], - resource_extra) - LOG.debug('Resource %s updated', resource["id"]) + def _update_resource(self, resource_type, res_id, resource_extra): + self._gnocchi.resource.update(resource_type, res_id, resource_extra) + LOG.debug('Resource %s updated', res_id) - def _if_not_cached(self, operation, resource_type, resource, method, - *args, **kwargs): + def _if_not_cached(self, resource_type, res_id, resource_extra): if self.cache: - cache_key = resource['id'] - attribute_hash = self._check_resource_cache(cache_key, resource) - hit = False - if attribute_hash: - with self._gnocchi_resource_lock[cache_key]: + attribute_hash = self._hash_resource(resource_extra) + if self._resource_cache_diff(res_id, attribute_hash): + with self._gnocchi_resource_lock[res_id]: # NOTE(luogangyi): there 
is a possibility that the # resource was already built in cache by another # ceilometer-notification-agent when we get the lock here. - attribute_hash = self._check_resource_cache(cache_key, - resource) - if attribute_hash: - method(resource_type, resource, *args, **kwargs) - self.cache.set(cache_key, attribute_hash) + if self._resource_cache_diff(res_id, attribute_hash): + self._update_resource(resource_type, res_id, + resource_extra) + self.cache.set(res_id, attribute_hash) else: - hit = True - LOG.debug('resource cache recheck hit for ' - '%s %s', operation, cache_key) - self._gnocchi_resource_lock.pop(cache_key, None) + LOG.debug('Resource cache hit for %s', res_id) + self._gnocchi_resource_lock.pop(res_id, None) else: - hit = True - LOG.debug('Resource cache hit for %s %s', operation, cache_key) - if hit and operation == "create": - raise gnocchi_exc.ResourceAlreadyExists() + LOG.debug('Resource cache hit for %s', res_id) else: - method(resource_type, resource, *args, **kwargs) + self._update_resource(resource_type, res_id, resource_extra) - def _check_resource_cache(self, key, resource_data): + @staticmethod + def _hash_resource(resource): + return hash(tuple(i for i in resource.items() if i[0] != 'metrics')) + + def _resource_cache_diff(self, key, attribute_hash): cached_hash = self.cache.get(key) - attribute_hash = hash(tuple(i for i in resource_data.items() - if i[0] != 'metrics')) - if not cached_hash or cached_hash != attribute_hash: - return attribute_hash - else: - return None + return not cached_hash or cached_hash != attribute_hash def publish_events(self, events): for event in events: diff -Nru ceilometer-10.0.1/ceilometer/publisher/http.py ceilometer-11.0.0/ceilometer/publisher/http.py --- ceilometer-10.0.1/ceilometer/publisher/http.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/publisher/http.py 2018-07-30 18:08:00.000000000 +0000 @@ -69,6 +69,8 @@ """ + HEADERS = {'Content-type': 'application/json'} + def __init__(self, conf, parsed_url): super(HttpPublisher, self).__init__(conf, parsed_url) @@ -81,14 +83,12 @@ # is valid, if not, ValueError will be thrown. 
parsed_url.port - self.headers = {'Content-type': 'application/json'} - # Handling other configuration options in the query string params = urlparse.parse_qs(parsed_url.query) self.timeout = self._get_param(params, 'timeout', 5, int) self.max_retries = self._get_param(params, 'max_retries', 2, int) self.poster = ( - self._do_post if strutils.bool_from_string(self._get_param( + self._batch_post if strutils.bool_from_string(self._get_param( params, 'batch', True)) else self._individual_post) verify_ssl = self._get_param(params, 'verify_ssl', True) try: @@ -124,10 +124,20 @@ 'pool_maxsize': conf.max_parallel_requests} self.session = requests.Session() + if parsed_url.scheme in ["http", "https"]: + scheme = parsed_url.scheme + else: + ssl = self._get_param(params, 'ssl', False) + try: + ssl = strutils.bool_from_string(ssl, strict=True) + except ValueError: + ssl = (ssl or False) + scheme = "https" if ssl else "http" + # authentication & config params have been removed, so use URL with # updated query string self.target = urlparse.urlunsplit([ - parsed_url.scheme, + scheme, netloc, parsed_url.path, urlparse.urlencode(params), @@ -149,17 +159,19 @@ def _individual_post(self, data): for d in data: - self._do_post(d) + self._do_post(json.dumps(data)) - def _do_post(self, data): + def _batch_post(self, data): if not data: LOG.debug('Data set is empty!') return - data = json.dumps(data) + self._do_post(json.dumps(data)) + + def _do_post(self, data): LOG.trace('Message: %s', data) try: res = self.session.post(self.target, data=data, - headers=self.headers, timeout=self.timeout, + headers=self.HEADERS, timeout=self.timeout, auth=self.client_auth, cert=self.client_cert, verify=self.verify_ssl) diff -Nru ceilometer-10.0.1/ceilometer/publisher/__init__.py ceilometer-11.0.0/ceilometer/publisher/__init__.py --- ceilometer-10.0.1/ceilometer/publisher/__init__.py 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/publisher/__init__.py 2018-07-30 18:08:00.000000000 +0000 @@ -16,11 +16,15 @@ import abc +from oslo_log import log from oslo_utils import netutils import six from stevedore import driver +LOG = log.getLogger(__name__) + + def get_publisher(conf, url, namespace): """Get publisher driver and load it. @@ -29,10 +33,7 @@ """ parse_result = netutils.urlsplit(url) loaded_driver = driver.DriverManager(namespace, parse_result.scheme) - if issubclass(loaded_driver.driver, ConfigPublisherBase): - return loaded_driver.driver(conf, parse_result) - else: - return loaded_driver.driver(parse_result) + return loaded_driver.driver(conf, parse_result) @six.add_metaclass(abc.ABCMeta) diff -Nru ceilometer-10.0.1/ceilometer/publisher/prometheus.py ceilometer-11.0.0/ceilometer/publisher/prometheus.py --- ceilometer-10.0.1/ceilometer/publisher/prometheus.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/publisher/prometheus.py 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,78 @@ +# +# Copyright 2016 IBM +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from ceilometer.publisher import http +from ceilometer import sample + + +class PrometheusPublisher(http.HttpPublisher): + """Publish metering data to Prometheus Pushgateway endpoint + + This dispatcher inherits from all options of the http dispatcher. + + To use this publisher for samples, add the following section to the + /etc/ceilometer/pipeline.yaml file or simply add it to an existing + pipeline:: + + - name: meter_file + meters: + - "*" + publishers: + - prometheus://mypushgateway/metrics/job/ceilometer + + """ + + HEADERS = {'Content-type': 'plain/text'} + + def publish_samples(self, samples): + """Send a metering message for publishing + + :param samples: Samples from pipeline after transformation + """ + if not samples: + return + + data = "" + doc_done = set() + for s in samples: + # NOTE(sileht): delta can't be converted into prometheus data + # format so don't set the metric type for it + metric_type = None + if s.type == sample.TYPE_CUMULATIVE: + metric_type = "counter" + elif s.type == sample.TYPE_GAUGE: + metric_type = "gauge" + + if metric_type and s.name not in doc_done: + data += "# TYPE %s %s\n" % (s.name, metric_type) + doc_done.add(s.name) + + # NOTE(sileht): prometheus pushgateway doesn't allow to push + # timestamp_ms + # + # timestamp_ms = ( + # s.get_iso_timestamp().replace(tzinfo=None) - + # datetime.utcfromtimestamp(0) + # ).total_seconds() * 1000 + # data += '%s{resource_id="%s"} %s %d\n' % ( + # s.name, s.resource_id, s.volume, timestamp_ms) + + data += '%s{resource_id="%s"} %s\n' % ( + s.name, s.resource_id, s.volume) + self._do_post(data) + + @staticmethod + def publish_events(events): + raise NotImplementedError diff -Nru ceilometer-10.0.1/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py ceilometer-11.0.0/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py --- ceilometer-10.0.1/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/tests/unit/compute/virt/hyperv/test_inspector.py 2018-07-30 18:08:00.000000000 +0000 @@ -58,6 +58,27 @@ self._inspector.inspect_instance, mock.sentinel.instance, None) + def _yield_consumer(generator_method, *args, **kwargs): + list(generator_method(*args, **kwargs)) + + self._inspector._utils.get_vnic_metrics.side_effect = ( + os_win_exc.OSWinException) + self.assertRaises(virt_inspector.InspectorException, + _yield_consumer, self._inspector.inspect_vnics, + mock.sentinel.instance, None) + + self._inspector._utils.get_vnic_metrics.side_effect = ( + os_win_exc.HyperVException) + self.assertRaises(virt_inspector.InspectorException, + _yield_consumer, self._inspector.inspect_vnics, + mock.sentinel.instance, None) + + self._inspector._utils.get_vnic_metrics.side_effect = ( + os_win_exc.NotFound(resource='foofoo')) + self.assertRaises(virt_inspector.InstanceNotFoundException, + _yield_consumer, self._inspector.inspect_vnics, + mock.sentinel.instance, None) + def test_assert_original_traceback_maintained(self): def bar(self): foo = "foofoo" diff -Nru ceilometer-10.0.1/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py ceilometer-11.0.0/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py --- ceilometer-10.0.1/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/tests/unit/compute/virt/libvirt/test_inspector.py 2018-07-30 18:08:00.000000000 +0000 @@ -474,6 +474,27 @@ self.assertIsNone(stats.memory_swap_in) self.assertIsNone(stats.memory_swap_out) + 
def test_inspect_memory_with_usable(self): + domain = mock.Mock() + domain.info.return_value = (0, 0, 0, 2, 999999) + domain.memoryStats.return_value = {'available': 76800, + 'rss': 30000, + 'swap_in': 5120, + 'swap_out': 8192, + 'unused': 25600, + 'usable': 51200} + conn = mock.Mock() + conn.domainListGetStats.return_value = [({}, {})] + conn.lookupByUUIDString.return_value = domain + + with mock.patch('ceilometer.compute.virt.libvirt.utils.' + 'refresh_libvirt_connection', return_value=conn): + stats = self.inspector.inspect_instance(self.instance, None) + self.assertEqual(25600 / units.Ki, stats.memory_usage) + self.assertEqual(30000 / units.Ki, stats.memory_resident) + self.assertEqual(5120 / units.Ki, stats.memory_swap_in) + self.assertEqual(8192 / units.Ki, stats.memory_swap_out) + def test_inspect_perf_events_libvirt_less_than_2_3_0(self): domain = mock.Mock() domain.info.return_value = (0, 0, 51200, 2, 999999) diff -Nru ceilometer-10.0.1/ceilometer/tests/unit/hardware/inspector/test_snmp.py ceilometer-11.0.0/ceilometer/tests/unit/hardware/inspector/test_snmp.py --- ceilometer-10.0.1/ceilometer/tests/unit/hardware/inspector/test_snmp.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/tests/unit/hardware/inspector/test_snmp.py 2018-07-30 18:08:00.000000000 +0000 @@ -17,7 +17,8 @@ import fixtures import mock from oslo_utils import netutils -from pysnmp.proto.rfc1905 import noSuchObject +from pysnmp.proto import rfc1905 +import six from ceilometer.hardware.inspector import snmp from ceilometer.tests import base as test_base @@ -35,14 +36,18 @@ class FakeCommandGenerator(object): def getCmd(self, authData, transportTarget, *oids, **kwargs): - emptyOID = '1.3.6.1.4.1.2021.4.14.0' + emptyOIDs = { + '1.3.6.1.4.1.2021.4.14.0': rfc1905.noSuchObject, + '1.3.6.1.4.1.2021.4.14.1': rfc1905.noSuchInstance, + } varBinds = [ (FakeObjectName(oid), int(oid.split('.')[-1])) for oid in oids - if oid != emptyOID + if oid not in emptyOIDs ] - if emptyOID in oids: - varBinds += [(FakeObjectName(emptyOID), noSuchObject)] + for emptyOID, exc in six.iteritems(emptyOIDs): + if emptyOID in oids: + varBinds += [(FakeObjectName(emptyOID), exc)] return (None, None, 0, varBinds) def bulkCmd(authData, transportTarget, nonRepeaters, maxRepetitions, @@ -78,6 +83,13 @@ 'metadata': {}, 'post_op': None, }, + 'test_nosuch_instance': { + 'matching_type': snmp.EXACT, + 'metric_oid': ('1.3.6.1.4.1.2021.4.14.1', int), + 'metadata': {}, + 'post_op': None, + }, + } def setUp(self): @@ -124,6 +136,18 @@ except ValueError: self.fail("got ValueError when interpreting NoSuchObject return") + def test_inspect_no_such_instance(self): + cache = {} + try: + # inspect_generic() is a generator, so we explicitly need to + # iterate through it in order to trigger the exception. 
+ list(self.inspector.inspect_generic(self.host, + cache, + {}, + self.mapping['test_nosuch'])) + except ValueError: + self.fail("got ValueError when interpreting NoSuchInstance return") + def test_inspect_generic_exact(self): self.inspector._fake_post_op = self._fake_post_op cache = {} diff -Nru ceilometer-10.0.1/ceilometer/tests/unit/objectstore/test_rgw_client.py ceilometer-11.0.0/ceilometer/tests/unit/objectstore/test_rgw_client.py --- ceilometer-10.0.1/ceilometer/tests/unit/objectstore/test_rgw_client.py 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/tests/unit/objectstore/test_rgw_client.py 2018-07-30 18:08:11.000000000 +0000 @@ -152,7 +152,7 @@ def setUp(self): super(TestRGWAdminClient, self).setUp() self.client = rgw_client.RGWAdminClient('http://127.0.0.1:8080/admin', - 'abcde', 'secret') + 'abcde', 'secret', False) self.get_resp = mock.MagicMock() self.get = mock.patch('requests.get', return_value=self.get_resp).start() @@ -179,6 +179,24 @@ expected = {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, 'buckets': bucket_list} self.assertEqual(expected, actual) + self.assertEqual(1, len(self.get.call_args_list)) + self.assertEqual('foo', + self.get.call_args_list[0][1]['params']['uid']) + + def test_get_buckets_implicit_tenants(self): + self.get_resp.status_code = 200 + self.get_resp.json.return_value = buckets_json + self.client.implicit_tenants = True + actual = self.client.get_bucket('foo') + bucket_list = [rgw_client.RGWAdminClient.Bucket('somefoo', 1000, 1000), + rgw_client.RGWAdminClient.Bucket('somefoo31', 1, 42), + ] + expected = {'num_buckets': 2, 'size': 1042, 'num_objects': 1001, + 'buckets': bucket_list} + self.assertEqual(expected, actual) + self.assertEqual(1, len(self.get.call_args_list)) + self.assertEqual('foo$foo', + self.get.call_args_list[0][1]['params']['uid']) def test_get_usage(self): self.get_resp.status_code = 200 @@ -186,3 +204,17 @@ actual = self.client.get_usage('foo') expected = 7 self.assertEqual(expected, actual) + self.assertEqual(1, len(self.get.call_args_list)) + self.assertEqual('foo', + self.get.call_args_list[0][1]['params']['uid']) + + def test_get_usage_implicit_tenants(self): + self.get_resp.status_code = 200 + self.get_resp.json.return_value = usage_json + self.client.implicit_tenants = True + actual = self.client.get_usage('foo') + expected = 7 + self.assertEqual(expected, actual) + self.assertEqual(1, len(self.get.call_args_list)) + self.assertEqual('foo$foo', + self.get.call_args_list[0][1]['params']['uid']) diff -Nru ceilometer-10.0.1/ceilometer/tests/unit/polling/test_manager.py ceilometer-11.0.0/ceilometer/tests/unit/polling/test_manager.py --- ceilometer-10.0.1/ceilometer/tests/unit/polling/test_manager.py 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/tests/unit/polling/test_manager.py 2018-07-30 18:08:11.000000000 +0000 @@ -791,13 +791,17 @@ res_list="[]", source=source_name)) - def test_batching_polled_samples_false(self): + def test_batching_polled_samples_false_deprecated(self): self.CONF.set_override('batch_polled_samples', False) self._batching_samples(4, 4) - def test_batching_polled_samples_true(self): - self.CONF.set_override('batch_polled_samples', True) - self._batching_samples(4, 1) + def test_batching_polled_samples_disable_batch(self): + self.CONF.set_override('batch_size', 0, group='polling') + self._batching_samples(4, 4) + + def test_batching_polled_samples_batch_size(self): + self.CONF.set_override('batch_size', 2, group='polling') + self._batching_samples(4, 2) def 
test_batching_polled_samples_default(self): self._batching_samples(4, 1) diff -Nru ceilometer-10.0.1/ceilometer/tests/unit/publisher/test_gnocchi.py ceilometer-11.0.0/ceilometer/tests/unit/publisher/test_gnocchi.py --- ceilometer-10.0.1/ceilometer/tests/unit/publisher/test_gnocchi.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/tests/unit/publisher/test_gnocchi.py 2018-07-30 18:08:00.000000000 +0000 @@ -175,6 +175,9 @@ self.useFixture(fixtures.MockPatch( 'ceilometer.keystone_client.get_client', return_value=ks_client)) + self.useFixture(fixtures.MockPatch( + 'gnocchiclient.v1.client.Client', + return_value=mock.Mock())) self.ks_client = ks_client def test_config_load(self): @@ -198,7 +201,7 @@ plugin_manager = extension.ExtensionManager( namespace='ceilometer.event.trait.trait_plugin') rd = gnocchi.ResourcesDefinition( - resource, "low", plugin_manager) + resource, "high", "low", plugin_manager) operation = rd.event_match("image.delete") self.assertEqual('delete', operation) @@ -245,12 +248,14 @@ def _do_test_activity_filter(self, expected_measures, fake_batch): url = netutils.urlsplit("gnocchi://") d = gnocchi.GnocchiPublisher(self.conf.conf, url) + d._already_checked_archive_policies = True d.publish_samples(self.samples) self.assertEqual(1, len(fake_batch.mock_calls)) measures = fake_batch.mock_calls[0][1][0] self.assertEqual( expected_measures, - sum(len(m) for rid in measures for m in measures[rid].values())) + sum(len(m["measures"]) for rid in measures + for m in measures[rid].values())) def test_activity_filter_match_project_id(self): self.samples[0].project_id = ( @@ -266,7 +271,7 @@ 'filter_project option') def test_activity_filter_match_swift_event(self): - self.samples[0].name = 'storage.api.request' + self.samples[0].name = 'storage.objects.outgoing.bytes' self.samples[0].resource_id = 'a2d42c23-d518-46b6-96ab-3fba2e146859' self._do_test_activity_filter(1) @@ -290,6 +295,7 @@ )] url = netutils.urlsplit("gnocchi://") d = gnocchi.GnocchiPublisher(self.conf.conf, url) + d._already_checked_archive_policies = True d.publish_samples(samples) self.assertEqual(0, len(fake_batch.call_args[0][1])) @@ -310,6 +316,43 @@ testscenarios.TestWithScenarios): sample_scenarios = [ + ('cpu', dict( + sample=sample.Sample( + resource_id=str(uuid.uuid4()) + "_foobar", + name='cpu', + unit='ns', + type=sample.TYPE_CUMULATIVE, + volume=500, + user_id='test_user', + project_id='test_project', + source='openstack', + timestamp='2012-05-08 20:23:48.028195', + resource_metadata={ + 'host': 'foo', + 'image_ref': 'imageref!', + 'instance_flavor_id': 1234, + 'display_name': 'myinstance', + }, + ), + metric_attributes={ + "archive_policy_name": "ceilometer-low-rate", + "unit": "ns", + "measures": [{ + 'timestamp': '2012-05-08 20:23:48.028195', + 'value': 500 + }] + }, + postable_attributes={ + 'user_id': 'test_user', + 'project_id': 'test_project', + }, + patchable_attributes={ + 'host': 'foo', + 'image_ref': 'imageref!', + 'flavor_id': 1234, + 'display_name': 'myinstance', + }, + resource_type='instance')), ('disk.root.size', dict( sample=sample.Sample( resource_id=str(uuid.uuid4()) + "_foobar", @@ -328,10 +371,14 @@ 'display_name': 'myinstance', }, ), - measures_attributes=[{ - 'timestamp': '2012-05-08 20:23:48.028195', - 'value': 2 - }], + metric_attributes={ + "archive_policy_name": "ceilometer-low", + "unit": "GB", + "measures": [{ + 'timestamp': '2012-05-08 20:23:48.028195', + 'value': 2 + }] + }, postable_attributes={ 'user_id': 'test_user', 'project_id': 'test_project', @@ 
-342,20 +389,6 @@ 'flavor_id': 1234, 'display_name': 'myinstance', }, - metric_names=[ - 'disk.root.size', 'disk.ephemeral.size', - 'memory', 'vcpus', 'memory.usage', 'memory.resident', - 'memory.swap.in', 'memory.swap.out', - 'memory.bandwidth.total', 'memory.bandwidth.local', - 'cpu', 'cpu.delta', 'cpu_util', 'vcpus', 'disk.read.requests', - 'cpu_l3_cache', 'perf.cpu.cycles', 'perf.instructions', - 'perf.cache.references', 'perf.cache.misses', - 'disk.read.requests.rate', 'disk.write.requests', - 'disk.write.requests.rate', 'disk.read.bytes', - 'disk.read.bytes.rate', 'disk.write.bytes', - 'disk.write.bytes.rate', 'disk.latency', 'disk.iops', - 'disk.capacity', 'disk.allocation', 'disk.usage', - 'compute.instance.booting.time'], resource_type='instance')), ('hardware.ipmi.node.power', dict( sample=sample.Sample( @@ -372,25 +405,20 @@ 'useless': 'not_used', }, ), - measures_attributes=[{ - 'timestamp': '2012-05-08 20:23:48.028195', - 'value': 2 - }], + metric_attributes={ + "archive_policy_name": "ceilometer-low", + "unit": "W", + "measures": [{ + 'timestamp': '2012-05-08 20:23:48.028195', + 'value': 2 + }] + }, postable_attributes={ 'user_id': 'test_user', 'project_id': 'test_project', }, patchable_attributes={ }, - metric_names=[ - 'hardware.ipmi.node.power', 'hardware.ipmi.node.temperature', - 'hardware.ipmi.node.inlet_temperature', - 'hardware.ipmi.node.outlet_temperature', - 'hardware.ipmi.node.fan', 'hardware.ipmi.node.current', - 'hardware.ipmi.node.voltage', 'hardware.ipmi.node.airflow', - 'hardware.ipmi.node.cups', 'hardware.ipmi.node.cpu_util', - 'hardware.ipmi.node.mem_util', 'hardware.ipmi.node.io_util' - ], resource_type='ipmi')), ] @@ -460,9 +488,9 @@ self.useFixture(utils_fixture.TimeFixture(now)) expected_calls = [ - mock.call.resource.search('instance_disk', search_params), mock.call.resource.search('instance_network_interface', search_params), + mock.call.resource.search('instance_disk', search_params), mock.call.resource.update( 'instance', '9f9d01b9-4a58-4271-9e27-398b21ab20d1', {'ended_at': now.isoformat()}), @@ -496,8 +524,6 @@ @mock.patch('ceilometer.publisher.gnocchi.LOG') @mock.patch('gnocchiclient.v1.client.Client') def test_workflow(self, fakeclient_cls, logger): - url = netutils.urlsplit("gnocchi://") - self.publisher = gnocchi.GnocchiPublisher(self.conf.conf, url) fakeclient = fakeclient_cls.return_value @@ -506,8 +532,10 @@ gnocchi_id = uuid.uuid4() expected_calls = [ + mock.call.archive_policy.get("ceilometer-low"), + mock.call.archive_policy.get("ceilometer-low-rate"), mock.call.metric.batch_resources_metrics_measures( - {resource_id: {metric_name: self.measures_attributes}}, + {resource_id: {metric_name: self.metric_attributes}}, create_metrics=True) ] expected_debug = [ @@ -530,15 +558,6 @@ attributes = self.postable_attributes.copy() attributes.update(self.patchable_attributes) attributes['id'] = self.sample.resource_id - attributes['metrics'] = dict((metric_name, {}) - for metric_name in self.metric_names) - for k, v in six.iteritems(attributes['metrics']): - if k == 'disk.root.size': - v['unit'] = 'GB' - continue - if k == 'hardware.ipmi.node.power': - v['unit'] = 'W' - continue expected_calls.append(mock.call.resource.create( self.resource_type, attributes)) @@ -554,7 +573,7 @@ if not self.create_resource_fail: expected_calls.append( mock.call.metric.batch_resources_metrics_measures( - {resource_id: {metric_name: self.measures_attributes}}, + {resource_id: {metric_name: self.metric_attributes}}, create_metrics=True) ) @@ -570,7 +589,8 @@ 
batch_side_effect += [None] expected_debug.append( mock.call("%d measures posted against %d metrics through %d " - "resources", len(self.measures_attributes), 1, 1) + "resources", len(self.metric_attributes["measures"]), + 1, 1) ) if self.patchable_attributes: @@ -586,7 +606,9 @@ batch = fakeclient.metric.batch_resources_metrics_measures batch.side_effect = batch_side_effect - self.publisher.publish_samples([self.sample]) + url = netutils.urlsplit("gnocchi://") + publisher = gnocchi.GnocchiPublisher(self.conf.conf, url) + publisher.publish_samples([self.sample]) # Check that the last log message is the expected one if (self.post_measure_fail @@ -599,4 +621,5 @@ self.assertEqual(expected_calls, fakeclient.mock_calls) self.assertEqual(expected_debug, logger.debug.mock_calls) + PublisherWorkflowTest.generate_scenarios() diff -Nru ceilometer-10.0.1/ceilometer/tests/unit/publisher/test_prometheus.py ceilometer-11.0.0/ceilometer/tests/unit/publisher/test_prometheus.py --- ceilometer-10.0.1/ceilometer/tests/unit/publisher/test_prometheus.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/tests/unit/publisher/test_prometheus.py 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,132 @@ +# +# Copyright 2016 IBM +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Tests for ceilometer/publisher/prometheus.py +""" + +import datetime +import mock +from oslotest import base +import requests +from six.moves.urllib import parse as urlparse +import uuid + +from ceilometer.publisher import prometheus +from ceilometer import sample +from ceilometer import service + + +class TestPrometheusPublisher(base.BaseTestCase): + + resource_id = str(uuid.uuid4()) + + sample_data = [ + sample.Sample( + name='alpha', + type=sample.TYPE_CUMULATIVE, + unit='', + volume=1, + user_id='test', + project_id='test', + resource_id=resource_id, + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + sample.Sample( + name='beta', + type=sample.TYPE_DELTA, + unit='', + volume=3, + user_id='test', + project_id='test', + resource_id=resource_id, + timestamp=datetime.datetime.utcnow().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + sample.Sample( + name='gamma', + type=sample.TYPE_GAUGE, + unit='', + volume=5, + user_id='test', + project_id='test', + resource_id=resource_id, + timestamp=datetime.datetime.now().isoformat(), + resource_metadata={'name': 'TestPublish'}, + ), + ] + + def setUp(self): + super(TestPrometheusPublisher, self).setUp() + self.CONF = service.prepare_service([], []) + + def test_post_samples(self): + """Test publisher post.""" + parsed_url = urlparse.urlparse( + 'prometheus://localhost:90/metrics/job/os') + publisher = prometheus.PrometheusPublisher(self.CONF, parsed_url) + + res = requests.Response() + res.status_code = 200 + with mock.patch.object(requests.Session, 'post', + return_value=res) as m_req: + publisher.publish_samples(self.sample_data) + + data = """# TYPE alpha counter +alpha{resource_id="%s"} 1 +beta{resource_id="%s"} 3 +# TYPE gamma gauge +gamma{resource_id="%s"} 5 +""" % (self.resource_id, self.resource_id, self.resource_id) + + expected = [ + mock.call('http://localhost:90/metrics/job/os', + auth=None, + cert=None, + data=data, + headers={'Content-type': 'plain/text'}, + timeout=5, + verify=True) + ] + self.assertEqual(expected, m_req.mock_calls) + + def test_post_samples_ssl(self): + """Test publisher post.""" + parsed_url = urlparse.urlparse( + 'prometheus://localhost:90/metrics/job/os?ssl=1') + publisher = prometheus.PrometheusPublisher(self.CONF, parsed_url) + + res = requests.Response() + res.status_code = 200 + with mock.patch.object(requests.Session, 'post', + return_value=res) as m_req: + publisher.publish_samples(self.sample_data) + + data = """# TYPE alpha counter +alpha{resource_id="%s"} 1 +beta{resource_id="%s"} 3 +# TYPE gamma gauge +gamma{resource_id="%s"} 5 +""" % (self.resource_id, self.resource_id, self.resource_id) + + expected = [ + mock.call('https://localhost:90/metrics/job/os', + auth=None, + cert=None, + data=data, + headers={'Content-type': 'plain/text'}, + timeout=5, + verify=True) + ] + self.assertEqual(expected, m_req.mock_calls) diff -Nru ceilometer-10.0.1/ceilometer/tests/unit/publisher/test_udp.py ceilometer-11.0.0/ceilometer/tests/unit/publisher/test_udp.py --- ceilometer-10.0.1/ceilometer/tests/unit/publisher/test_udp.py 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/tests/unit/publisher/test_udp.py 2018-07-30 18:08:00.000000000 +0000 @@ -16,7 +16,6 @@ """ import datetime -import socket import mock import msgpack @@ -113,42 +112,6 @@ self.CONF = service.prepare_service([], []) self.CONF.publisher.telemetry_secret = 'not-so-secret' - def _check_udp_socket(self, url, expected_addr_family): - with mock.patch.object(socket, 
'socket') as mock_socket: - udp.UDPPublisher(self.CONF, netutils.urlsplit(url)) - mock_socket.assert_called_with(expected_addr_family, - socket.SOCK_DGRAM) - - def test_publisher_udp_socket_ipv4(self): - self._check_udp_socket('udp://127.0.0.1:4952', - socket.AF_INET) - - def test_publisher_udp_socket_ipv6(self): - self._check_udp_socket('udp://[::1]:4952', - socket.AF_INET6) - - def test_publisher_udp_socket_ipv4_hostname(self): - host = "ipv4.google.com" - try: - socket.getaddrinfo(host, None, - socket.AF_INET, - socket.SOCK_DGRAM) - except socket.gaierror: - self.skipTest("cannot resolve not running test") - url = "udp://"+host+":4952" - self._check_udp_socket(url, socket.AF_INET) - - def test_publisher_udp_socket_ipv6_hostname(self): - host = "ipv6.google.com" - try: - socket.getaddrinfo(host, None, - socket.AF_INET6, - socket.SOCK_DGRAM) - except socket.gaierror: - self.skipTest("cannot resolve not running test") - url = "udp://"+host+":4952" - self._check_udp_socket(url, socket.AF_INET6) - def test_published(self): self.data_sent = [] with mock.patch('socket.socket', diff -Nru ceilometer-10.0.1/ceilometer/tests/unit/test_bin.py ceilometer-11.0.0/ceilometer/tests/unit/test_bin.py --- ceilometer-10.0.1/ceilometer/tests/unit/test_bin.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/tests/unit/test_bin.py 2018-07-30 18:08:00.000000000 +0000 @@ -110,19 +110,3 @@ break else: self.fail('Did not detect expected warning: %s' % expected) - - def test_polling_namespaces_invalid_value_in_config(self): - content = ("[DEFAULT]\n" - "transport_url = fake://\n" - "polling_namespaces = ['central']\n") - if six.PY3: - content = content.encode('utf-8') - self.tempfile = fileutils.write_to_tempfile(content=content, - prefix='ceilometer', - suffix='.conf') - self.subp = subprocess.Popen( - ["ceilometer-polling", "--config-file=%s" % self.tempfile], - stderr=subprocess.PIPE) - __, err = self.subp.communicate() - self.assertIn(b"Exception: Valid values are ['compute', 'central', " - b"'ipmi'], but found [\"['central']\"]", err) diff -Nru ceilometer-10.0.1/ceilometer/tests/unit/volume/test_cinder.py ceilometer-11.0.0/ceilometer/tests/unit/volume/test_cinder.py --- ceilometer-10.0.1/ceilometer/tests/unit/volume/test_cinder.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/tests/unit/volume/test_cinder.py 2018-07-30 18:08:00.000000000 +0000 @@ -50,6 +50,18 @@ u'multiattach': False, u'source_volid': None, u'consistencygroup_id': None, + u"volume_image_metadata": { + u"checksum": u"17d9daa4fb8e20b0f6b7dec0d46fdddf", + u"container_format": u"bare", + u"disk_format": u"raw", + u"hw_disk_bus": u"scsi", + u"hw_scsi_model": u"virtio-scsi", + u"image_id": u"f0019ee3-523c-45ab-b0b6-3adc529673e7", + u"image_name": u"debian-jessie-scsi", + u"min_disk": u"0", + u"min_ram": u"0", + u"size": u"1572864000" + }, u'os-vol-mig-status-attr:name_id': None, u'name': None, u'bootable': u'false', @@ -71,6 +83,18 @@ u'volume_id': u'6f27bc42-c834-49ea-ae75-8d1073b37806', u'metadata': {}, u'created_at': u'2016-10-19T07:56:55.000000', + u"volume_image_metadata": { + u"checksum": u"17d9daa4fb8e20b0f6b7dec0d46fdddf", + u"container_format": u"bare", + u"disk_format": u"raw", + u"hw_disk_bus": u"scsi", + u"hw_scsi_model": u"virtio-scsi", + u"image_id": u"f0019ee3-523c-45ab-b0b6-3adc529673e7", + u"image_name": u"debian-jessie-scsi", + u"min_disk": u"0", + u"min_ram": u"0", + u"size": u"1572864000" + }, u'name': None}) ] @@ -119,6 +143,11 @@ volume_size_samples[0].project_id) 
self.assertEqual('d94c18fb-b680-4912-9741-da69ee83c94f', volume_size_samples[0].resource_id) + self.assertEqual('f0019ee3-523c-45ab-b0b6-3adc529673e7', + volume_size_samples[0].resource_metadata["image_id"]) + self.assertEqual('1ae69721-d071-4156-a2bd-b11bb43ec2e3', + volume_size_samples[0].resource_metadata + ["instance_id"]) class TestVolumeSnapshotSizePollster(base.BaseTestCase): @@ -142,6 +171,9 @@ volume_snapshot_size_samples[0].project_id) self.assertEqual('b1ea6783-f952-491e-a4ed-23a6a562e1cf', volume_snapshot_size_samples[0].resource_id) + self.assertEqual('f0019ee3-523c-45ab-b0b6-3adc529673e7', + volume_snapshot_size_samples[0].resource_metadata + ["image_id"]) class TestVolumeBackupSizePollster(base.BaseTestCase): diff -Nru ceilometer-10.0.1/ceilometer/volume/cinder.py ceilometer-11.0.0/ceilometer/volume/cinder.py --- ceilometer-10.0.1/ceilometer/volume/cinder.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/ceilometer/volume/cinder.py 2018-07-30 18:08:00.000000000 +0000 @@ -21,7 +21,16 @@ class _Base(plugin_base.PollsterBase): def extract_metadata(self, obj): - return dict((k, getattr(obj, k)) for k in self.FIELDS) + metadata = dict((k, getattr(obj, k)) for k in self.FIELDS) + if getattr(obj, "volume_image_metadata", None): + metadata["image_id"] = obj.volume_image_metadata.get("image_id") + else: + metadata["image_id"] = None + if getattr(obj, "attachments", None): + metadata["instance_id"] = obj.attachments[0]["server_id"] + else: + metadata["instance_id"] = None + return metadata class VolumeSizePollster(_Base): diff -Nru ceilometer-10.0.1/ceilometer.egg-info/entry_points.txt ceilometer-11.0.0/ceilometer.egg-info/entry_points.txt --- ceilometer-10.0.1/ceilometer.egg-info/entry_points.txt 2018-06-14 13:58:01.000000000 +0000 +++ ceilometer-11.0.0/ceilometer.egg-info/entry_points.txt 2018-07-30 18:10:26.000000000 +0000 @@ -84,12 +84,6 @@ radosgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster radosgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster radosgw.usage = ceilometer.objectstore.rgw:UsagePollster -rgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster -rgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster -rgw.objects = ceilometer.objectstore.rgw:ObjectsPollster -rgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster -rgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster -rgw.usage = ceilometer.objectstore.rgw:UsagePollster storage.containers.objects = ceilometer.objectstore.swift:ContainersObjectsPollster storage.containers.objects.size = ceilometer.objectstore.swift:ContainersSizePollster storage.objects = ceilometer.objectstore.swift:ObjectsPollster @@ -207,6 +201,7 @@ http = ceilometer.publisher.http:HttpPublisher https = ceilometer.publisher.http:HttpPublisher notifier = ceilometer.publisher.messaging:SampleNotifierPublisher +prometheus = ceilometer.publisher.prometheus:PrometheusPublisher test = ceilometer.publisher.test:TestPublisher udp = ceilometer.publisher.udp:UDPPublisher zaqar = ceilometer.publisher.zaqar:ZaqarPublisher diff -Nru ceilometer-10.0.1/ceilometer.egg-info/pbr.json ceilometer-11.0.0/ceilometer.egg-info/pbr.json --- ceilometer-10.0.1/ceilometer.egg-info/pbr.json 2018-06-14 13:58:01.000000000 +0000 +++ ceilometer-11.0.0/ceilometer.egg-info/pbr.json 2018-07-30 18:10:26.000000000 +0000 @@ -1 +1 @@ -{"git_version": "f8e7d52", "is_release": true} \ No newline at end of file +{"git_version": "994911d", "is_release": true} 
\ No newline at end of file diff -Nru ceilometer-10.0.1/ceilometer.egg-info/PKG-INFO ceilometer-11.0.0/ceilometer.egg-info/PKG-INFO --- ceilometer-10.0.1/ceilometer.egg-info/PKG-INFO 2018-06-14 13:58:01.000000000 +0000 +++ ceilometer-11.0.0/ceilometer.egg-info/PKG-INFO 2018-07-30 18:10:26.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: ceilometer -Version: 10.0.1 +Version: 11.0.0 Summary: OpenStack Telemetry Home-page: https://docs.openstack.org/ceilometer/latest/ Author: OpenStack @@ -72,6 +72,6 @@ Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Topic :: System :: Monitoring -Provides-Extra: zaqar -Provides-Extra: test Provides-Extra: gnocchi +Provides-Extra: test +Provides-Extra: zaqar diff -Nru ceilometer-10.0.1/ceilometer.egg-info/requires.txt ceilometer-11.0.0/ceilometer.egg-info/requires.txt --- ceilometer-10.0.1/ceilometer.egg-info/requires.txt 2018-06-14 13:58:01.000000000 +0000 +++ ceilometer-11.0.0/ceilometer.egg-info/requires.txt 2018-07-30 18:10:26.000000000 +0000 @@ -1,6 +1,5 @@ cachetools>=1.1.0 cotyledon>=1.3.0 -futures>=3.0 futurist>=0.11.0 debtcollector>=1.2.0 jsonpath-rw-ext>=0.1.9 @@ -32,6 +31,9 @@ tooz[zake]>=1.47.0 os-xenapi>=0.1.1 +[:(python_version=='2.7' or python_version=='2.6')] +futures>=3.0 + [gnocchi] oslo.cache>=1.5.0 gnocchiclient>=7.0.0 @@ -47,13 +49,12 @@ oslo.vmware>=1.16.0 pyOpenSSL>=0.14 sphinx>=1.6.2 -testrepository>=0.0.18 testscenarios>=0.4 testtools>=1.4.0 gabbi>=1.30.0 requests-aws>=0.1.4 -os-testr>=0.4.1 kafka-python>=1.3.2 +stestr>=1.0.0 [zaqar] python-zaqarclient>=1.0.0 diff -Nru ceilometer-10.0.1/ceilometer.egg-info/SOURCES.txt ceilometer-11.0.0/ceilometer.egg-info/SOURCES.txt --- ceilometer-10.0.1/ceilometer.egg-info/SOURCES.txt 2018-06-14 13:58:03.000000000 +0000 +++ ceilometer-11.0.0/ceilometer.egg-info/SOURCES.txt 2018-07-30 18:10:27.000000000 +0000 @@ -1,6 +1,6 @@ .coveragerc .mailmap -.testr.conf +.stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst @@ -12,7 +12,6 @@ babel.cfg bindep.txt requirements.txt -run-tests.sh setup.cfg setup.py test-requirements.txt @@ -152,6 +151,7 @@ ceilometer/publisher/gnocchi.py ceilometer/publisher/http.py ceilometer/publisher/messaging.py +ceilometer/publisher/prometheus.py ceilometer/publisher/test.py ceilometer/publisher/udp.py ceilometer/publisher/utils.py @@ -256,6 +256,7 @@ ceilometer/tests/unit/publisher/test_gnocchi.py ceilometer/tests/unit/publisher/test_http.py ceilometer/tests/unit/publisher/test_messaging_publisher.py +ceilometer/tests/unit/publisher/test_prometheus.py ceilometer/tests/unit/publisher/test_udp.py ceilometer/tests/unit/publisher/test_utils.py ceilometer/tests/unit/publisher/test_zaqar.py @@ -294,7 +295,6 @@ doc/source/contributor/2-2-collection-poll.png doc/source/contributor/2-accessmodel.png doc/source/contributor/3-Pipeline.png -doc/source/contributor/4-Transformer.png doc/source/contributor/5-multi-publish.png doc/source/contributor/6-storagemodel.png doc/source/contributor/architecture.rst @@ -354,12 +354,11 @@ etc/ceilometer/rootwrap.d/ipmi.filters playbooks/legacy/grenade-dsvm-ceilometer/post.yaml playbooks/legacy/grenade-dsvm-ceilometer/run.yaml -playbooks/legacy/telemetry-dsvm-integration-ceilometer/post.yaml -playbooks/legacy/telemetry-dsvm-integration-ceilometer/run.yaml releasenotes/notes/.placeholder releasenotes/notes/add-db-legacy-clean-tool-7b3e3714f414c448.yaml releasenotes/notes/add-disk-latency-metrics-9e5c05108a78c3d9.yaml releasenotes/notes/add-full-snmpv3-usm-support-ab540c902fa89b9d.yaml 
+releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml releasenotes/notes/add-magnum-event-4c75ed0bb268d19c.yaml releasenotes/notes/add-memory-swap-metric-f1633962ab2cf0f6.yaml releasenotes/notes/add-tool-for-migrating-data-to-gnocchi-cea8d4db68ce03d0.yaml @@ -396,6 +395,7 @@ releasenotes/notes/gnocchi-cache-b9ad4d85a1da8d3f.yaml releasenotes/notes/gnocchi-client-42cd992075ee53ab.yaml releasenotes/notes/gnocchi-host-metrics-829bcb965d8f2533.yaml +releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml releasenotes/notes/gnocchi-orchestration-3497c689268df0d1.yaml releasenotes/notes/gnocchi-udp-collector-00415e6674b5cc0f.yaml releasenotes/notes/handle-malformed-resource-definitions-ad4f69f898ced34d.yaml @@ -406,6 +406,7 @@ releasenotes/notes/improve-events-rbac-support-f216bd7f34b02032.yaml releasenotes/notes/index-events-mongodb-63cb04200b03a093.yaml releasenotes/notes/instance-discovery-new-default-7f9b451a515dddf4.yaml +releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml releasenotes/notes/keystone-v3-fab1e257c5672965.yaml releasenotes/notes/kwapi_deprecated-c92b9e72c78365f0.yaml releasenotes/notes/less-nova-polling-ac56687da3f8b1a3.yaml @@ -418,8 +419,10 @@ releasenotes/notes/pecan-debug-removed-dc737efbf911bde7.yaml releasenotes/notes/perf-events-meter-b06c2a915c33bfaf.yaml releasenotes/notes/pipeline-fallback-polling-3d962a0fff49ccdd.yaml +releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml releasenotes/notes/polling-definition-efffb92e3810e571.yaml releasenotes/notes/polling-deprecation-4d5b83180893c053.yaml +releasenotes/notes/prometheus-bcb201cfe46d5778.yaml releasenotes/notes/refresh-legacy-cache-e4dbbd3e2eeca70b.yaml releasenotes/notes/remove-alarms-4df3cdb4f1fb5faa.yaml releasenotes/notes/remove-cadf-http-f8449ced3d2a29d4.yaml @@ -430,6 +433,7 @@ releasenotes/notes/remove-exchange-control-options-75ecd49423639068.yaml releasenotes/notes/remove-file-dispatcher-56ba1066c20d314a.yaml releasenotes/notes/remove-gnocchi-dispatcher-dd588252976c2abb.yaml +releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml releasenotes/notes/remove-http-dispatcher-1afdce1d1dc3158d.yaml releasenotes/notes/remove-kafka-broker-publisher-7026b370cfc831db.yaml releasenotes/notes/remove-nova-http-log-option-64e97a511e58da5d.yaml @@ -437,7 +441,9 @@ releasenotes/notes/remove-refresh-pipeline-618af089c5435db7.yaml releasenotes/notes/remove-rpc-collector-d0d0a354140fd107.yaml releasenotes/notes/remove-shuffle_time_before_polling_task-option-05a4d225236c64b1.yaml +releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml releasenotes/notes/rename-ceilometer-dbsync-eb7a1fa503085528.yaml +releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml releasenotes/notes/scan-domains-for-tenants-8f8c9edcb74cc173.yaml releasenotes/notes/selective-pipeline-notification-47e8a390b1c7dcc4.yaml releasenotes/notes/ship-yaml-files-33aa5852bedba7f0.yaml @@ -454,9 +460,11 @@ releasenotes/notes/support-unique-meter-query-221c6e0c1dc1b726.yaml releasenotes/notes/thread-safe-matching-4a635fc4965c5d4c.yaml releasenotes/notes/tooz-coordination-system-d1054b9d1a5ddf32.yaml +releasenotes/notes/transformer-ed4b1ea7d1752576.yaml releasenotes/notes/unify-timestamp-of-polled-data-fbfcff43cd2d04bc.yaml releasenotes/notes/use-glance-v2-in-image-pollsters-137a315577d5dc4c.yaml releasenotes/notes/use-notification-transport-url-489f3d31dc66c4d2.yaml +releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml 
releasenotes/notes/zaqar-publisher-f7efa030b71731f4.yaml releasenotes/source/conf.py releasenotes/source/index.rst @@ -465,8 +473,10 @@ releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst +releasenotes/source/queens.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder +releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po +releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po tools/__init__.py -tools/pretty_tox.sh tools/send_test_data.py \ No newline at end of file diff -Nru ceilometer-10.0.1/ChangeLog ceilometer-11.0.0/ChangeLog --- ceilometer-10.0.1/ChangeLog 2018-06-14 13:58:00.000000000 +0000 +++ ceilometer-11.0.0/ChangeLog 2018-07-30 18:10:25.000000000 +0000 @@ -1,18 +1,72 @@ CHANGES ======= -10.0.1 +11.0.0 ------ +* Migrate to stestr for running tests +* Remove unused tox target +* Add gating on py37 +* udp: remove wrong and useless tests +* Add support for Python 3.7 +* publisher: allow to not inherit from ConfigPublisherBase +* Remove deprecated gnocchi\_dispatcher option group +* gnocchi: don't create metrics with resource +* polling: remove useless base class +* Imported Translations from Zanata +* Remove dead link in doc file +* objectstore/rgw: Add config option to support RGW implicit tenants +* Fix broken link to ceph radosgw usage logging docs +* fix tox python3 overrides +* Wrong block format in rst file +* del unused module +* Ability to define batch size off polled samples +* Adjust the controller installation position +* fix typos +* inspector: memory: use usable of memoryStats if available +* snmp: ignore rfc1905.NoSuchInstance result * snmp: make oid value retrieval more solid -* avoid verbose tracebacks on known errors +* Replace Chinese punctuation with English punctuation +* Remove restiction on allowable namespaces in polling * Imported Translations from Zanata -* update ipmi docs -* add disk.device.\*.latency meters +* Imported Translations from Zanata +* Deprecating transformers and pipeline partitioning +* Unlist non existent storage.api.request +* Stop gnocchi during unstack +* publisher: add a Prometheus Pushgateway publisher +* Imported Translations from Zanata +* Remove liusheng and llu from the maintainers +* supplement "zakar" and "https" description +* fix errors about words spelling +* Fix doc title format error +* Modify the empty list ensure method +* Imported Translations from Zanata +* Imported Translations from Zanata +* Imported Translations from Zanata +* fix a typo in documentation +* Imported Translations from Zanata +* Imported Translations from Zanata +* Imported Translations from Zanata +* Don't use gnocchiclient during publisher init +* gnocchi: configure archive policies on Ceilo side +* cinder: link volume to image and instance +* Add new attributes to instance +* Imported Translations from Zanata +* Imported Translations from Zanata +* hyper-v: Converts all os-win exceptions +* remove integration playbooks +* fix meter formating +* the previous patch was missing a 'continue' * Gracefully handle missing metadata in libvirt xml -* ignore compute.instance.update for metrics +* add disk.device.\*.latency meters +* remove ceilometerclient references +* avoid verbose tracebacks on known errors +* update ipmi docs +* add ipmi sensor data to gnocchi +* Fix glossary.rst +* Remove deprecated rgw meters * Use msgpack instead of msgpack-python -* Update .gitreview for stable/queens +* Update reno for stable/queens 10.0.0 ------ @@ -21,11 +75,16 @@ * add 
hardware.disk.read|write.\* stats * add volume.provider.\* meters to docs * add note explaining gnocchi coordination\_url +* capture cell name * Deprecate aggregated disk.\* metrics on instance +* ignore compute.instance.update for metrics * utils: move hash\_of\_set where it's solely used * utils: move kill\_listeners to ceilometer.notification * Imported Translations from Zanata +* set cache with attributes only +* Cached should not be used when creating gnocchi resources * Zuul: Remove project name +* Remove run-tests.sh * utils: move publisher-only utils functions in publisher * remove repeated host * Imported Translations from Zanata @@ -108,6 +167,7 @@ * remove duplicate polling.yaml setup * remove test\_manager\_exception\_persistency * minor polling cleaning +* Allow requeues in when there is a failure in EventsNotificationEndpoint * Add cpu\_l3\_cache to polling yaml * simplify cache generation * Put configurations to appropriate part diff -Nru ceilometer-10.0.1/debian/changelog ceilometer-11.0.0/debian/changelog --- ceilometer-10.0.1/debian/changelog 2018-06-25 10:21:02.000000000 +0000 +++ ceilometer-11.0.0/debian/changelog 2018-08-07 17:22:17.000000000 +0000 @@ -1,3 +1,12 @@ +ceilometer (1:11.0.0-0ubuntu1) cosmic; urgency=medium + + * New upstream release for OpenStack Rocky. + * d/control: Align (Build-)Depends with upstream. + * d/control: Enable autopkgtest-pkg-python testsuite. + * d/tests/*: Enable Py3 tests for daemons and add shebang tests. + + -- Corey Bryant Tue, 07 Aug 2018 13:22:17 -0400 + ceilometer (1:10.0.1-0ubuntu1) cosmic; urgency=medium * New upstream point release. diff -Nru ceilometer-10.0.1/debian/control ceilometer-11.0.0/debian/control --- ceilometer-10.0.1/debian/control 2018-06-25 10:21:02.000000000 +0000 +++ ceilometer-11.0.0/debian/control 2018-08-07 17:22:17.000000000 +0000 @@ -7,15 +7,15 @@ dh-python, openstack-pkg-tools (>= 23~), python-all, - python-pbr (>= 1.10.0), + python-pbr (>= 1.6), python-setuptools, python3-all, - python3-pbr (>= 1.10.0), + python3-pbr (>= 1.6), python3-setuptools, python3-sphinx (>= 1.6.2), Build-Depends-Indep: python-awsauth (>= 0.1.4), - python-cachetools (>= 2.0.0), + python-cachetools (>= 1.1.0), python-cinderclient (>= 1:1.6.0), python-concurrent.futures (>= 3.0), python-cotyledon (>= 1.3.0), @@ -30,46 +30,47 @@ python-gnocchiclient (>= 4.0.0), python-jsonpath-rw-ext (>= 0.1.9), python-kafka (>= 1.3.2), - python-keystoneauth1 (>= 3.3.0), + python-keystoneauth1 (>= 2.1.0), python-keystoneclient (>= 1:1.6.0), python-lxml (>= 2.3), - python-mock (>= 2.0.0), + python-mock (>= 1.2), python-monotonic, python-msgpack (>= 0.4.0), python-neutronclient (>= 1:4.2.0), python-novaclient (>= 2:2.29.0), python-openssl (>= 0.14), - python-openstackdocstheme (>= 1.18.1), + python-openstackdocstheme (>= 1.11.0), python-os-testr (>= 1.0.0), python-os-win (>= 0.2.3), - python-os-xenapi (>= 0.3.1), + python-os-xenapi (>= 0.1.1), python-oslo.cache (>= 1.26.0), - python-oslo.concurrency (>= 3.25.0), - python-oslo.config (>= 1:5.1.0), - python-oslo.i18n (>= 3.15.3), - python-oslo.log (>= 3.36.0), - python-oslo.messaging (>= 5.29.0), + python-oslo.concurrency (>= 3.5.0), + python-oslo.config (>= 1:3.22.0), + python-oslo.i18n (>= 2.1.0), + python-oslo.log (>= 1.14.0), + python-oslo.messaging (>= 5.12.0), python-oslo.reports (>= 0.6.0), python-oslo.rootwrap (>= 2.0.0), python-oslo.serialization (>= 2.18.0), - python-oslo.utils (>= 3.33.0), + python-oslo.utils (>= 3.5.0), python-oslo.vmware (>= 1.16.0), python-oslotest (>= 1:2.15.0), 
python-pysnmp4 (>= 4.2.3), python-requests (>= 2.8.1), - python-six (>= 1.10.0), + python-six (>= 1.9.0), + python-stestr (>= 1.0.0), python-stevedore (>= 1:1.9.0), python-swiftclient (>= 1:2.2.0), python-tempest (>= 1:14.0.0), python-tenacity (>= 3.2.1), python-testrepository (>= 0.0.18), python-testscenarios (>= 0.4), - python-testtools (>= 2.2.0), + python-testtools (>= 1.4.0), python-tooz (>= 1.47.0), python-yaml (>= 3.1.0), python-zaqarclient (>= 1.0.0), python3-awsauth (>= 0.1.4), - python3-cachetools (>= 2.0.0), + python3-cachetools (>= 1.1.0), python3-cinderclient (>= 1:1.6.0), python3-cotyledon (>= 1.3.0), python3-coverage (>= 3.6), @@ -83,47 +84,50 @@ python3-gnocchiclient (>= 4.0.0), python3-jsonpath-rw-ext (>= 0.1.9), python3-kafka (>= 1.3.2), - python3-keystoneauth1 (>= 3.3.0), + python3-keystoneauth1 (>= 2.1.0), python3-keystoneclient (>= 1:1.6.0), python3-lxml (>= 2.3), - python3-mock (>= 2.0.0), + python3-mock (>= 1.2), python3-monotonic, python3-msgpack (>= 0.4.0), python3-neutronclient (>= 1:4.2.0), python3-novaclient (>= 2:2.29.0), python3-openssl (>= 0.14), - python3-openstackdocstheme (>= 1.18.1), + python3-openstackdocstheme (>= 1.11.0), python3-os-testr (>= 1.0.0), python3-os-win (>= 0.2.3), - python3-os-xenapi (>= 0.3.1), + python3-os-xenapi (>= 0.1.1), python3-oslo.cache (>= 1.26.0), - python3-oslo.concurrency (>= 3.25.0), - python3-oslo.config (>= 1:5.1.0), - python3-oslo.i18n (>= 3.15.3), - python3-oslo.log (>= 3.36.0), - python3-oslo.messaging (>= 5.29.0), + python3-oslo.concurrency (>= 3.5.0), + python3-oslo.config (>= 1:3.22.0), + python3-oslo.i18n (>= 2.1.0), + python3-oslo.log (>= 1.14.0), + python3-oslo.messaging (>= 5.12.0), python3-oslo.reports (>= 0.6.0), python3-oslo.rootwrap (>= 2.0.0), python3-oslo.serialization (>= 2.18.0), - python3-oslo.utils (>= 3.33.0), + python3-oslo.utils (>= 3.5.0), python3-oslo.vmware (>= 1.16.0), python3-oslotest (>= 1:2.15.0), python3-pysnmp4 (>= 4.2.3), + python3-reno (>= 1.6.2), python3-requests (>= 2.8.1), - python3-six (>= 1.10.0), + python3-six (>= 1.9.0), + python3-stestr (>= 1.0.0), python3-stevedore (>= 1:1.9.0), python3-swiftclient (>= 1:2.2.0), python3-tempest (>= 1:14.0.0), python3-tenacity (>= 3.2.1), python3-testrepository (>= 0.0.18), python3-testscenarios (>= 0.4), - python3-testtools (>= 2.2.0), + python3-testtools (>= 1.4.0), python3-tooz (>= 1.47.0), python3-yaml (>= 3.1.0), python3-zaqarclient (>= 1.0.0), Standards-Version: 4.1.2 Vcs-Browser: https://git.launchpad.net/~ubuntu-server-dev/ubuntu/+source/ceilometer Vcs-Git: git://git.launchpad.net/~ubuntu-server-dev/ubuntu/+source/ceilometer +Testsuite: autopkgtest-pkg-python Package: ceilometer-agent-central Architecture: all @@ -236,7 +240,7 @@ Package: python-ceilometer Architecture: all Depends: - python-cachetools (>= 2.0.0), + python-cachetools (>= 1.1.0), python-cinderclient (>= 1:1.6.0), python-concurrent.futures (>= 3.0), python-cotyledon (>= 1.3.0), @@ -246,7 +250,7 @@ python-futurist (>= 0.11.0), python-glanceclient (>= 1:2.0.0), python-jsonpath-rw-ext (>= 0.1.9), - python-keystoneauth1 (>= 3.3.0), + python-keystoneauth1 (>= 2.1.0), python-keystoneclient (>= 1:1.6.0), python-libvirt, python-lxml (>= 2.3), @@ -254,21 +258,21 @@ python-msgpack (>= 0.4.0), python-neutronclient (>= 1:4.2.0), python-novaclient (>= 2:2.29.0), - python-os-xenapi (>= 0.3.1), + python-os-xenapi (>= 0.1.1), python-oslo.cache (>= 1.26.0), - python-oslo.concurrency (>= 3.25.0), - python-oslo.config (>= 1:5.1.0), - python-oslo.i18n (>= 3.15.3), - python-oslo.log (>= 3.36.0), - 
python-oslo.messaging (>= 5.29.0), + python-oslo.concurrency (>= 3.5.0), + python-oslo.config (>= 1:3.22.0), + python-oslo.i18n (>= 2.1.0), + python-oslo.log (>= 1.14.0), + python-oslo.messaging (>= 5.12.0), python-oslo.reports (>= 0.6.0), python-oslo.rootwrap (>= 2.0.0), python-oslo.serialization (>= 2.18.0), - python-oslo.utils (>= 3.33.0), - python-pbr (>= 1.10.0), + python-oslo.utils (>= 3.5.0), + python-pbr (>= 1.6), python-pysnmp4 (>= 4.2.3), python-requests (>= 2.8.1), - python-six (>= 1.10.0), + python-six (>= 1.9.0), python-stevedore (>= 1:1.9.0), python-swiftclient (>= 1:2.2.0), python-tenacity (>= 3.2.1), @@ -301,7 +305,7 @@ Package: python3-ceilometer Architecture: all Depends: - python3-cachetools (>= 2.0.0), + python3-cachetools (>= 1.1.0), python3-cinderclient (>= 1:1.6.0), python3-cotyledon (>= 1.3.0), python3-croniter, @@ -310,7 +314,7 @@ python3-futurist (>= 0.11.0), python3-glanceclient (>= 1:2.0.0), python3-jsonpath-rw-ext (>= 0.1.9), - python3-keystoneauth1 (>= 3.3.0), + python3-keystoneauth1 (>= 2.1.0), python3-keystoneclient (>= 1:1.6.0), python3-libvirt, python3-lxml (>= 2.3), @@ -318,21 +322,21 @@ python3-msgpack (>= 0.4.0), python3-neutronclient (>= 1:4.2.0), python3-novaclient (>= 2:2.29.0), - python3-os-xenapi (>= 0.3.1), + python3-os-xenapi (>= 0.1.1), python3-oslo.cache (>= 1.26.0), - python3-oslo.concurrency (>= 3.25.0), - python3-oslo.config (>= 1:5.1.0), - python3-oslo.i18n (>= 3.15.3), - python3-oslo.log (>= 3.36.0), - python3-oslo.messaging (>= 5.29.0), + python3-oslo.concurrency (>= 3.5.0), + python3-oslo.config (>= 1:3.22.0), + python3-oslo.i18n (>= 2.1.0), + python3-oslo.log (>= 1.14.0), + python3-oslo.messaging (>= 5.12.0), python3-oslo.reports (>= 0.6.0), python3-oslo.rootwrap (>= 2.0.0), python3-oslo.serialization (>= 2.18.0), - python3-oslo.utils (>= 3.33.0), - python3-pbr (>= 1.10.0), + python3-oslo.utils (>= 3.5.0), + python3-pbr (>= 1.6), python3-pysnmp4 (>= 4.2.3), python3-requests (>= 2.8.1), - python3-six (>= 1.10.0), + python3-six (>= 1.9.0), python3-stevedore (>= 1:1.9.0), python3-swiftclient (>= 1:2.2.0), python3-tenacity (>= 3.2.1), diff -Nru ceilometer-10.0.1/debian/tests/ceilometer-shebangs-py2 ceilometer-11.0.0/debian/tests/ceilometer-shebangs-py2 --- ceilometer-10.0.1/debian/tests/ceilometer-shebangs-py2 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/debian/tests/ceilometer-shebangs-py2 2018-08-07 17:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +#!/bin/bash +#--------------------- +# Testing /usr/bin/ceilometer-* shebangs +#--------------------- +set -e + +BINARIES=('ceilometer-polling' 'ceilometer-agent-notification' \ + 'ceilometer-send-sample' 'ceilometer-upgrade' 'ceilometer-rootwrap') + +ret=0 + +for binary in "${BINARIES[@]}"; do + if ! `dirname $0`/test-shebang.py $binary python2.7; then + ret=1 + fi +done + +exit $ret diff -Nru ceilometer-10.0.1/debian/tests/ceilometer-shebangs-py3 ceilometer-11.0.0/debian/tests/ceilometer-shebangs-py3 --- ceilometer-10.0.1/debian/tests/ceilometer-shebangs-py3 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/debian/tests/ceilometer-shebangs-py3 2018-08-07 17:22:17.000000000 +0000 @@ -0,0 +1,18 @@ +#!/bin/bash +#--------------------- +# Testing /usr/bin/ceilometer-* shebangs +#--------------------- +set -e + +BINARIES=('ceilometer-polling' 'ceilometer-agent-notification' \ + 'ceilometer-send-sample' 'ceilometer-upgrade' 'ceilometer-rootwrap') + +ret=0 + +for binary in "${BINARIES[@]}"; do + if ! 
`dirname $0`/test-shebang.py $binary python3.6; then + ret=1 + fi +done + +exit $ret diff -Nru ceilometer-10.0.1/debian/tests/control ceilometer-11.0.0/debian/tests/control --- ceilometer-10.0.1/debian/tests/control 2018-06-25 10:21:02.000000000 +0000 +++ ceilometer-11.0.0/debian/tests/control 2018-08-07 17:22:17.000000000 +0000 @@ -1,2 +1,9 @@ -Tests: test-services test-python-ceilometer -Depends: python-ceilometer, ceilometer-common, ceilometer-agent-compute, ceilometer-agent-central, ceilometer-agent-ipmi, ceilometer-agent-notification, ceilometer-polling, rabbitmq-server, python-libvirt, libvirt-daemon-system +Tests: test-services ceilometer-shebangs-py3 +Depends: python3-ceilometer, ceilometer-common, ceilometer-agent-compute, + ceilometer-agent-central, ceilometer-agent-ipmi, ceilometer-agent-notification, + ceilometer-polling, rabbitmq-server, python-libvirt, libvirt-daemon-system + +Tests: test-services ceilometer-shebangs-py2 +Depends: ceilometer-common, ceilometer-agent-compute, ceilometer-agent-central, + ceilometer-agent-ipmi, ceilometer-agent-notification, ceilometer-polling, + rabbitmq-server, python-libvirt, libvirt-daemon-system diff -Nru ceilometer-10.0.1/debian/tests/test-python-ceilometer ceilometer-11.0.0/debian/tests/test-python-ceilometer --- ceilometer-10.0.1/debian/tests/test-python-ceilometer 2018-06-25 10:21:02.000000000 +0000 +++ ceilometer-11.0.0/debian/tests/test-python-ceilometer 1970-01-01 00:00:00.000000000 +0000 @@ -1,6 +0,0 @@ -#!/usr/bin/python -#------------------------- -# Testing client utilities -#------------------------- -import ceilometer -print "Imported ceilometer module OK" diff -Nru ceilometer-10.0.1/debian/tests/test-shebang.py ceilometer-11.0.0/debian/tests/test-shebang.py --- ceilometer-10.0.1/debian/tests/test-shebang.py 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/debian/tests/test-shebang.py 2018-08-07 17:22:17.000000000 +0000 @@ -0,0 +1,19 @@ +#!/usr/bin/env python3 +""" +Test Python shebang in /usr/bin/ binary +""" +import sys + +ret = 0 +bin_path = "/usr/bin/{}".format(sys.argv[1]) +shebang = "#!/usr/bin/{}".format(sys.argv[2]) + +with open(bin_path) as f: + first_line = f.readline().rstrip().replace(" ", "") + if first_line != shebang: + print("ERROR: shebang '{}' not found in {}".format(shebang, bin_path)) + ret = 1 + else: + print("OK") + +sys.exit(ret) diff -Nru ceilometer-10.0.1/devstack/plugin.sh ceilometer-11.0.0/devstack/plugin.sh --- ceilometer-10.0.1/devstack/plugin.sh 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/devstack/plugin.sh 2018-07-30 18:08:00.000000000 +0000 @@ -377,8 +377,8 @@ # stop_ceilometer() - Stop running processes function stop_ceilometer { - # Kill the ceilometer screen windows - for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification; do + # Kill the ceilometer and gnocchi services + for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification gnocchi-api gnocchi-metricd; do stop_process $serv done } diff -Nru ceilometer-10.0.1/devstack/settings ceilometer-11.0.0/devstack/settings --- ceilometer-10.0.1/devstack/settings 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/devstack/settings 2018-07-30 18:08:00.000000000 +0000 @@ -48,10 +48,7 @@ CEILOMETER_EVENT_ALARM=${CEILOMETER_EVENT_ALARM:-False} -# Set up default directories for client and middleware -GITREPO["python-ceilometerclient"]=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git} 
-GITBRANCH["python-ceilometerclient"]=${CEILOMETERCLIENT_BRANCH:-master} -GITDIR["python-ceilometerclient"]=$DEST/python-ceilometerclient +# Set up default directories for middleware GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware # Make sure panko plugin is enabled before ceilometer diff -Nru ceilometer-10.0.1/doc/source/admin/index.rst ceilometer-11.0.0/doc/source/admin/index.rst --- ceilometer-10.0.1/doc/source/admin/index.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/admin/index.rst 2018-07-30 18:08:00.000000000 +0000 @@ -1,8 +1,8 @@ .. _admin: -===================== - Administrator Guide -===================== +=================== +Administrator Guide +=================== Overview diff -Nru ceilometer-10.0.1/doc/source/admin/telemetry-best-practices.rst ceilometer-11.0.0/doc/source/admin/telemetry-best-practices.rst --- ceilometer-10.0.1/doc/source/admin/telemetry-best-practices.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/admin/telemetry-best-practices.rst 2018-07-30 18:08:00.000000000 +0000 @@ -27,9 +27,3 @@ central and compute agents as necessary. The agents are designed to scale horizontally. For more information refer to the `high availability guide `_. - -#. `workload_partitioning` of notification agents is only required if - the pipeline configuration leverages transformers. It may also be enabled if - batching is required to minimize load on the defined publisher targets. If - transformers are not enabled, multiple agents may still be deployed without - `workload_partitioning` and processing will be done greedily. diff -Nru ceilometer-10.0.1/doc/source/admin/telemetry-data-collection.rst ceilometer-11.0.0/doc/source/admin/telemetry-data-collection.rst --- ceilometer-10.0.1/doc/source/admin/telemetry-data-collection.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/admin/telemetry-data-collection.rst 2018-07-30 18:08:00.000000000 +0000 @@ -39,10 +39,9 @@ samples. To enable selective data models, set the required pipelines using `pipelines` option under the `[notification]` section. -Additionally, the notification agent is responsible for all data processing -such as transformations and publishing. After processing, the data is sent -to any supported publisher target such as gnocchi or panko. These services -persist the data in configured databases. +Additionally, the notification agent is responsible to send to any supported +publisher target such as gnocchi or panko. These services persist the data in +configured databases. The different OpenStack services emit several notifications about the various types of events that happen in the system during normal diff -Nru ceilometer-10.0.1/doc/source/admin/telemetry-data-pipelines.rst ceilometer-11.0.0/doc/source/admin/telemetry-data-pipelines.rst --- ceilometer-10.0.1/doc/source/admin/telemetry-data-pipelines.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/admin/telemetry-data-pipelines.rst 2018-07-30 18:08:00.000000000 +0000 @@ -6,7 +6,7 @@ The mechanism by which data is processed is called a pipeline. Pipelines, at the configuration level, describe a coupling between sources of data and -the corresponding sinks for transformation and publication of data. This +the corresponding sinks for publication of data. This functionality is handled by the notification agents. A source is a producer of data: ``samples`` or ``events``. In effect, it is a @@ -17,13 +17,9 @@ to one or more sinks for publication. 
A sink, on the other hand, is a consumer of data, providing logic for -the transformation and publication of data emitted from related sources. +the publication of data emitted from related sources. -In effect, a sink describes a chain of handlers. The chain starts with -zero or more transformers and ends with one or more publishers. The -first transformer in the chain is passed data from the corresponding -source, takes some action such as deriving rate of change, performing -unit conversion, or aggregating, before publishing_. +In effect, a sink describes a list of one or more publishers. .. _telemetry-pipeline-configuration: @@ -52,7 +48,6 @@ - 'sink name' sinks: - name: 'sink name' - transformers: 'definition of transformers' publishers: - 'list of publishers' @@ -97,30 +92,8 @@ same pipeline. Wildcard and included meters cannot co-exist in the same pipeline definition section. -The transformers section of a pipeline sink provides the possibility to -add a list of transformer definitions. The available transformers are: - -.. list-table:: - :widths: 50 50 - :header-rows: 1 - - * - Name of transformer - - Reference name for configuration - * - Accumulator - - accumulator - * - Aggregator - - aggregator - * - Arithmetic - - arithmetic - * - Rate of change - - rate\_of\_change - * - Unit conversion - - unit\_conversion - * - Delta - - delta - The publishers section contains the list of publishers, where the -samples data should be sent after the possible transformations. +samples data should be sent. Similarly, the event pipeline definition looks like: @@ -140,229 +113,6 @@ The event filter uses the same filtering logic as the meter pipeline. -.. _telemetry-transformers: - -Transformers ------------- - -.. note:: - - Transformers maintain data in memory and therefore do not guarantee - durability in certain scenarios. A more durable and efficient solution - may be achieved post-storage using solutions like Gnocchi. - -The definition of transformers can contain the following fields: - -name - Name of the transformer. - -parameters - Parameters of the transformer. - -The parameters section can contain transformer specific fields, like -source and target fields with different subfields in case of the rate of -change, which depends on the implementation of the transformer. - -The following are supported transformers: - -Rate of change transformer -`````````````````````````` -Transformer that computes the change in value between two data points in time. -In the case of the transformer that creates the ``cpu_util`` meter, the -definition looks like: - -.. code-block:: yaml - - transformers: - - name: "rate_of_change" - parameters: - target: - name: "cpu_util" - unit: "%" - type: "gauge" - scale: "100.0 / (10**9 * (resource_metadata.cpu_number or 1))" - -The rate of change transformer generates the ``cpu_util`` meter -from the sample values of the ``cpu`` counter, which represents -cumulative CPU time in nanoseconds. The transformer definition above -defines a scale factor (for nanoseconds and multiple CPUs), which is -applied before the transformation derives a sequence of gauge samples -with unit ``%``, from sequential values of the ``cpu`` meter. - -The definition for the disk I/O rate, which is also generated by the -rate of change transformer: - -.. 
code-block:: yaml - - transformers: - - name: "rate_of_change" - parameters: - source: - map_from: - name: "disk\\.(read|write)\\.(bytes|requests)" - unit: "(B|request)" - target: - map_to: - name: "disk.\\1.\\2.rate" - unit: "\\1/s" - type: "gauge" - -Unit conversion transformer -``````````````````````````` - -Transformer to apply a unit conversion. It takes the volume of the meter -and multiplies it with the given ``scale`` expression. Also supports -``map_from`` and ``map_to`` like the rate of change transformer. - -Sample configuration: - -.. code-block:: yaml - - transformers: - - name: "unit_conversion" - parameters: - target: - name: "disk.kilobytes" - unit: "KB" - scale: "volume * 1.0 / 1024.0" - -With ``map_from`` and ``map_to``: - -.. code-block:: yaml - - transformers: - - name: "unit_conversion" - parameters: - source: - map_from: - name: "disk\\.(read|write)\\.bytes" - target: - map_to: - name: "disk.\\1.kilobytes" - scale: "volume * 1.0 / 1024.0" - unit: "KB" - -Aggregator transformer -`````````````````````` - -A transformer that sums up the incoming samples until enough samples -have come in or a timeout has been reached. - -Timeout can be specified with the ``retention_time`` option. If you want -to flush the aggregation, after a set number of samples have been -aggregated, specify the size parameter. - -The volume of the created sample is the sum of the volumes of samples -that came into the transformer. Samples can be aggregated by the -attributes ``project_id``, ``user_id`` and ``resource_metadata``. To aggregate -by the chosen attributes, specify them in the configuration and set which -value of the attribute to take for the new sample (first to take the -first sample's attribute, last to take the last sample's attribute, and -drop to discard the attribute). - -To aggregate 60s worth of samples by ``resource_metadata`` and keep the -``resource_metadata`` of the latest received sample: - -.. code-block:: yaml - - transformers: - - name: "aggregator" - parameters: - retention_time: 60 - resource_metadata: last - -To aggregate each 15 samples by ``user_id`` and ``resource_metadata`` and keep -the ``user_id`` of the first received sample and drop the -``resource_metadata``: - -.. code-block:: yaml - - transformers: - - name: "aggregator" - parameters: - size: 15 - user_id: first - resource_metadata: drop - -Accumulator transformer -``````````````````````` - -This transformer simply caches the samples until enough samples have -arrived and then flushes them all down the pipeline at once: - -.. code-block:: yaml - - transformers: - - name: "accumulator" - parameters: - size: 15 - -Multi meter arithmetic transformer -`````````````````````````````````` - -This transformer enables us to perform arithmetic calculations over one -or more meters and/or their metadata, for example:: - - memory_util = 100 * memory.usage / memory - -A new sample is created with the properties described in the ``target`` -section of the transformer's configuration. The sample's -volume is the result of the provided expression. The calculation is -performed on samples from the same resource. - -.. note:: - - The calculation is limited to meters with the same interval. - -Example configuration: - -.. code-block:: yaml - - transformers: - - name: "arithmetic" - parameters: - target: - name: "memory_util" - unit: "%" - type: "gauge" - expr: "100 * $(memory.usage) / $(memory)" - -To demonstrate the use of metadata, the following implementation of a -novel meter shows average CPU time per core: - -.. 
code-block:: yaml - - transformers: - - name: "arithmetic" - parameters: - target: - name: "avg_cpu_per_core" - unit: "ns" - type: "cumulative" - expr: "$(cpu) / ($(cpu).resource_metadata.cpu_number or 1)" - -.. note:: - - Expression evaluation gracefully handles NaNs and exceptions. In - such a case it does not create a new sample but only logs a warning. - -Delta transformer -````````````````` - -This transformer calculates the change between two sample datapoints of a -resource. It can be configured to capture only the positive growth deltas. - -Example configuration: - -.. code-block:: yaml - - transformers: - - name: "delta" - parameters: - target: - name: "cpu.delta" - growth_only: True - .. _publishing: Publishers @@ -396,6 +146,29 @@ More details on how to enable and configure gnocchi can be found on its `official documentation page `__. +prometheus +`````````` + +Metering data can be sent to the `pushgateway +`__ of Prometheus by using: + +``prometheus://pushgateway-host:9091/metrics/job/openstack-telemetry`` + +With this publisher, timestamps are not sent to Prometheus due to the Prometheus +Pushgateway design. All timestamps are set at the time Prometheus scrapes the metrics +from the Pushgateway and not when the metric was polled on the OpenStack +services. + +To get time series in Prometheus that reflect reality (apart from +the lag added by the Prometheus scraping mechanism), the `scrape_interval` for +the pushgateway must be lower than and a multiple of the Ceilometer polling +interval. + +You can read more `here `__. + +Due to this, it is not recommended to use this publisher for billing purposes, +as timestamps in Prometheus will not be exact. + panko ````` @@ -484,7 +257,7 @@ with the publisher declaration. For example, additional configuration options can be passed in: ``http://localhost:80/?option1=value1&option2=value2`` -The following options are availble: +The following options are available: ``timeout`` The number of seconds before HTTP request times out. @@ -510,33 +283,3 @@ - panko:// - udp://10.0.0.2:1234 - notifier://?policy=drop&max_queue_length=512&topic=custom_target - -Pipeline Partitioning -~~~~~~~~~~~~~~~~~~~~~ - -.. note:: - - Partitioning is only required if pipelines contain transformations. It has - secondary benefit of supporting batching in certain publishers. - -On large workloads, multiple notification agents can be deployed to handle the -flood of incoming messages from monitored services. If transformations are -enabled in the pipeline, the notification agents must be coordinated to ensure -related messages are routed to the same agent. To enable coordination, set the -``workload_partitioning`` value in ``notification`` section. - -To distribute messages across agents, ``pipeline_processing_queues`` option -should be set. This value defines how many pipeline queues to create which will -then be distributed to the active notification agents. It is recommended that -the number of processing queues, at the very least, match the number of agents. - -Increasing the number of processing queues will improve the distribution of -messages across the agents. It will also help batching which minimises the -requests to Gnocchi storage backend. It will also increase the load the on -message queue as it uses the queue to shard data. - -.. warning:: - - Decreasing the number of processing queues may result in lost data as any - previously created queues may no longer be assigned to active agents. It - is only recommended that you **increase** processing queues. 
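To illustrate the sink and publisher definitions described in the documentation above, a minimal pipeline.yaml could look like the sketch below. The source name, sink name, meter selection and Pushgateway host are illustrative assumptions rather than shipped defaults; the prometheus publisher URL follows the format documented above.

.. code-block:: yaml

    ---
    sources:
        - name: example_meter_source
          meters:
              # meters handed to the sink; "*" would select all meters
              - "cpu"
              - "memory.usage"
          sinks:
              - example_meter_sink
    sinks:
        - name: example_meter_sink
          # no transformers section: the sink is just a list of publishers
          publishers:
              - gnocchi://
              - prometheus://pushgateway-host:9091/metrics/job/openstack-telemetry
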
diff -Nru ceilometer-10.0.1/doc/source/admin/telemetry-measurements.rst ceilometer-11.0.0/doc/source/admin/telemetry-measurements.rst --- ceilometer-10.0.1/doc/source/admin/telemetry-measurements.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/admin/telemetry-measurements.rst 2018-07-30 18:08:00.000000000 +0000 @@ -21,9 +21,7 @@ order to be able to collect all the samples you need. For further information about configuration requirements see the `Telemetry chapter `__ - in the Installation Tutorials and Guides. Also check the `Telemetry manual - installation `__ - description. + in the Installation Tutorials and Guides. Telemetry uses the following meter types: @@ -324,12 +322,12 @@ | **Meters added in the Pike release** | +-----------+-------+------+----------+----------+---------+------------------+ | memory.\ | Cumul\| | | | | | -| | ative | MB | instance | Pollster | Libvirt | Memory swap in | -| swap.in | | | ID | | | | +| swap.in | ative | MB | instance | Pollster | Libvirt | Memory swap in | +| | | | ID | | | | +-----------+-------+------+----------+----------+---------+------------------+ | memory.\ | Cumul\| | | | | | -| | ative | MB | instance | Pollster | Libvirt | Memory swap out | -| swap.out | | | ID | | | | +| swap.out | ative | MB | instance | Pollster | Libvirt | Memory swap out | +| | | | ID | | | | +-----------+-------+------+----------+----------+---------+------------------+ | **Meters added in the Queens release** | +-----------+-------+------+----------+----------+---------+------------------+ @@ -356,12 +354,11 @@ To enable libvirt ``disk.*`` support when running on RBD-backed shared storage, you need to install libvirt version 1.2.16+. -The Telemetry service supports creating new meters by using -transformers. For more details about transformers see -:ref:`telemetry-transformers`. Among the meters gathered from libvirt and -Hyper-V, there are a few which are derived from other meters. The list of -meters that are created by using the ``rate_of_change`` transformer from the -above table is the following: +The Telemetry service supports creating new meters by using transformers, but +this is deprecated and discouraged to use. Among the meters gathered from +libvirt and Hyper-V, there are a few which are derived from other meters. The +list of meters that are created by using the ``rate_of_change`` transformer +from the above table is the following: - cpu_util @@ -393,10 +390,13 @@ .. note:: - If storing data in Gnocchi v4.1+, derived rate_of_change metrics can be - computed using Gnocchi rather than Ceilometer transformers. This will - minimize Ceilometer memory requirements and avoid missing data when - Ceilometer services restart. + If storing data in Gnocchi, derived rate_of_change metrics are also + computed using Gnocchi in addition to Ceilometer transformers. It avoids + missing data when Ceilometer services restart. + To minimize Ceilometer memory requirements transformers can be disabled. + These ``rate_of_change`` meters are deprecated and will be removed in + default Ceilometer configuration in future release. + OpenStack Compute is capable of collecting ``CPU`` related meters from the compute host machines. 
In order to use that you need to set the @@ -754,11 +754,6 @@ | storage.objects.ou\| Delta | B | storage ID | Notific\| Number of outgo\| | tgoing.bytes | | | | ation | ing bytes | +--------------------+-------+-------+------------+---------+-----------------+ -| storage.api.request| Delta | requ\ | storage ID | Notific\| Number of API r\| -| | | est | | ation | equests against | -| | | | | | OpenStack Obje\ | -| | | | | | ct Storage | -+--------------------+-------+-------+------------+---------+-----------------+ | storage.containers\| Gauge | object| storage ID\| Pollster| Number of objec\| | .objects | | | /container | | ts in container | +--------------------+-------+-------+------------+---------+-----------------+ @@ -773,7 +768,7 @@ In order to gather meters from Ceph, you have to install and configure the Ceph Object Gateway (radosgw) as it is described in the `Installation Manual `__. You also have to enable -`usage logging `__ in +`usage logging `__ in order to get the related meters from Ceph. You will need an ``admin`` user with ``users``, ``buckets``, ``metadata`` and ``usage`` ``caps`` configured. diff -Nru ceilometer-10.0.1/doc/source/conf.py ceilometer-11.0.0/doc/source/conf.py --- ceilometer-10.0.1/doc/source/conf.py 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/conf.py 2018-07-30 18:08:00.000000000 +0000 @@ -11,10 +11,8 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import subprocess import sys import os -import warnings BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) Binary files /tmp/tmpryEMWL/kfX1xGI8qK/ceilometer-10.0.1/doc/source/contributor/3-Pipeline.png and /tmp/tmpryEMWL/oTsAHLPAyV/ceilometer-11.0.0/doc/source/contributor/3-Pipeline.png differ Binary files /tmp/tmpryEMWL/kfX1xGI8qK/ceilometer-10.0.1/doc/source/contributor/4-Transformer.png and /tmp/tmpryEMWL/oTsAHLPAyV/ceilometer-11.0.0/doc/source/contributor/4-Transformer.png differ diff -Nru ceilometer-10.0.1/doc/source/contributor/architecture.rst ceilometer-11.0.0/doc/source/contributor/architecture.rst --- ceilometer-10.0.1/doc/source/contributor/architecture.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/contributor/architecture.rst 2018-07-30 18:08:00.000000000 +0000 @@ -1,8 +1,8 @@ .. _architecture: -===================== - System Architecture -===================== +=================== +System Architecture +=================== .. index:: single: agent; architecture @@ -154,27 +154,6 @@ it, and publish it in various combinations via multiple pipelines. This functionality is handled by the notification agents. -Transforming the data ---------------------- - -.. figure:: ./4-Transformer.png - :width: 100% - :align: center - :alt: Transformer example - - Example of aggregation of multiple cpu time usage samples in a single - cpu percentage sample. - -The data gathered from the polling and notifications agents contains a wealth -of data and if combined with historical or temporal context, can be used to -derive even more data. Ceilometer offers various transformers which can be used -to manipulate data in the pipeline. - -.. note:: - - The equivalent functionality can be handled more stably by storage - drivers such as Gnocchi. - Publishing the data ------------------- @@ -185,7 +164,7 @@ This figure shows how a sample can be published to multiple destinations. 
-Currently, processed data can be published using 7 different transports: +Currently, processed data can be published using 8 different transports: 1. gnocchi, which publishes samples/events to Gnocchi API; 2. notifier, a notification based publisher which pushes samples to a message @@ -193,6 +172,10 @@ 3. udp, which publishes samples using UDP packets; 4. http, which targets a REST interface; 5. file, which publishes samples to a file with specified name and location; +6. zaqar, a multi-tenant cloud messaging and notification service for web and + mobile developers; +7. https, which is http over SSL and targets a REST interface. +8. prometheus, which publishes samples to Prometheus Pushgateway Storing/Accessing the data diff -Nru ceilometer-10.0.1/doc/source/contributor/devstack.rst ceilometer-11.0.0/doc/source/contributor/devstack.rst --- ceilometer-10.0.1/doc/source/contributor/devstack.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/contributor/devstack.rst 2018-07-30 18:08:00.000000000 +0000 @@ -1,6 +1,6 @@ -=============================== - Installing development sandbox -=============================== +============================== +Installing development sandbox +============================== In a development environment created by devstack_, Ceilometer can be tested alongside other OpenStack services. diff -Nru ceilometer-10.0.1/doc/source/contributor/events.rst ceilometer-11.0.0/doc/source/contributor/events.rst --- ceilometer-10.0.1/doc/source/contributor/events.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/contributor/events.rst 2018-07-30 18:08:00.000000000 +0000 @@ -15,9 +15,9 @@ .. _events: -============================= - Events and Event Processing -============================= +=========================== +Events and Event Processing +=========================== Events vs. Samples ~~~~~~~~~~~~~~~~~~ diff -Nru ceilometer-10.0.1/doc/source/contributor/measurements.rst ceilometer-11.0.0/doc/source/contributor/measurements.rst --- ceilometer-10.0.1/doc/source/contributor/measurements.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/contributor/measurements.rst 2018-07-30 18:08:00.000000000 +0000 @@ -15,9 +15,9 @@ .. _measurements: -============== - Measurements -============== +============ +Measurements +============ Existing meters =============== diff -Nru ceilometer-10.0.1/doc/source/contributor/new_resource_types.rst ceilometer-11.0.0/doc/source/contributor/new_resource_types.rst --- ceilometer-10.0.1/doc/source/contributor/new_resource_types.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/contributor/new_resource_types.rst 2018-07-30 18:08:00.000000000 +0000 @@ -15,9 +15,9 @@ .. _add_new_resource_types: -================================= - Ceilometer + Gnocchi Integration -================================= +================================ +Ceilometer + Gnocchi Integration +================================ .. warning:: diff -Nru ceilometer-10.0.1/doc/source/contributor/plugins.rst ceilometer-11.0.0/doc/source/contributor/plugins.rst --- ceilometer-10.0.1/doc/source/contributor/plugins.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/contributor/plugins.rst 2018-07-30 18:08:00.000000000 +0000 @@ -13,9 +13,9 @@ License for the specific language governing permissions and limitations under the License. 
-======================= - Writing Agent Plugins -======================= +===================== +Writing Agent Plugins +===================== This documentation gives you some clues on how to write a new agent or plugin for Ceilometer if you wish to instrument a measurement which @@ -56,7 +56,7 @@ Currently we keep separate namespaces - ``ceilometer.poll.compute`` and ``ceilometer.poll.central`` for quick separation of what to poll depending on where is polling agent running. For example, this will load, among others, -the :class:`ceilometer.compute.pollsters.cpu.CPUPollster` +the :class:`ceilometer.compute.pollsters.instance_stats.CPUPollster` Pollster -------- @@ -68,12 +68,12 @@ :file:`ceilometer/sample.py` file. Compute plugins are defined as subclasses of the -:class:`ceilometer.compute.pollsters.BaseComputePollster` class as defined in -the :file:`ceilometer/compute/pollsters/__init__.py` file. +:class:`ceilometer.compute.pollsters.GenericComputePollster` class as defined +in the :file:`ceilometer/compute/pollsters/__init__.py` file. For example, in the ``CPUPollster`` plugin, the ``get_samples`` method takes -in a given list of resources representating instances on the local host, loops -through them and retrieves the `cputime` details from resource. Similarly, +in a given list of resources representing instances on the local host, loops +through them and retrieves the `cpu time` details from resource. Similarly, other metrics are built by pulling the appropriate value from the given list of resources. @@ -93,16 +93,16 @@ Additionally, it must set ``get_main_endpoints`` which provides endpoints to be added to the main queue listener in the notification agent. This main queue endpoint inherits :class:`ceilometer.pipeline.base.MainNotificationEndpoint` -and is defines which notification priorites to listen, normalises the data, +and defines which notification priorities to listen, normalises the data, and redirects the data for pipeline processing or requeuing depending on `workload_partitioning` configuration. If a pipeline is configured to support `workload_partitioning`, data from the -main queue endpoints are sharded and requeued in internal queues. The +main queue endpoints are shared and requeued in internal queues. The notification agent configures a second notification consumer to handle these internal queues and pushes data to endpoints defined by ``get_interim_endpoints`` in the pipeline manager. These interim endpoints -define how to handle the sharded, normalised data models for pipeline +define how to handle the shared, normalised data models for pipeline processing Both main queue and interim queue notification endpoints should implement: @@ -110,7 +110,7 @@ ``event_types`` A sequence of strings defining the event types the endpoint should handle -``process_notifications(self, priority, message)`` +``process_notifications(self, priority, notifications)`` Receives an event message from the list provided to ``event_types`` and returns a sequence of objects. Using the SampleEndpoint, it should yield ``Sample`` objects as defined in the :file:`ceilometer/sample.py` file. diff -Nru ceilometer-10.0.1/doc/source/glossary.rst ceilometer-11.0.0/doc/source/glossary.rst --- ceilometer-10.0.1/doc/source/glossary.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/glossary.rst 2018-07-30 18:08:00.000000000 +0000 @@ -14,9 +14,9 @@ License for the specific language governing permissions and limitations under the License. 
-========== - Glossary -========== +======== +Glossary +======== .. glossary:: @@ -34,12 +34,6 @@ notification bus and transforms them into Ceilometer samples. This is the preferred method of data collection. - ceilometer - From Wikipedia [#]_: - - A ceilometer is a device that uses a laser or other light - source to determine the height of a cloud base. - polling agent Software service running either on a central management node within the OpenStack infrastructure or compute node measuring usage and sending the @@ -59,10 +53,10 @@ number of disk io requests, etc. Three types of meters are defined in ceilometer: - * Cumulative: Increasing over time (e.g. disk I/O) - * Gauge: Discrete items (e.g. floating IPs, image uploads) and fluctuating - values (e.g. number of Swift objects) - * Delta: Incremental change to a counter over time (e.g. bandwidth delta) + * Cumulative: Increasing over time (e.g. disk I/O) + * Gauge: Discrete items (e.g. floating IPs, image uploads) and fluctuating + values (e.g. number of Swift objects) + * Delta: Incremental change to a counter over time (e.g. bandwidth delta) metering Metering is the process of collecting information about what, @@ -77,14 +71,13 @@ RPC driver. non-repudiable - From Wikipedia [#]_: - - Non-repudiation refers to a state of affairs where the purported - maker of a statement will not be able to successfully challenge - the validity of the statement or contract. The term is often - seen in a legal setting wherein the authenticity of a signature - is being challenged. In such an instance, the authenticity is - being "repudiated". + "Non-repudiation refers to a state of affairs where the purported + maker of a statement will not be able to successfully challenge + the validity of the statement or contract. The term is often + seen in a legal setting wherein the authenticity of a signature + is being challenged. In such an instance, the authenticity is + being "repudiated"." + (Wikipedia, [#]_) project The OpenStack tenant or project. diff -Nru ceilometer-10.0.1/doc/source/install/index.rst ceilometer-11.0.0/doc/source/install/index.rst --- ceilometer-10.0.1/doc/source/install/index.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/install/index.rst 2018-07-30 18:08:00.000000000 +0000 @@ -6,8 +6,8 @@ :maxdepth: 2 get_started.rst - install-compute.rst install-controller.rst + install-compute.rst verify.rst next-steps.rst diff -Nru ceilometer-10.0.1/doc/source/install/install-compute-common.inc ceilometer-11.0.0/doc/source/install/install-compute-common.inc --- ceilometer-10.0.1/doc/source/install/install-compute-common.inc 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/install/install-compute-common.inc 2018-07-30 18:08:00.000000000 +0000 @@ -59,13 +59,13 @@ To enable IPMI meters, ensure IPMITool is installed and the host supports Intel Node Manager. -* Edit the ``/etc/sudoers`` file and include:: +* Edit the ``/etc/sudoers`` file and include: .. code-block:: ini ceilometer ALL = (root) NOPASSWD: /usr/bin/ceilometer-rootwrap /etc/ceilometer/rootwrap.conf * -* Edit the ``/etc/ceilometer/polling.yaml`` to include the required meters, for example:: +* Edit the ``/etc/ceilometer/polling.yaml`` to include the required meters, for example: .. 
code-block:: yaml diff -Nru ceilometer-10.0.1/doc/source/install/neutron/install-neutron-obs.rst ceilometer-11.0.0/doc/source/install/neutron/install-neutron-obs.rst --- ceilometer-10.0.1/doc/source/install/neutron/install-neutron-obs.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/install/neutron/install-neutron-obs.rst 2018-07-30 18:08:00.000000000 +0000 @@ -5,7 +5,7 @@ these steps on the controller node. Configure the Networking service to use Telemetry ----------------------------------------------------- +------------------------------------------------- * Edit the ``/etc/neutron/neutron.conf`` and complete the following actions: diff -Nru ceilometer-10.0.1/doc/source/install/neutron/install-neutron-rdo.rst ceilometer-11.0.0/doc/source/install/neutron/install-neutron-rdo.rst --- ceilometer-10.0.1/doc/source/install/neutron/install-neutron-rdo.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/install/neutron/install-neutron-rdo.rst 2018-07-30 18:08:00.000000000 +0000 @@ -5,7 +5,7 @@ these steps on the controller node. Configure the Networking service to use Telemetry ----------------------------------------------------- +------------------------------------------------- * Edit the ``/etc/neutron/neutron.conf`` and complete the following actions: diff -Nru ceilometer-10.0.1/doc/source/install/neutron/install-neutron-ubuntu.rst ceilometer-11.0.0/doc/source/install/neutron/install-neutron-ubuntu.rst --- ceilometer-10.0.1/doc/source/install/neutron/install-neutron-ubuntu.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/doc/source/install/neutron/install-neutron-ubuntu.rst 2018-07-30 18:08:00.000000000 +0000 @@ -5,7 +5,7 @@ these steps on the controller node. Configure the Networking service to use Telemetry ----------------------------------------------------- +------------------------------------------------- * Edit the ``/etc/neutron/neutron.conf`` and complete the following actions: diff -Nru ceilometer-10.0.1/MAINTAINERS ceilometer-11.0.0/MAINTAINERS --- ceilometer-10.0.1/MAINTAINERS 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/MAINTAINERS 2018-07-30 18:08:00.000000000 +0000 @@ -12,7 +12,5 @@ gordc jd__ lhx -liusheng -llu pradk sileht diff -Nru ceilometer-10.0.1/PKG-INFO ceilometer-11.0.0/PKG-INFO --- ceilometer-10.0.1/PKG-INFO 2018-06-14 13:58:09.000000000 +0000 +++ ceilometer-11.0.0/PKG-INFO 2018-07-30 18:10:32.000000000 +0000 @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: ceilometer -Version: 10.0.1 +Version: 11.0.0 Summary: OpenStack Telemetry Home-page: https://docs.openstack.org/ceilometer/latest/ Author: OpenStack @@ -72,6 +72,6 @@ Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Topic :: System :: Monitoring -Provides-Extra: zaqar -Provides-Extra: test Provides-Extra: gnocchi +Provides-Extra: test +Provides-Extra: zaqar diff -Nru ceilometer-10.0.1/playbooks/legacy/telemetry-dsvm-integration-ceilometer/post.yaml ceilometer-11.0.0/playbooks/legacy/telemetry-dsvm-integration-ceilometer/post.yaml --- ceilometer-10.0.1/playbooks/legacy/telemetry-dsvm-integration-ceilometer/post.yaml 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/playbooks/legacy/telemetry-dsvm-integration-ceilometer/post.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,80 +0,0 @@ -- hosts: primary - tasks: - - - name: Copy files from {{ ansible_user_dir }}/workspace/ on node - synchronize: - src: '{{ ansible_user_dir }}/workspace/' - dest: '{{ zuul.executor.log_root }}' - mode: 
pull - copy_links: true - verify_host: true - rsync_opts: - - --include=**/*nose_results.html - - --include=*/ - - --exclude=* - - --prune-empty-dirs - - - name: Copy files from {{ ansible_user_dir }}/workspace/ on node - synchronize: - src: '{{ ansible_user_dir }}/workspace/' - dest: '{{ zuul.executor.log_root }}' - mode: pull - copy_links: true - verify_host: true - rsync_opts: - - --include=**/*testr_results.html.gz - - --include=*/ - - --exclude=* - - --prune-empty-dirs - - - name: Copy files from {{ ansible_user_dir }}/workspace/ on node - synchronize: - src: '{{ ansible_user_dir }}/workspace/' - dest: '{{ zuul.executor.log_root }}' - mode: pull - copy_links: true - verify_host: true - rsync_opts: - - --include=/.testrepository/tmp* - - --include=*/ - - --exclude=* - - --prune-empty-dirs - - - name: Copy files from {{ ansible_user_dir }}/workspace/ on node - synchronize: - src: '{{ ansible_user_dir }}/workspace/' - dest: '{{ zuul.executor.log_root }}' - mode: pull - copy_links: true - verify_host: true - rsync_opts: - - --include=**/*testrepository.subunit.gz - - --include=*/ - - --exclude=* - - --prune-empty-dirs - - - name: Copy files from {{ ansible_user_dir }}/workspace/ on node - synchronize: - src: '{{ ansible_user_dir }}/workspace/' - dest: '{{ zuul.executor.log_root }}/tox' - mode: pull - copy_links: true - verify_host: true - rsync_opts: - - --include=/.tox/*/log/* - - --include=*/ - - --exclude=* - - --prune-empty-dirs - - - name: Copy files from {{ ansible_user_dir }}/workspace/ on node - synchronize: - src: '{{ ansible_user_dir }}/workspace/' - dest: '{{ zuul.executor.log_root }}' - mode: pull - copy_links: true - verify_host: true - rsync_opts: - - --include=/logs/** - - --include=*/ - - --exclude=* - - --prune-empty-dirs diff -Nru ceilometer-10.0.1/playbooks/legacy/telemetry-dsvm-integration-ceilometer/run.yaml ceilometer-11.0.0/playbooks/legacy/telemetry-dsvm-integration-ceilometer/run.yaml --- ceilometer-10.0.1/playbooks/legacy/telemetry-dsvm-integration-ceilometer/run.yaml 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/playbooks/legacy/telemetry-dsvm-integration-ceilometer/run.yaml 1970-01-01 00:00:00.000000000 +0000 @@ -1,57 +0,0 @@ -- hosts: all - name: Autoconverted job legacy-telemetry-dsvm-integration-ceilometer from old job - gate-telemetry-dsvm-integration-ceilometer-ubuntu-xenial - tasks: - - - name: Ensure legacy workspace directory - file: - path: '{{ ansible_user_dir }}/workspace' - state: directory - - - shell: - cmd: | - set -e - set -x - cat > clonemap.yaml << EOF - clonemap: - - name: openstack-infra/devstack-gate - dest: devstack-gate - EOF - /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ - git://git.openstack.org \ - openstack-infra/devstack-gate - executable: /bin/bash - chdir: '{{ ansible_user_dir }}/workspace' - environment: '{{ zuul | zuul_legacy_vars }}' - - - shell: - cmd: | - set -e - set -x - export PYTHONUNBUFFERED=true - export DEVSTACK_GATE_HEAT=1 - export DEVSTACK_GATE_NEUTRON=1 - export DEVSTACK_GATE_TEMPEST=1 - export DEVSTACK_GATE_EXERCISES=0 - export DEVSTACK_GATE_INSTALL_TESTONLY=1 - export DEVSTACK_GATE_TEMPEST_NOTESTS=1 - export PROJECTS="openstack/ceilometer openstack/aodh openstack/panko openstack/telemetry-tempest-plugin" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin panko git://git.openstack.org/openstack/panko" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin aodh 
git://git.openstack.org/openstack/aodh" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin heat git://git.openstack.org/openstack/heat" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin telemetry-tempest-plugin git://git.openstack.org/openstack/telemetry-tempest-plugin" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_BACKEND=gnocchi" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"GNOCCHI_ARCHIVE_POLICY=high" - export DEVSTACK_LOCAL_CONFIG+=$'\n'"CEILOMETER_PIPELINE_INTERVAL=15" - export DEVSTACK_PROJECT_FROM_GIT=$ZUUL_SHORT_PROJECT_NAME - function post_test_hook { - cd /opt/stack/new/telemetry-tempest-plugin/telemetry_tempest_plugin/integration/hooks/ - ./post_test_hook.sh - } - export -f post_test_hook - cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh - ./safe-devstack-vm-gate-wrap.sh - executable: /bin/bash - chdir: '{{ ansible_user_dir }}/workspace' - environment: '{{ zuul | zuul_legacy_vars }}' diff -Nru ceilometer-10.0.1/releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml ceilometer-11.0.0/releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml --- ceilometer-10.0.1/releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,8 @@ +--- +upgrade: + - | + `ceilometer-upgrade` must be run to build IPMI sensor resource in Gnocchi. +fixes: + - | + Ceilometer previously did not create IPMI sensor data from IPMI agent or + Ironic in Gnocchi. This data is now pushed to Gnocchi. diff -Nru ceilometer-10.0.1/releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml ceilometer-11.0.0/releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml --- ceilometer-10.0.1/releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,6 @@ +--- +issues: + - | + Ceilometer created metrics that could never get measures depending on the + polling configuration. Metrics are now created only if Ceilometer gets at + least a measure for them. diff -Nru ceilometer-10.0.1/releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml ceilometer-11.0.0/releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml --- ceilometer-10.0.1/releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,4 @@ +--- +features: + - | + `launched_at`/`created_at`/`deleted_at` of Nova instances are now tracked. diff -Nru ceilometer-10.0.1/releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml ceilometer-11.0.0/releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml --- ceilometer-10.0.1/releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml 2018-07-30 18:08:11.000000000 +0000 @@ -0,0 +1,13 @@ +--- +features: + - > + Add support for configuring the size of samples the poller will send in + each batch. +upgrade: + - > + batch_size option added to [polling] section of configuration. 
+ Use batch_size=0 to disable batching of samples. +deprecations: + - > + The option batch_polled_samples in the [DEFAULT] section is deprecated. + Use batch_size option in [polling] to configure and/or disable batching. diff -Nru ceilometer-10.0.1/releasenotes/notes/prometheus-bcb201cfe46d5778.yaml ceilometer-11.0.0/releasenotes/notes/prometheus-bcb201cfe46d5778.yaml --- ceilometer-10.0.1/releasenotes/notes/prometheus-bcb201cfe46d5778.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/notes/prometheus-bcb201cfe46d5778.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,4 @@ +--- +features: + - | + A new pulisher have been added to push data to Prometheus Pushgateway. diff -Nru ceilometer-10.0.1/releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml ceilometer-11.0.0/releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml --- ceilometer-10.0.1/releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,4 @@ +--- +upgrade: + - | + Deprecated `rgw.*` meters have been removed. Use `radosgw.*` instead. diff -Nru ceilometer-10.0.1/releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml ceilometer-11.0.0/releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml --- ceilometer-10.0.1/releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,4 @@ +--- +upgrade: + - | + The deprecated `gnocchi_dispatcher` option group has been removed. diff -Nru ceilometer-10.0.1/releasenotes/notes/reno.cache ceilometer-11.0.0/releasenotes/notes/reno.cache --- ceilometer-10.0.1/releasenotes/notes/reno.cache 2018-06-14 13:58:09.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/notes/reno.cache 2018-07-30 18:10:31.000000000 +0000 @@ -7,7 +7,136 @@ capture total time used by read or write operations. '] + releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml: + fixes: ['Ceilometer previously did not create IPMI sensor data from IPMI agent + or + + Ironic in Gnocchi. This data is now pushed to Gnocchi. + + '] + upgrade: ['`ceilometer-upgrade` must be run to build IPMI sensor resource in Gnocchi. + + '] + releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml: + issues: ['Ceilometer created metrics that could never get measures depending on + the + + polling configuration. Metrics are now created only if Ceilometer gets at + + least a measure for them. + + '] + releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml: + features: ['`launched_at`/`created_at`/`deleted_at` of Nova instances are now + tracked. + + '] + releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml: + deprecations: ['The option batch_polled_samples in the [DEFAULT] section is deprecated. + Use batch_size option in [polling] to configure and/or disable batching. + + '] + features: ['Add support for configuring the size of samples the poller will send + in each batch. + + '] + upgrade: ['batch_size option added to [polling] section of configuration. Use + batch_size=0 to disable batching of samples. + + '] + releasenotes/notes/prometheus-bcb201cfe46d5778.yaml: + features: ['A new pulisher have been added to push data to Prometheus Pushgateway. 
+ + '] + releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml: + upgrade: ['The deprecated `gnocchi_dispatcher` option group has been removed. + + '] + releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml: + upgrade: ['Deprecated `rgw.*` meters have been removed. Use `radosgw.*` instead. + + '] + releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml: + deprecations: ['cpu_util and \*.rate meters are deprecated and will be removed + in future + + release in favor of the Gnocchi rate calculation equivalent. + + '] + features: ['Archive policies can now be configured per metrics in gnocchi_resources.yaml. + + A default list of archive policies is now created by Ceilometer. + + They are called "ceilometer-low-rate" for all IOs metrics and "ceilometer-low" + + for others. + + '] + upgrade: ['Ceilometer now creates it own archive policies in Gnocchi and use them + to + + create metrics in Gnocchi. Old metrics kept their current archive policies + + and will not be updated with ceilometer-upgrade. Only newly created metrics + + will be impacted. Archive policy can still be overridden with the publisher + url + + (e.g: gnocchi://archive_policy=high). + + '] + releasenotes/notes/transformer-ed4b1ea7d1752576.yaml: + deprecations: ['Usage of transformers in Ceilometer pipelines is deprecated. Transformers + in Ceilometer + + have never computed samples correctly when you have multiple workers. This + functionality can + + be done by the storage backend easily without all issues that Ceilometer has. + For example, the + + rating is already computed in Gnocchi today. + + ', 'Pipeline Partitioning is also deprecated. This was only useful to + + workaround of some issues that tranformers has. + + '] + releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml: + features: [use memory usable metric from libvirt memoryStats if available.] 
notes: - files: - - [releasenotes/notes/add-disk-latency-metrics-9e5c05108a78c3d9.yaml, f4b58ae01e8ddfc515e6f14a0d19d726370f4870] - version: 10.0.1 + - - releasenotes/notes/add-disk-latency-metrics-9e5c05108a78c3d9.yaml + - !!binary | + OGZkZDE5ZTc4YTIwNTMyODU1NjljZGEwNWNkYzQ4NzViNzE2MTkwYw== + - - releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml + - !!binary | + NjYzYzUyMzMyODY5MGRmY2MzMGMxYWQ5ODZiYTU3ZTU2NmJkMTk0Yw== + - - releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml + - !!binary | + ODI2YmEzNWM2ZWI5OTAwYmIwYTU1N2Y2ZTRmMDZmN2QxYjliZDM5NA== + - - releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml + - !!binary | + MzY0MTRlMWNlYmUzYTQzZDk2MmY4ZDJhZGZlN2NjMzQ3NDJlOTA1Nw== + - - releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml + - !!binary | + MmRjMjFhNWYwNWVlNjcwMjkyYThhN2Y5Nzk1MmQzOTQyYzMyZjVjZg== + - - releasenotes/notes/prometheus-bcb201cfe46d5778.yaml + - !!binary | + MmI4MDUyMDUyZDg2MWI4NTZiMzUyMmE4ZDdmODU3NzM1NzkzZjAxYg== + - - releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml + - !!binary | + YzU2NzI1ODk3OTA2NGQ0YTZlODIwNTdmNjg1ODdiMTg0ZWU5MzlhYQ== + - - releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml + - !!binary | + ZGQxYjdhYmYzMjk3NTVjODM3Nzg2MjMyOGY3NzBlMGI3OTc0ZjVjMg== + - - releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml + - !!binary | + ZTkwNmJjZGE4MjkxOGFmZjAwMGFiNzZmMDY3YTJkYzQ5NjYwZDBiNA== + - - releasenotes/notes/transformer-ed4b1ea7d1752576.yaml + - !!binary | + MWRjYmQ2MDdkZjA2OTYxMDFiNDBmNzdkNzcyMTQ4OTY3OWViZTBiYQ== + - - releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml + - !!binary | + MmRlZTQ4NWRhN2E2ZjJjZGY5NjUyNWZhYmMxOGE4YzI3YzhiZTU3MA== + version: 11.0.0 diff -Nru ceilometer-10.0.1/releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml ceilometer-11.0.0/releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml --- ceilometer-10.0.1/releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,18 @@ +--- +features: + - | + Archive policies can now be configured per metrics in gnocchi_resources.yaml. + A default list of archive policies is now created by Ceilometer. + They are called "ceilometer-low-rate" for all IOs metrics and "ceilometer-low" + for others. +upgrade: + - | + Ceilometer now creates it own archive policies in Gnocchi and use them to + create metrics in Gnocchi. Old metrics kept their current archive policies + and will not be updated with ceilometer-upgrade. Only newly created metrics + will be impacted. Archive policy can still be overridden with the publisher url + (e.g: gnocchi://archive_policy=high). +deprecations: + - | + cpu_util and \*.rate meters are deprecated and will be removed in future + release in favor of the Gnocchi rate calculation equivalent. diff -Nru ceilometer-10.0.1/releasenotes/notes/transformer-ed4b1ea7d1752576.yaml ceilometer-11.0.0/releasenotes/notes/transformer-ed4b1ea7d1752576.yaml --- ceilometer-10.0.1/releasenotes/notes/transformer-ed4b1ea7d1752576.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/notes/transformer-ed4b1ea7d1752576.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,10 @@ +--- +deprecations: + - | + Usage of transformers in Ceilometer pipelines is deprecated. 
Transformers in Ceilometer + have never computed samples correctly when you have multiple workers. This functionality can + be done by the storage backend easily without all issues that Ceilometer has. For example, the + rating is already computed in Gnocchi today. + - | + Pipeline Partitioning is also deprecated. This was only useful to + workaround of some issues that tranformers has. diff -Nru ceilometer-10.0.1/releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml ceilometer-11.0.0/releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml --- ceilometer-10.0.1/releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,3 @@ +--- +features: + - use memory usable metric from libvirt memoryStats if available. diff -Nru ceilometer-10.0.1/releasenotes/source/index.rst ceilometer-11.0.0/releasenotes/source/index.rst --- ceilometer-10.0.1/releasenotes/source/index.rst 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/source/index.rst 2018-07-30 18:08:00.000000000 +0000 @@ -6,6 +6,7 @@ :maxdepth: 1 unreleased + queens pike ocata newton diff -Nru ceilometer-10.0.1/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po ceilometer-11.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po --- ceilometer-10.0.1/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,1310 @@ +# Andi Chandler , 2017. #zanata +# Andi Chandler , 2018. #zanata +msgid "" +msgstr "" +"Project-Id-Version: ceilometer\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2018-06-22 14:43+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"PO-Revision-Date: 2018-05-18 06:18+0000\n" +"Last-Translator: Andi Chandler \n" +"Language-Team: English (United Kingdom)\n" +"Language: en_GB\n" +"X-Generator: Zanata 4.3.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" + +msgid "10.0.0" +msgstr "10.0.0" + +msgid "5.0.1" +msgstr "5.0.1" + +msgid "5.0.2" +msgstr "5.0.2" + +msgid "5.0.3" +msgstr "5.0.3" + +msgid "6.0.0" +msgstr "6.0.0" + +msgid "7.0.0" +msgstr "7.0.0" + +msgid "7.0.1" +msgstr "7.0.1" + +msgid "7.0.5" +msgstr "7.0.5" + +msgid "8.0.0" +msgstr "8.0.0" + +msgid "9.0.0" +msgstr "9.0.0" + +msgid "" +"A dogpile.cache supported backend is required to enable cache. Additional " +"configuration `options `_ are also required." +msgstr "" +"A dogpile.cache supported backend is required to enable cache. Additional " +"configuration `options `_ are also required." + +msgid "" +"A local cache is used when polling instance metrics to minimise calls Nova " +"API. A new option is added `resource_cache_expiry` to configure a time to " +"live for cache before it expires. This resolves issue where migrated " +"instances are not removed from cache." +msgstr "" +"A local cache is used when polling instance metrics to minimise calls Nova " +"API. A new option is added `resource_cache_expiry` to configure a time to " +"live for cache before it expires. This resolves issue where migrated " +"instances are not removed from cache." + +msgid "" +"A local cache is used when polling instance metrics to minimise calls Nova " +"API. 
A new option is added `resource_cache_expiry` to configure a time to " +"live for cache before it expires. This resolves issue where migrated " +"instances are not removed from cache. This is only relevant when " +"`instance_discovery_method` is set to `naive`. It is recommended to use " +"`libvirt_metadata` if possible." +msgstr "" +"A local cache is used when polling instance metrics to minimise calls Nova " +"API. A new option is added `resource_cache_expiry` to configure a time to " +"live for cache before it expires. This resolves issue where migrated " +"instances are not removed from cache. This is only relevant when " +"`instance_discovery_method` is set to `naive`. It is recommended to use " +"`libvirt_metadata` if possible." + +msgid "" +"A new option named `max_parallel_requests` is available to control the " +"maximum number of parallel requests that can be executed by the agents. This " +"option also replaces the `poolsize` option of the HTTP publisher." +msgstr "" +"A new option named `max_parallel_requests` is available to control the " +"maximum number of parallel requests that can be executed by the agents. This " +"option also replaces the `poolsize` option of the HTTP publisher." + +msgid "A new pulisher have been added to push data to Prometheus Pushgateway." +msgstr "" +"A new publisher have been added to push data to Prometheus Pushgateway." + +msgid "" +"Add `disk.device.read.latency` and `disk.device.write.latency` meters to " +"capture total time used by read or write operations." +msgstr "" +"Add `disk.device.read.latency` and `disk.device.write.latency` meters to " +"capture total time used by read or write operations." + +msgid "" +"Add a ceilometer driver to collect network statistics information using REST " +"APIs exposed by network-statistics module in OpenDaylight." +msgstr "" +"Add a Ceilometer driver to collect network statistics information using REST " +"APIs exposed by network-statistics module in OpenDaylight." + +msgid "Add a new publisher for pushing samples or events to a Zaqar queue." +msgstr "Add a new publisher for pushing samples or events to a Zaqar queue." + +msgid "" +"Add a tool for migrating metrics data from Ceilometer's native storage to " +"Gnocchi. Since we have deprecated Ceilometer API and the Gnocchi will be the " +"recommended metrics data storage backend." +msgstr "" +"Add a tool for migrating metrics data from Ceilometer's native storage to " +"Gnocchi. Since we have deprecated Ceilometer API and the Gnocchi will be the " +"recommended metrics data storage backend." + +msgid "" +"Add four new meters, including perf.cpu.cycles for the number of cpu cycles " +"one instruction needs, perf.instructions for the count of instructions, perf." +"cache_references for the count of cache hits and cache_misses for the count " +"of caches misses." +msgstr "" +"Add four new meters, including perf.cpu.cycles for the number of cpu cycles " +"one instruction needs, perf.instructions for the count of instructions, perf." +"cache_references for the count of cache hits and cache_misses for the count " +"of caches misses." + +msgid "" +"Add hardware.disk.read.* and hardware.disk.write.* metrics to capture diskio " +"details." +msgstr "" +"Add hardware.disk.read.* and hardware.disk.write.* metrics to capture diskio " +"details." + +msgid "" +"Add memory swap metric for VM, including 'memory.swap.in' and 'memory.swap." +"out'." +msgstr "" +"Add memory swap metric for VM, including 'memory.swap.in' and 'memory.swap." +"out'." 
+ +msgid "Add support for Keystone v3 authentication" +msgstr "Add support for Keystone v3 authentication" + +msgid "" +"Add support for batch processing of messages from queue. This will allow the " +"collector and notification agent to grab multiple messages per thread to " +"enable more efficient processing." +msgstr "" +"Add support for batch processing of messages from queue. This will allow the " +"collector and notification agent to grab multiple messages per thread to " +"enable more efficient processing." + +msgid "Add support for network statistics meters with gnocchi" +msgstr "Add support for network statistics meters with Gnocchi" + +msgid "" +"Add support of batch recording metering data to mongodb backend, since the " +"pymongo support *insert_many* interface which can be used to batch record " +"items, in \"big-data\" scenarios, this change can improve the performance of " +"metering data recording." +msgstr "" +"Add support of batch recording metering data to MongoDB backend, since the " +"pymongo support *insert_many* interface which can be used to batch record " +"items, in \"big-data\" scenarios, this change can improve the performance of " +"metering data recording." + +msgid "" +"Add support of metering the size of cinder volume/snapshot/backup. Like " +"other meters, these are useful for billing system." +msgstr "" +"Add support of metering the size of Cinder volume/snapshot/backup. Like " +"other meters, these are useful for billing system." + +msgid "" +"Add support to capture volume capacity usage details from cinder. This data " +"is extracted from notifications sent by Cinder starting in Ocata." +msgstr "" +"Add support to capture volume capacity usage details from Cinder. This data " +"is extracted from notifications sent by Cinder starting in Ocata." + +msgid "" +"Add two new meters, including memory.bandwidth.total and memory.bandwidth." +"local, to get memory bandwidth statistics based on Intel CMT feature." +msgstr "" +"Add two new meters, including memory.bandwidth.total and memory.bandwidth." +"local, to get memory bandwidth statistics based on Intel CMT feature." + +msgid "Added support for magnum bay CRUD events, event_type is 'magnum.bay.*'." +msgstr "" +"Added support for Magnum bay CRUD events, event_type is 'magnum.bay.*'." + +msgid "" +"Addition pipelines can be created following the format used by existing " +"pipelines." +msgstr "" +"Addition pipelines can be created following the format used by existing " +"pipelines." + +msgid "" +"Allow users to add additional exchanges in ceilometer.conf instead of " +"hardcoding exchanges. Now original http_control_exchanges is being " +"deprecated and renamed notification_control_exchanges. Besides, the new " +"option is integrated with other exchanges in default EXCHANGE_OPTS to make " +"it available to extend additional exchanges." +msgstr "" +"Allow users to add additional exchanges in ceilometer.conf instead of " +"hardcoding exchanges. Now original http_control_exchanges is being " +"deprecated and renamed notification_control_exchanges. Besides, the new " +"option is integrated with other exchanges in default EXCHANGE_OPTS to make " +"it available to extend additional exchanges." + +msgid "" +"Any existing commands utilising `image` meter should be switched to `image." +"size` meter which will provide equivalent functionality" +msgstr "" +"Any existing commands utilising `image` meter should be switched to `image." 
+"size` meter which will provide equivalent functionality" + +msgid "" +"Archive policies can now be configured per metrics in gnocchi_resources." +"yaml. A default list of archive policies is now created by Ceilometer. They " +"are called \"ceilometer-low-rate\" for all IOs metrics and \"ceilometer-low" +"\" for others." +msgstr "" +"Archive policies can now be configured per metrics in gnocchi_resources." +"yaml. A default list of archive policies is now created by Ceilometer. They " +"are called \"ceilometer-low-rate\" for all IO metrics and \"ceilometer-low\" " +"for others." + +msgid "" +"As the collector service is being deprecated, the duplication of publishers " +"and dispatchers is being addressed. The http dispatcher is now marked as " +"deprecated and the recommended path is to use http publisher." +msgstr "" +"As the collector service is being deprecated, the duplication of publishers " +"and dispatchers is being addressed. The http dispatcher is now marked as " +"deprecated and the recommended path is to use http publisher." + +msgid "" +"Batching is enabled by default now when coordinated workers are enabled. " +"Depending on load, it is recommended to scale out the number of " +"`pipeline_processing_queues` to improve distribution. `batch_size` should " +"also be configured accordingly." +msgstr "" +"Batching is enabled by default now when coordinated workers are enabled. " +"Depending on load, it is recommended to scale out the number of " +"`pipeline_processing_queues` to improve distribution. `batch_size` should " +"also be configured accordingly." + +msgid "" +"Because of deprecating the collector, the default publishers in pipeline." +"yaml and event_pipeline.yaml are now changed using database instead of " +"notifier." +msgstr "" +"Because of deprecating the collector, the default publishers in pipeline." +"yaml and event_pipeline.yaml are now changed using database instead of " +"notifier." + +msgid "Bug Fixes" +msgstr "Bug Fixes" + +msgid "" +"By default, Ceilometer will poll the v2 API. To poll legacy v1 API, add " +"neutron_lbaas_version=v1 option to configuration file." +msgstr "" +"By default, Ceilometer will poll the v2 API. To poll legacy v1 API, add " +"neutron_lbaas_version=v1 option to configuration file." + +msgid "" +"Ceilometer API is deprecated. Use the APIs from Aodh (alarms), Gnocchi " +"(metrics), and/or Panko (events)." +msgstr "" +"Ceilometer API is deprecated. Use the APIs from Aodh (alarms), Gnocchi " +"(metrics), and/or Panko (events)." + +msgid "Ceilometer Release Notes" +msgstr "Ceilometer Release Notes" + +msgid "" +"Ceilometer alarms code is now fully removed from code base. Equivalent " +"functionality is handled by Aodh." +msgstr "" +"Ceilometer alarms code is now fully removed from code base. Equivalent " +"functionality is handled by Aodh." + +msgid "" +"Ceilometer backends are no more only databases but also REST API like " +"Gnocchi. So ceilometer-dbsync binary name doesn't make a lot of sense and " +"have been renamed ceilometer-upgrade. The new binary handles database schema " +"upgrade like ceilometer-dbsync does, but it also handle any changes needed " +"in configured ceilometer backends like Gnocchi." +msgstr "" +"Ceilometer backends are no more only databases but also REST API like " +"Gnocchi. So ceilometer-dbsync binary name doesn't make a lot of sense and " +"have been renamed ceilometer-upgrade. 
The new binary handles database schema " +"upgrade like ceilometer-dbsync does, but it also handle any changes needed " +"in configured Ceilometer backends like Gnocchi." + +msgid "" +"Ceilometer legacy backends and Ceilometer API are now deprecated. Polling " +"all nova instances from compute agent is no more required with Gnocchi. So " +"we switch the [compute]instance_discovery_method to libvirt_metadata. To " +"switch back to the old deprecated behavior you can set it back to 'naive'." +msgstr "" +"Ceilometer legacy backends and Ceilometer API are now deprecated. Polling " +"all nova instances from compute agent is no more required with Gnocchi. So " +"we switch the [compute]instance_discovery_method to libvirt_metadata. To " +"switch back to the old deprecated behaviour you can set it back to 'naive'." + +msgid "" +"Ceilometer now creates it own archive policies in Gnocchi and use them to " +"create metrics in Gnocchi. Old metrics kept their current archive policies " +"and will not be updated with ceilometer-upgrade. Only newly created metrics " +"will be impacted. Archive policy can still be overridden with the publisher " +"url (e.g: gnocchi://archive_policy=high)." +msgstr "" +"Ceilometer now creates it own archive policies in Gnocchi and uses them to " +"create metrics in Gnocchi. Old metrics keep their current archive policies " +"and will not be updated with ceilometer-upgrade. Only newly created metrics " +"will be impacted. Archive policy can still be overridden with the publisher " +"URL (e.g: gnocchi://archive_policy=high)." + +msgid "" +"Ceilometer now leverages the latest distribution mechanism provided by the " +"tooz library. Therefore the options `coordination.retry_backoff` and " +"`coordination.max_retry_interval` do not exist anymore." +msgstr "" +"Ceilometer now leverages the latest distribution mechanism provided by the " +"tooz library. Therefore the options `coordination.retry_backoff` and " +"`coordination.max_retry_interval` do not exist any more." + +msgid "" +"Ceilometer previously did not create IPMI sensor data from IPMI agent or " +"Ironic in Gnocchi. This data is now pushed to Gnocchi." +msgstr "" +"Ceilometer previously did not create IPMI sensor data from IPMI agent or " +"Ironic in Gnocchi. This data is now pushed to Gnocchi." + +msgid "" +"Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. " +"The purpose of this middleware is to set up the request URL correctly in " +"case there is a proxy (for instance, a loadbalancer such as HAProxy) in " +"front of Ceilometer. So, for instance, when TLS connections are being " +"terminated in the proxy, and one tries to get the versions from the / " +"resource of Ceilometer, one will notice that the protocol is incorrect; It " +"will show 'http' instead of 'https'. So this middleware handles such cases. " +"Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off " +"by default and needs to be enabled via a configuration value." +msgstr "" +"Ceilometer sets up the HTTPProxyToWSGI middleware in front of Ceilometer. " +"The purpose of this middleware is to set up the request URL correctly in " +"case there is a proxy (for instance, a load balancer such as HAProxy) in " +"front of Ceilometer. So, for instance, when TLS connections are being " +"terminated in the proxy, and one tries to get the versions from the / " +"resource of Ceilometer, one will notice that the protocol is incorrect; It " +"will show 'http' instead of 'https'. So this middleware handles such cases. 
" +"Thus helping Keystone discovery work correctly. The HTTPProxyToWSGI is off " +"by default and needs to be enabled via a configuration value." + +msgid "" +"Ceilometer supports generic notifier to publish data and allow user to " +"customize parameters such as topic, transport driver and priority. The " +"publisher configuration in pipeline.yaml can be notifer://[notifier_ip]:" +"[notifier_port]?topic=[topic]&driver=driver&max_retry=100 Not only rabbit " +"driver, but also other driver like kafka can be used." +msgstr "" +"Ceilometer supports generic notifier to publish data and allow user to " +"customise parameters such as topic, transport driver and priority. The " +"publisher configuration in pipeline.yaml can be notifer://[notifier_ip]:" +"[notifier_port]?topic=[topic]&driver=driver&max_retry=100 Not only rabbit " +"driver, but also other driver like Kafka can be used." + +msgid "" +"Collector is no longer supported in this release. The collector introduces " +"lags in pushing data to backend. To optimize the architecture, Ceilometer " +"push data through dispatchers using publishers in notification agent " +"directly." +msgstr "" +"Collector is no longer supported in this release. The collector introduces " +"lags in pushing data to backend. To optimise the architecture, Ceilometer " +"pushes data through dispatchers using publishers in notification agent " +"directly." + +msgid "" +"Configuration values can passed in via the querystring of publisher in " +"pipeline. For example, rather than setting target, timeout, verify_ssl, and " +"batch_mode under [dispatcher_http] section of conf, you can specify http://" +"/?verify_ssl=True&batch=True&timeout=10. Use `raw_only=1` if only " +"the raw details of event are required." +msgstr "" +"Configuration values can passed in via the querystring of publisher in " +"pipeline. For example, rather than setting target, timeout, verify_ssl, and " +"batch_mode under [dispatcher_http] section of conf, you can specify http://" +"/?verify_ssl=True&batch=True&timeout=10. Use `raw_only=1` if only " +"the raw details of event are required." + +msgid "" +"Configure individual dispatchers by specifying meter_dispatchers and " +"event_dispatchers in configuration file." +msgstr "" +"Configure individual dispatchers by specifying meter_dispatchers and " +"event_dispatchers in configuration file." + +msgid "Critical Issues" +msgstr "Critical Issues" + +msgid "Current Series Release Notes" +msgstr "Current Series Release Notes" + +msgid "Deprecated `rgw.*` meters have been removed. Use `radosgw.*` instead." +msgstr "Deprecated `rgw.*` meters have been removed. Use `radosgw.*` instead." + +msgid "" +"Deprecating support for enabling pollsters via command line. Meter and " +"pollster enablement should be configured via polling.yaml file." +msgstr "" +"Deprecating support for enabling pollsters via command line. Meter and " +"pollster enablement should be configured via polling.yaml file." + +msgid "Deprecation Notes" +msgstr "Deprecation Notes" + +msgid "Fix ability to enable/disable radosgw.* meters explicitly" +msgstr "Fix ability to enable/disable radosgw.* meters explicitly" + +msgid "Fix samples from Heat to map to correct Gnocchi resource type" +msgstr "Fix samples from Heat to map to correct Gnocchi resource type" + +msgid "" +"Fix to improve handling messages in environments heavily backed up. " +"Previously, notification handlers greedily grabbed messages from queues " +"which could cause ordering issues. 
A fix was applied to sequentially process " +"messages in a single thread to prevent ordering issues." +msgstr "" +"Fix to improve handling messages in environments heavily backed up. " +"Previously, notification handlers greedily grabbed messages from queues " +"which could cause ordering issues. A fix was applied to sequentially process " +"messages in a single thread to prevent ordering issues." + +msgid "" +"For backward compatibility reason we temporary keep ceilometer-dbsync, at " +"least for one major version to ensure deployer have time update their " +"tooling." +msgstr "" +"For backward compatibility reason we temporary keep ceilometer-dbsync, at " +"least for one major version to ensure deployers have time update their " +"tooling." + +msgid "Gnocchi dispatcher now uses client rather than direct http requests" +msgstr "Gnocchi dispatcher now uses client rather than direct HTTP requests" + +msgid "" +"If workload partitioning of the notification agent is enabled, the " +"notification agent should not run alongside pre-Queens agents. Doing so may " +"result in missed samples when leveraging transformations. To upgrade without " +"loss of data, set `notification_control_exchanges` option to empty so only " +"existing `ceilometer-pipe-*` queues are processed. Once cleared, reset " +"`notification_control_exchanges` option and launch the new notification " +"agent(s). If `workload_partitioning` is not enabled, no special steps are " +"required." +msgstr "" +"If workload partitioning of the notification agent is enabled, the " +"notification agent should not run alongside pre-Queens agents. Doing so may " +"result in missed samples when leveraging transformations. To upgrade without " +"loss of data, set `notification_control_exchanges` option to empty so only " +"existing `ceilometer-pipe-*` queues are processed. Once cleared, reset " +"`notification_control_exchanges` option and launch the new notification " +"agent(s). If `workload_partitioning` is not enabled, no special steps are " +"required." + +msgid "" +"If you are using Gnocchi as backend it's strongly recommended to switch " +"[compute]/instance_discovery_method to libvirt_metadata. This will reduce " +"the load on the Nova API especially if you have many compute nodes." +msgstr "" +"If you are using Gnocchi as backend it's strongly recommended to switch " +"[compute]/instance_discovery_method to libvirt_metadata. This will reduce " +"the load on the Nova API especially if you have many compute nodes." + +msgid "" +"In an effort to minimise the noise, Ceilometer will no longer produce meters " +"which have no measureable data associated with it. Image meter only captures " +"state information which is already captured in events and other meters." +msgstr "" +"In an effort to minimise the noise, Ceilometer will no longer produce meters " +"which have no measurable data associated with it. Image meter only captures " +"state information which is already captured in events and other meters." + +msgid "" +"In the 'publishers' section of a meter/event pipeline definition, https:// " +"can now be used in addition to http://. Furthermore, either Basic or client-" +"certificate authentication can be used (obviously, client cert only makes " +"sense in the https case). For Basic authentication, use the form http://" +"username:password@hostname/. 
For client certificate authentication pass the " +"client certificate's path (and the key file path, if the key is not in the " +"certificate file) using the parameters 'clientcert' and 'clientkey', e.g. " +"https://hostname/path?clientcert=/path/to/cert&clientkey=/path/to/key. Any " +"parameters or credentials used for http(s) publishers are removed from the " +"URL before the actual HTTP request is made." +msgstr "" +"In the 'publishers' section of a meter/event pipeline definition, https:// " +"can now be used in addition to http://. Furthermore, either Basic or client-" +"certificate authentication can be used (obviously, client cert only makes " +"sense in the https case). For Basic authentication, use the form http://" +"username:password@hostname/. For client certificate authentication pass the " +"client certificate's path (and the key file path, if the key is not in the " +"certificate file) using the parameters 'clientcert' and 'clientkey', e.g. " +"https://hostname/path?clientcert=/path/to/cert&clientkey=/path/to/key. Any " +"parameters or credentials used for http(s) publishers are removed from the " +"URL before the actual HTTP request is made." + +msgid "" +"In the [dispatcher_http] section of ceilometer.conf, batch_mode can be set " +"to True to activate sending meters and events in batches, or False (default " +"value) to send each meter and event with a fresh HTTP call." +msgstr "" +"In the [dispatcher_http] section of ceilometer.conf, batch_mode can be set " +"to True to activate sending meters and events in batches, or False (default " +"value) to send each meter and event with a fresh HTTP call." + +msgid "" +"In the [dispatcher_http] section of ceilometer.conf, verify_ssl can be set " +"to True to use system-installed certificates (default value) or False to " +"ignore certificate verification (use in development only!). verify_ssl can " +"also be set to the location of a certificate file e.g. /some/path/cert.crt " +"(use for self-signed certs) or to a directory of certificates. The value is " +"passed as the 'verify' option to the underlying requests method, which is " +"documented at http://docs.python-requests.org/en/master/user/advanced/#ssl-" +"cert-verification" +msgstr "" +"In the [dispatcher_http] section of ceilometer.conf, verify_ssl can be set " +"to True to use system-installed certificates (default value) or False to " +"ignore certificate verification (use in development only!). verify_ssl can " +"also be set to the location of a certificate file e.g. /some/path/cert.crt " +"(use for self-signed certs) or to a directory of certificates. The value is " +"passed as the 'verify' option to the underlying requests method, which is " +"documented at http://docs.python-requests.org/en/master/user/advanced/#ssl-" +"cert-verification" + +msgid "Kafka publisher is deprecated to use generic notifier instead." +msgstr "Kafka publisher is deprecated to use generic notifier instead." + +msgid "Known Issues" +msgstr "Known Issues" + +msgid "Liberty Series Release Notes" +msgstr "Liberty Series Release Notes" + +msgid "Mitaka Release Notes" +msgstr "Mitaka Release Notes" + +msgid "Network Statistics From OpenDaylight." +msgstr "Network Statistics From OpenDaylight." + +msgid "" +"Neutron API is not designed to be polled against. When polling against " +"Neutron is enabled, Ceilometer's polling agents may generage a significant " +"load against the Neutron API. 
It is recommended that a dedicated API be " +"enabled for polling while Neutron's API is improved to handle polling." +msgstr "" +"Neutron API is not designed to be polled against. When polling against " +"Neutron is enabled, Ceilometer's polling agents may generate a significant " +"load against the Neutron API. It is recommended that a dedicated API be " +"enabled for polling while Neutron's API is improved to handle polling." + +msgid "New Features" +msgstr "New Features" + +msgid "Newton Release Notes" +msgstr "Newton Release Notes" + +msgid "Ocata Series Release Notes" +msgstr "Ocata Series Release Notes" + +msgid "Other Notes" +msgstr "Other Notes" + +msgid "Pike Series Release Notes" +msgstr "Pike Series Release Notes" + +msgid "" +"Pipeline Partitioning is also deprecated. This was only useful to workaround " +"of some issues that tranformers has." +msgstr "" +"Pipeline Partitioning is also deprecated. This was only useful to workaround " +"some issues that transformers had." + +msgid "" +"Pipeline processing in polling agents was removed in Liberty cycle. A new " +"polling specific definition file is created to handle polling functionality " +"and pipeline definition file is now reserved exclusively for transformations " +"and routing. The polling.yaml file follows the same syntax as the pipeline." +"yaml but only handles polling attributes such as interval, discovery, " +"resources, meter matching. It is configured by setting cfg_file under the " +"polling section.If no polling definition file is found, it will fallback to " +"reuse pipeline_cfg_file." +msgstr "" +"Pipeline processing in polling agents was removed in Liberty cycle. A new " +"polling specific definition file is created to handle polling functionality " +"and pipeline definition file is now reserved exclusively for transformations " +"and routing. The polling.yaml file follows the same syntax as the pipeline." +"yaml but only handles polling attributes such as interval, discovery, " +"resources, meter matching. It is configured by setting cfg_file under the " +"polling section.If no polling definition file is found, it will fallback to " +"reuse pipeline_cfg_file." + +msgid "" +"Pipeline.yaml files for agents should be updated to notifier:// or udp:// " +"publishers. The rpc:// publisher is no longer supported." +msgstr "" +"Pipeline.yaml files for agents should be updated to notifier:// or udp:// " +"publishers. The rpc:// publisher is no longer supported." + +msgid "Prelude" +msgstr "Prelude" + +msgid "Previously deprecated kwapi meters are not removed." +msgstr "Previously deprecated Kwapi meters are not removed." + +msgid "" +"Previously, to enable/disable radosgw.* meters, you must define entry_point " +"name rather than meter name. This is corrected so you do not need to be " +"aware of entry_point naming. Use `radosgw.*` to enable/disable radosgw " +"meters explicitly rather than `rgw.*`. `rgw.*` support is deprecated and " +"will be removed in Rocky." +msgstr "" +"Previously, to enable/disable radosgw.* meters, you must define entry_point " +"name rather than meter name. This is corrected so you do not need to be " +"aware of entry_point naming. Use `radosgw.*` to enable/disable radosgw " +"meters explicitly rather than `rgw.*`. `rgw.*` support is deprecated and " +"will be removed in Rocky." + +msgid "Queens Series Release Notes" +msgstr "Queens Series Release Notes" + +msgid "" +"RPC collector support is dropped. 
The queue-based notifier publisher and " +"collector was added as the recommended alternative as of Icehouse cycle." +msgstr "" +"RPC collector support is dropped. The queue-based notifier publisher and " +"collector was added as the recommended alternative as of Icehouse cycle." + +msgid "Remove direct publisher and use the explicit publisher instead." +msgstr "Remove direct publisher and use the explicit publisher instead." + +msgid "Remove eventlet from Ceilometer in favour of threaded approach" +msgstr "Remove eventlet from Ceilometer in favour of threaded approach" + +msgid "Run db-sync to add new indices." +msgstr "Run db-sync to add new indices." + +msgid "" +"Samples are required to measure some aspect of a resource. Samples not " +"measuring anything will be dropped." +msgstr "" +"Samples are required to measure some aspect of a resource. Samples not " +"measuring anything will be dropped." + +msgid "" +"Ship YAML files to ceilometer/pipeline/data/ make it convenient to update " +"all the files on upgrade. Users can copy yaml files from /usr/share/" +"ceilometer and customise their own files located in /etc/ceilometer/." +msgstr "" +"Ship YAML files to ceilometer/pipeline/data/ make it convenient to update " +"all the files on upgrade. Users can copy yaml files from /usr/share/" +"ceilometer and customise their own files located in /etc/ceilometer/." + +msgid "" +"Since the Glance v1 APIs won't be maintained any more, this change add the " +"support of glance v2 in images pollsters." +msgstr "" +"Since the Glance v1 APIs won't be maintained any more, this change add the " +"support of glance v2 in images pollsters." + +msgid "Start using reno to manage release notes." +msgstr "Start using Reno to manage release notes." + +msgid "" +"Support for CADF-only payload in HTTP dispatcher is dropped as audit " +"middleware in pyCADF was dropped in Kilo cycle." +msgstr "" +"Support for CADF-only payload in HTTP dispatcher is dropped as audit " +"middleware in pyCADF was dropped in Kilo cycle." + +msgid "" +"Support for CORS is added. More information can be found [`here `_]" +msgstr "" +"Support for CORS is added. More information can be found [`here `_]" + +msgid "" +"Support for polling Neutron's LBaaS v2 API was added as v1 API in Neutron is " +"deprecated. The same metrics are available between v1 and v2." +msgstr "" +"Support for polling Neutron's LBaaS v2 API was added as v1 API in Neutron is " +"deprecated. The same metrics are available between v1 and v2." + +msgid "" +"Support loading multiple meter definition files and allow users to add their " +"own meter definitions into several files according to different types of " +"metrics under the directory of /etc/ceilometer/meters.d." +msgstr "" +"Support loading multiple meter definition files and allow users to add their " +"own meter definitions into several files according to different types of " +"metrics under the directory of /etc/ceilometer/meters.d." + +msgid "" +"Support resource caching in Gnocchi dispatcher to improve write performance " +"to avoid additional queries." +msgstr "" +"Support resource caching in Gnocchi dispatcher to improve write performance " +"to avoid additional queries." + +msgid "" +"The Ceilometer compute agent can now retrieve some instance metadata from " +"the metadata libvirt API instead of polling the Nova API. Since Mitaka, Nova " +"fills this metadata with some information about the instance. 
To enable this " +"feature you should set [compute]/instance_discovery_method = " +"libvirt_metadata in the configuration file. The only downside of this method " +"is that user_metadata (and some other instance attributes) are no longer " +"part of the samples created by the agent. But when Gnocchi is used as " +"backend, this is not an issue since Gnocchi doesn't store resource metadata " +"aside of the measurements. And the missing informations are still retrieved " +"through the Nova notifications and will fully update the resource " +"information in Gnocchi." +msgstr "" +"The Ceilometer compute agent can now retrieve some instance metadata from " +"the metadata libvirt API instead of polling the Nova API. Since Mitaka, Nova " +"fills this metadata with some information about the instance. To enable this " +"feature you should set [compute]/instance_discovery_method = " +"libvirt_metadata in the configuration file. The only downside of this method " +"is that user_metadata (and some other instance attributes) are no longer " +"part of the samples created by the agent. But when Gnocchi is used as " +"backend, this is not an issue since Gnocchi doesn't store resource metadata " +"aside of the measurements. And the missing information is still retrieved " +"through the Nova notifications and will fully update the resource " +"information in Gnocchi." + +msgid "" +"The Events API (exposed at /v2/events) which was deprecated has been " +"removed. The Panko project is now responsible for providing this API and can " +"be installed separately." +msgstr "" +"The Events API (exposed at /v2/events) which was deprecated has been " +"removed. The Panko project is now responsible for providing this API and can " +"be installed separately." + +msgid "" +"The Gnocchi dispatcher has been removed and replaced by a native Gnocchi " +"publisher. The configuration options from the `[dispatcher_gnocchi]` has " +"been removed and should be passed via the URL in `pipeline.yaml`. The " +"service authentication override can be done by adding specific credentials " +"to a `[gnocchi]` section instead." +msgstr "" +"The Gnocchi dispatcher has been removed and replaced by a native Gnocchi " +"publisher. The configuration options from the `[dispatcher_gnocchi]` has " +"been removed and should be passed via the URL in `pipeline.yaml`. The " +"service authentication override can be done by adding specific credentials " +"to a `[gnocchi]` section instead." + +msgid "" +"The Kwapi pollsters are deprecated and will be removed in the next major " +"version of Ceilometer." +msgstr "" +"The Kwapi pollsters are deprecated and will be removed in the next major " +"version of Ceilometer." + +msgid "" +"The [compute]/workload_partitioning = True is deprecated in favor of " +"[compute]/instance_discovery_method = workload_partitioning" +msgstr "" +"The [compute]/workload_partitioning = True is deprecated in favour of " +"[compute]/instance_discovery_method = workload_partitioning" + +msgid "The `image` meter is dropped in favour of `image.size` meter." +msgstr "The `image` meter is dropped in favour of `image.size` meter." + +msgid "The `instance` meter no longer will be generated." +msgstr "The `instance` meter no longer will be generated." + +msgid "" +"The `instance` meter no longer will be generated. For equivalent " +"functionality, perform the exact same query on any compute meter such as " +"`cpu`, `disk.read.requests`, `memory.usage`, `network.incoming.bytes`, etc..." 
+msgstr "" +"The `instance` meter no longer will be generated. For equivalent " +"functionality, perform the exact same query on any compute meter such as " +"`cpu`, `disk.read.requests`, `memory.usage`, `network.incoming.bytes`, etc..." + +msgid "" +"The `shuffle_time_before_polling_task` option has been removed. This option " +"never worked in the way it was originally intended too." +msgstr "" +"The `shuffle_time_before_polling_task` option has been removed. This option " +"never worked in the way it was originally intended to." + +msgid "" +"The api-paste.ini file can be modified to include or exclude the CORs " +"middleware. Additional configurations can be made to middleware as well." +msgstr "" +"The api-paste.ini file can be modified to include or exclude the CORs " +"middleware. Additional configurations can be made to middleware as well." + +msgid "The api.pecan_debug option has been removed." +msgstr "The api.pecan_debug option has been removed." + +msgid "" +"The collector service is removed. From Ocata, it's possible to edit the " +"pipeline.yaml and event_pipeline.yaml files and modify the publisher to " +"provide the same functionality as collector dispatcher. You may change " +"publisher to 'gnocchi', 'http', 'panko', or any combination of available " +"publishers listed in documentation." +msgstr "" +"The collector service is removed. From Ocata, it's possible to edit the " +"pipeline.yaml and event_pipeline.yaml files and modify the publisher to " +"provide the same functionality as collector dispatcher. You may change " +"publisher to 'gnocchi', 'http', 'panko', or any combination of available " +"publishers listed in documentation." + +msgid "The deprecated Ceilometer API has been removed." +msgstr "The deprecated Ceilometer API has been removed." + +msgid "" +"The deprecated `compute.workload_partitioning` option has been removed in " +"favor of `compute.instance_discovery_method`." +msgstr "" +"The deprecated `compute.workload_partitioning` option has been removed in " +"favour of `compute.instance_discovery_method`." + +msgid "The deprecated `nova_http_log_debug` option has been removed." +msgstr "The deprecated `nova_http_log_debug` option has been removed." + +msgid "The deprecated `pollster-list` option has been removed." +msgstr "The deprecated `pollster-list` option has been removed." + +msgid "" +"The deprecated ceilometer-dbsync has been removed. Use ceilometer-upgrade " +"instead." +msgstr "" +"The deprecated ceilometer-dbsync has been removed. Use ceilometer-upgrade " +"instead." + +msgid "The deprecated control exchange options have been removed." +msgstr "The deprecated control exchange options have been removed." + +msgid "The deprecated file dispatcher has been removed." +msgstr "The deprecated file dispatcher has been removed." + +msgid "The deprecated http dispatcher has been removed." +msgstr "The deprecated http dispatcher has been removed." + +msgid "" +"The deprecated kafka publisher has been removed, use NotifierPublisher " +"instead." +msgstr "" +"The deprecated Kafka publisher has been removed, use NotifierPublisher " +"instead." + +msgid "" +"The deprecated support of configure polling in the `pipeline.yaml` file has " +"been removed. Ceilometer now only uses the `polling.yaml` file for polling " +"configuration." +msgstr "" +"The deprecated support of configure polling in the `pipeline.yaml` file has " +"been removed. Ceilometer now only uses the `polling.yaml` file for polling " +"configuration." 
+ +msgid "" +"The event database dispatcher is now deprecated. It has been moved to a new " +"project, alongside the Ceilometer API for /v2/events, called Panko." +msgstr "" +"The event database dispatcher is now deprecated. It has been moved to a new " +"project, alongside the Ceilometer API for /v2/events, called Panko." + +msgid "" +"The notification-agent can now be configured to either build meters or " +"events. By default, the notification agent will continue to load both " +"pipelines and build both data models. To selectively enable a pipeline, " +"configure the `pipelines` option under the `[notification]` section." +msgstr "" +"The notification-agent can now be configured to either build meters or " +"events. By default, the notification agent will continue to load both " +"pipelines and build both data models. To selectively enable a pipeline, " +"configure the `pipelines` option under the `[notification]` section." + +msgid "" +"The option 'glance_page_size' has been removed because it's not actually " +"needed." +msgstr "" +"The option 'glance_page_size' has been removed because it's not actually " +"needed." + +msgid "" +"The options 'requeue_event_on_dispatcher_error' and " +"'requeue_sample_on_dispatcher_error' have been enabled and removed." +msgstr "" +"The options 'requeue_event_on_dispatcher_error' and " +"'requeue_sample_on_dispatcher_error' have been enabled and removed." + +msgid "" +"The pipeline dynamic refresh code has been removed. Ceilometer relies on the " +"cotyledon library for a few releases which provides reload functionality by " +"sending the SIGHUP signal to the process. This achieves the same feature " +"while making sure the reload is explicit once the file is correctly and " +"entirely written to the disk, avoiding the failing load of half-written " +"files." +msgstr "" +"The pipeline dynamic refresh code has been removed. Ceilometer relies on the " +"cotyledon library for a few releases which provides reload functionality by " +"sending the SIGHUP signal to the process. This achieves the same feature " +"while making sure the reload is explicit once the file is correctly and " +"entirely written to the disk, avoiding the failing load of half-written " +"files." + +msgid "" +"The previous configuration options default for " +"'requeue_sample_on_dispatcher_error' and 'requeue_event_on_dispatcher_error' " +"allowed to lose data very easily: if the dispatcher failed to send data to " +"the backend (e.g. Gnocchi is down), then the dispatcher raised and the data " +"were lost forever. This was completely unacceptable, and nobody should be " +"able to configure Ceilometer in that way.\"" +msgstr "" +"The previous configuration options default for " +"'requeue_sample_on_dispatcher_error' and 'requeue_event_on_dispatcher_error' " +"allowed to lose data very easily: if the dispatcher failed to send data to " +"the backend (e.g. Gnocchi is down), then the dispatcher raised and the data " +"were lost forever. This was completely unacceptable, and nobody should be " +"able to configure Ceilometer in that way.\"" + +msgid "" +"The tenant (project) discovery code in the polling agent now scans for " +"tenants in all available domains." +msgstr "" +"The tenant (project) discovery code in the polling agent now scans for " +"tenants in all available domains." + +msgid "" +"The transport_url defined in [oslo_messaging_notifications] was never used, " +"which contradicts the oslo_messaging documentation. This is now fixed." 
+msgstr "" +"The transport_url defined in [oslo_messaging_notifications] was never used, " +"which contradicts the oslo_messaging documentation. This is now fixed." + +msgid "" +"To minimise load on Nova API, an additional configuration option was added " +"to control discovery interval vs metric polling interval. If " +"resource_update_interval option is configured in compute section, the " +"compute agent will discover new instances based on defined interval. The " +"agent will continue to poll the discovered instances at the interval defined " +"by pipeline." +msgstr "" +"To minimise load on Nova API, an additional configuration option was added " +"to control discovery interval vs metric polling interval. If " +"resource_update_interval option is configured in compute section, the " +"compute agent will discover new instances based on defined interval. The " +"agent will continue to poll the discovered instances at the interval defined " +"by pipeline." + +msgid "" +"To utilize the new policy support. The policy.json file should be updated " +"accordingly. The pre-existing policy.json file will continue to function as " +"it does if policy changes are not required." +msgstr "" +"To utilize the new policy support. The policy.json file should be updated " +"accordingly. The pre-existing policy.json file will continue to function as " +"it does if policy changes are not required." + +msgid "Upgrade Notes" +msgstr "Upgrade Notes" + +msgid "" +"Usage of pipeline.yaml for polling configuration is now deprecated. The " +"dedicated polling.yaml should be used instead." +msgstr "" +"Usage of pipeline.yaml for polling configuration is now deprecated. The " +"dedicated polling.yaml should be used instead." + +msgid "" +"Usage of transformers in Ceilometer pipelines is deprecated. Transformers in " +"Ceilometer have never computed samples correctly when you have multiple " +"workers. This functionality can be done by the storage backend easily " +"without all issues that Ceilometer has. For example, the rating is already " +"computed in Gnocchi today." +msgstr "" +"Usage of transformers in Ceilometer pipelines is deprecated. Transformers in " +"Ceilometer have never computed samples correctly when you have multiple " +"workers. This functionality can be done by the storage backend easily " +"without all issues that Ceilometer has. For example, the rating is already " +"computed in Gnocchi today." + +msgid "" +"Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw." +"*`" +msgstr "" +"Use `radosgw.*` to enable/disable radosgw meters explicitly rather than `rgw." +"*`" + +msgid "" +"With collector service being deprecated, we now have to address the " +"duplication between dispatchers and publishers. The file dispatcher is now " +"marked as deprecated. Use the file publisher to push samples into a file." +msgstr "" +"With collector service being deprecated, we now have to address the " +"duplication between dispatchers and publishers. The file dispatcher is now " +"marked as deprecated. Use the file publisher to push samples into a file." + +msgid "" +"Workload partitioning of notification agent is now split into queues based " +"on pipeline type (sample, event, etc...) rather than per individual " +"pipeline. This will save some memory usage specifically for pipeline " +"definitions with many source/sink combinations." +msgstr "" +"Workload partitioning of notification agent is now split into queues based " +"on pipeline type (sample, event, etc...) 
rather than per individual "
+"pipeline. This will save some memory usage specifically for pipeline "
+"definitions with many source/sink combinations."
+
+msgid ""
+"[`bug 1254800 `_] Add "
+"better support to catch race conditions when creating event_types"
+msgstr ""
+"[`bug 1254800 `_] Add "
+"better support to catch race conditions when creating event_types"
+
+msgid ""
+"[`bug 1388680 `_] "
+"Suppose ability to query for None value when using SQL backend."
+msgstr ""
+"[`bug 1388680 `_] "
+"Support ability to query for None value when using SQL backend."
+
+msgid ""
+"[`bug 1480333 `_] "
+"Support ability to configure collector to capture events or meters mutally "
+"exclusively, rather than capturing both always."
+msgstr ""
+"[`bug 1480333 `_] "
+"Support ability to configure collector to capture events or meters mutually "
+"exclusively, rather than capturing both always."
+
+msgid ""
+"[`bug 1491509 `_] Patch "
+"to unify timestamp in samples polled by pollsters. Set the time point "
+"polling starts as timestamp of samples, and drop timetamping in pollsters."
+msgstr ""
+"[`bug 1491509 `_] Patch "
+"to unify timestamp in samples polled by pollsters. Set the time point "
+"polling starts as timestamp of samples, and drop timestamping in pollsters."
+
+msgid ""
+"[`bug 1504495 `_] "
+"Configure ceilometer to handle policy.json rules when possible."
+msgstr ""
+"[`bug 1504495 `_] "
+"Configure Ceilometer to handle policy.json rules when possible."
+
+msgid ""
+"[`bug 1506738 `_] [`bug "
+"1509677 `_] Optimise SQL "
+"backend queries to minimise query load"
+msgstr ""
+"[`bug 1506738 `_] [`bug "
+"1509677 `_] Optimise SQL "
+"backend queries to minimise query load"
+
+msgid ""
+"[`bug 1506959 `_] Add "
+"support to query unique set of meter names rather than meters associated "
+"with each resource. The list is available by adding unique=True option to "
+"request."
+msgstr ""
+"[`bug 1506959 `_] Add "
+"support to query unique set of meter names rather than meters associated "
+"with each resource. The list is available by adding unique=True option to "
+"request."
+
+msgid ""
+"[`bug 1513731 `_] Add "
+"support for hardware cpu_util in snmp.yaml"
+msgstr ""
+"[`bug 1513731 `_] Add "
+"support for hardware cpu_util in snmp.yaml"
+
+msgid ""
+"[`bug 1518338 `_] Add "
+"support for storing SNMP metrics in Gnocchi.This functionality requires "
+"Gnocchi v2.1.0 to be installed."
+msgstr ""
+"[`bug 1518338 `_] Add "
+"support for storing SNMP metrics in Gnocchi. This functionality requires "
+"Gnocchi v2.1.0 to be installed."
+
+msgid ""
+"[`bug 1519767 `_] "
+"fnmatch functionality in python <= 2.7.9 is not threadsafe. this issue and "
+"its potential race conditions are now patched."
+msgstr ""
+"[`bug 1519767 `_] "
+"fnmatch functionality in python <= 2.7.9 is not thread-safe. This issue and "
+"its potential race conditions are now patched."
+
+msgid ""
+"[`bug 1523124 `_] Fix "
+"gnocchi dispatcher to support UDP collector"
+msgstr ""
+"[`bug 1523124 `_] Fix "
+"Gnocchi dispatcher to support UDP collector"
+
+msgid ""
+"[`bug 1526793 `_] "
+"Additional indices were added to better support querying of event data."
+msgstr ""
+"[`bug 1526793 `_] "
+"Additional indices were added to better support querying of event data."
+
+msgid ""
+"[`bug 1530793 `_] "
+"network.services.lb.incoming.bytes meter was previous set to incorrect type. "
+"It should be a gauge meter."
+msgstr ""
+"[`bug 1530793 `_] "
+"network.services.lb.incoming.bytes meter was previously set to incorrect type. 
" +"It should be a gauge meter." + +msgid "" +"[`bug 1531626 `_] Ensure " +"aggregator transformer timeout is honoured if size is not provided." +msgstr "" +"[`bug 1531626 `_] Ensure " +"aggregator transformer timeout is honoured if size is not provided." + +msgid "" +"[`bug 1532661 `_] Fix " +"statistics query failures due to large numbers stored in MongoDB. Data from " +"MongoDB is returned as Int64 for big numbers when int and float types are " +"expected. The data is cast to appropriate type to handle large data." +msgstr "" +"[`bug 1532661 `_] Fix " +"statistics query failures due to large numbers stored in MongoDB. Data from " +"MongoDB is returned as Int64 for big numbers when int and float types are " +"expected. The data is cast to appropriate type to handle large data." + +msgid "" +"[`bug 1533787 `_] Fix an " +"issue where agents are not properly getting registered to group when " +"multiple notification agents are deployed. This can result in bad " +"transformation as the agents are not coordinated. It is still recommended to " +"set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when " +"deploying multiple agents." +msgstr "" +"[`bug 1533787 `_] Fix an " +"issue where agents are not properly getting registered to group when " +"multiple notification agents are deployed. This can result in bad " +"transformation as the agents are not coordinated. It is still recommended to " +"set heartbeat_timeout_threshold = 0 in [oslo_messaging_rabbit] section when " +"deploying multiple agents." + +msgid "" +"[`bug 1536338 `_] Patch " +"was added to fix the broken floatingip pollster that polled data from nova " +"api, but since the nova api filtered the data by tenant, ceilometer was not " +"getting any data back. The fix changes the pollster to use the neutron api " +"instead to get the floating ip info." +msgstr "" +"[`bug 1536338 `_] Patch " +"was added to fix the broken floatingip pollster that polled data from Nova " +"API, but since the Nova API filtered the data by tenant, Ceilometer was not " +"getting any data back. The fix changes the pollster to use the Neutron API " +"instead to get the floating IP info." + +msgid "" +"[`bug 1536498 `_] Patch " +"to fix duplicate meter definitions causing duplicate samples. If a duplicate " +"is found, log a warning and skip the meter definition. Note that the first " +"occurance of a meter will be used and any following duplicates will be " +"skipped from processing." +msgstr "" +"[`bug 1536498 `_] Patch " +"to fix duplicate meter definitions causing duplicate samples. If a duplicate " +"is found, log a warning and skip the meter definition. Note that the first " +"occurrence of a meter will be used and any following duplicates will be " +"skipped from processing." + +msgid "" +"[`bug 1536699 `_] Patch " +"to fix volume field lookup in meter definition file. In case the field is " +"missing in the definition, it raises a keyerror and aborts. Instead we " +"should skip the missing field meter and continue with the rest of the " +"definitions." +msgstr "" +"[`bug 1536699 `_] Patch " +"to fix volume field lookup in meter definition file. In case the field is " +"missing in the definition, it raises a key error and aborts. Instead we " +"should skip the missing field meter and continue with the rest of the " +"definitions." + +msgid "" +"[`bug 1539163 `_] Add " +"ability to define whether to use first or last timestamps when aggregating " +"samples. This will allow more flexibility when chaining transformers." 
+msgstr "" +"[`bug 1539163 `_] Add " +"ability to define whether to use first or last timestamps when aggregating " +"samples. This will allow more flexibility when chaining transformers." + +msgid "" +"[`bug 1542189 `_] Handle " +"malformed resource definitions in gnocchi_resources.yaml gracefully. " +"Currently we raise an exception once we hit a bad resource and skip the " +"rest. Instead the patch skips the bad resource and proceeds with rest of the " +"definitions." +msgstr "" +"[`bug 1542189 `_] Handle " +"malformed resource definitions in gnocchi_resources.yaml gracefully. " +"Currently we raise an exception once we hit a bad resource and skip the " +"rest. Instead the patch skips the bad resource and proceeds with rest of the " +"definitions." + +msgid "" +"[`bug 1550436 `_] Cache " +"json parsers when building parsing logic to handle event and meter " +"definitions. This will improve agent startup and setup time." +msgstr "" +"[`bug 1550436 `_] Cache " +"json parsers when building parsing logic to handle event and meter " +"definitions. This will improve agent startup and setup time." + +msgid "" +"[`bug 1578128 `_] Add a " +"tool that allow users to drop the legacy alarm and alarm_history tables." +msgstr "" +"[`bug 1578128 `_] Add a " +"tool that allow users to drop the legacy alarm and alarm_history tables." + +msgid "" +"[`bug 1597618 `_] Add " +"the full support of snmp v3 user security model." +msgstr "" +"[`bug 1597618 `_] Add " +"the full support of SNMP v3 user security model." + +msgid "" +"[`bug 255569 `_] Fix " +"caching support in Gnocchi dispatcher. Added better locking support to " +"enable smoother cache access." +msgstr "" +"[`bug 255569 `_] Fix " +"caching support in Gnocchi dispatcher. Added better locking support to " +"enable smoother cache access." + +msgid "" +"`ceilometer-upgrade` must be run to build IPMI sensor resource in Gnocchi." +msgstr "" +"`ceilometer-upgrade` must be run to build IPMI sensor resource in Gnocchi." + +msgid "" +"`launched_at`/`created_at`/`deleted_at` of Nova instances are now tracked." +msgstr "" +"`launched_at`/`created_at`/`deleted_at` of Nova instances are now tracked." + +msgid "" +"audit middleware in keystonemiddleware library should be used for similar " +"support." +msgstr "" +"audit middleware in keystonemiddleware library should be used for similar " +"support." + +msgid "" +"batch_size and batch_timeout configuration options are added to both " +"[notification] and [collector] sections of configuration. The batch_size " +"controls the number of messages to grab before processing. Similarly, the " +"batch_timeout defines the wait time before processing." +msgstr "" +"batch_size and batch_timeout configuration options are added to both " +"[notification] and [collector] sections of configuration. The batch_size " +"controls the number of messages to grab before processing. Similarly, the " +"batch_timeout defines the wait time before processing." + +msgid "" +"cpu_util and \\*.rate meters are deprecated and will be removed in future " +"release in favor of the Gnocchi rate calculation equivalent." +msgstr "" +"cpu_util and \\*.rate meters are deprecated and will be removed in future " +"release in favour of the Gnocchi rate calculation equivalent." + +msgid "" +"disk.* aggregated metrics for instance are deprecated, in favor of the per " +"disk metrics (disk.device.*). Now, it's up to the backend to provide such " +"aggregation feature. Gnocchi already provides this." 
+msgstr "" +"disk.* aggregated metrics for instance are deprecated, in favour of the per " +"disk metrics (disk.device.*). Now, it's up to the backend to provide such an " +"aggregation feature. Gnocchi already provides this." + +msgid "gnocchi_resources.yaml in Ceilometer should be updated." +msgstr "gnocchi_resources.yaml in Ceilometer should be updated." + +msgid "gnocchiclient library is now a requirement if using ceilometer+gnocchi." +msgstr "" +"gnocchiclient library is now a requirement if using ceilometer+gnocchi." + +msgid "use memory usable metric from libvirt memoryStats if available." +msgstr "use memory usable metric from libvirt memoryStats if available." diff -Nru ceilometer-10.0.1/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po ceilometer-11.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po --- ceilometer-10.0.1/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,63 @@ +# Gérald LONLAS , 2016. #zanata +msgid "" +msgstr "" +"Project-Id-Version: Ceilometer Release Notes\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2017-11-21 04:18+0000\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"PO-Revision-Date: 2016-10-22 05:24+0000\n" +"Last-Translator: Gérald LONLAS \n" +"Language-Team: French\n" +"Language: fr\n" +"X-Generator: Zanata 3.9.6\n" +"Plural-Forms: nplurals=2; plural=(n > 1)\n" + +msgid "5.0.1" +msgstr "5.0.1" + +msgid "5.0.2" +msgstr "5.0.2" + +msgid "5.0.3" +msgstr "5.0.3" + +msgid "6.0.0" +msgstr "6.0.0" + +msgid "7.0.0" +msgstr "7.0.0" + +msgid "Bug Fixes" +msgstr "Corrections de bugs" + +msgid "Ceilometer Release Notes" +msgstr "Note de release de Ceilometer" + +msgid "Critical Issues" +msgstr "Erreurs critiques" + +msgid "Current Series Release Notes" +msgstr "Note de la release actuelle" + +msgid "Deprecation Notes" +msgstr "Notes dépréciées " + +msgid "Known Issues" +msgstr "Problèmes connus" + +msgid "Liberty Series Release Notes" +msgstr "Note de release pour Liberty" + +msgid "New Features" +msgstr "Nouvelles fonctionnalités" + +msgid "Other Notes" +msgstr "Autres notes" + +msgid "Start using reno to manage release notes." +msgstr "Commence à utiliser reno pour la gestion des notes de release" + +msgid "Upgrade Notes" +msgstr "Notes de mises à jours" diff -Nru ceilometer-10.0.1/releasenotes/source/queens.rst ceilometer-11.0.0/releasenotes/source/queens.rst --- ceilometer-10.0.1/releasenotes/source/queens.rst 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/releasenotes/source/queens.rst 2018-07-30 18:08:00.000000000 +0000 @@ -0,0 +1,6 @@ +=================================== + Queens Series Release Notes +=================================== + +.. release-notes:: + :branch: stable/queens diff -Nru ceilometer-10.0.1/RELEASENOTES.rst ceilometer-11.0.0/RELEASENOTES.rst --- ceilometer-10.0.1/RELEASENOTES.rst 2018-06-14 13:58:09.000000000 +0000 +++ ceilometer-11.0.0/RELEASENOTES.rst 2018-07-30 18:10:31.000000000 +0000 @@ -2,18 +2,121 @@ ceilometer ========== -.. _ceilometer_10.0.1: +.. _ceilometer_11.0.0: -10.0.1 +11.0.0 ====== -.. _ceilometer_10.0.1_New Features: +.. _ceilometer_11.0.0_New Features: New Features ------------ -.. releasenotes/notes/add-disk-latency-metrics-9e5c05108a78c3d9.yaml @ f4b58ae01e8ddfc515e6f14a0d19d726370f4870 +.. 
releasenotes/notes/add-disk-latency-metrics-9e5c05108a78c3d9.yaml @ b'8fdd19e78a2053285569cda05cdc4875b716190c' - Add `disk.device.read.latency` and `disk.device.write.latency` meters to capture total time used by read or write operations. +.. releasenotes/notes/instance-record-launched-created-deleted-d7f44df3bbcf0790.yaml @ b'36414e1cebe3a43d962f8d2adfe7cc34742e9057' + +- `launched_at`/`created_at`/`deleted_at` of Nova instances are now tracked. + +.. releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml @ b'2dc21a5f05ee670292a8a7f97952d3942c32f5cf' + +- Add support for configuring the size of samples the poller will send in each batch. + +.. releasenotes/notes/prometheus-bcb201cfe46d5778.yaml @ b'2b8052052d861b856b3522a8d7f857735793f01b' + +- A new publisher has been added to push data to Prometheus Pushgateway. + +.. releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml @ b'e906bcda82918aff000ab76f067a2dc49660d0b4' + +- Archive policies can now be configured per metrics in gnocchi_resources.yaml. + A default list of archive policies is now created by Ceilometer. + They are called "ceilometer-low-rate" for all IO metrics and "ceilometer-low" + for others. + +.. releasenotes/notes/use-usable-metric-if-available-970ee58e8fdeece6.yaml @ b'2dee485da7a6f2cdf96525fabc18a8c27c8be570' + +- use memory usable metric from libvirt memoryStats if available. + + +.. _ceilometer_11.0.0_Known Issues: + +Known Issues +------------ + +.. releasenotes/notes/gnocchi-no-metric-by-default-b643e09f5ffef2c4.yaml @ b'826ba35c6eb9900bb0a557f6e4f06f7d1b9bd394' + +- Ceilometer created metrics that could never get measures depending on the + polling configuration. Metrics are now created only if Ceilometer gets at + least a measure for them. + + +.. _ceilometer_11.0.0_Upgrade Notes: + +Upgrade Notes +------------- + +.. releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml @ b'663c523328690dfcc30c1ad986ba57e566bd194c' + +- `ceilometer-upgrade` must be run to build IPMI sensor resource in Gnocchi. + +.. releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml @ b'2dc21a5f05ee670292a8a7f97952d3942c32f5cf' + +- batch_size option added to [polling] section of configuration. Use batch_size=0 to disable batching of samples. + +.. releasenotes/notes/remove-gnocchi-dispatcher-options-4f4ba2a155c1a766.yaml @ b'c567258979064d4a6e82057f68587b184ee939aa' + +- The deprecated `gnocchi_dispatcher` option group has been removed. + +.. releasenotes/notes/removed-rgw-ae3d80c2eafc9319.yaml @ b'dd1b7abf329755c8377862328f770e0b7974f5c2' + +- Deprecated `rgw.*` meters have been removed. Use `radosgw.*` instead. + +.. releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml @ b'e906bcda82918aff000ab76f067a2dc49660d0b4' + +- Ceilometer now creates its own archive policies in Gnocchi and uses them to + create metrics in Gnocchi. Old metrics keep their current archive policies + and will not be updated with ceilometer-upgrade. Only newly created metrics + will be impacted. Archive policy can still be overridden with the publisher url + (e.g: gnocchi://archive_policy=high). + + +.. _ceilometer_11.0.0_Deprecation Notes: + +Deprecation Notes +----------------- + +.. releasenotes/notes/polling-batch-size-7fe11925df8d1221.yaml @ b'2dc21a5f05ee670292a8a7f97952d3942c32f5cf' + +- The option batch_polled_samples in the [DEFAULT] section is deprecated. Use batch_size option in [polling] to configure and/or disable batching. + +.. 
releasenotes/notes/save-rate-in-gnocchi-66244262bc4b7842.yaml @ b'e906bcda82918aff000ab76f067a2dc49660d0b4' + +- cpu_util and \*.rate meters are deprecated and will be removed in future + release in favor of the Gnocchi rate calculation equivalent. + +.. releasenotes/notes/transformer-ed4b1ea7d1752576.yaml @ b'1dcbd607df0696101b40f77d7721489679ebe0ba' + +- Usage of transformers in Ceilometer pipelines is deprecated. Transformers in Ceilometer + have never computed samples correctly when you have multiple workers. This functionality can + be done by the storage backend easily without all issues that Ceilometer has. For example, the + rating is already computed in Gnocchi today. + +.. releasenotes/notes/transformer-ed4b1ea7d1752576.yaml @ b'1dcbd607df0696101b40f77d7721489679ebe0ba' + +- Pipeline Partitioning is also deprecated. This was only useful to + workaround of some issues that tranformers has. + + +.. _ceilometer_11.0.0_Bug Fixes: + +Bug Fixes +--------- + +.. releasenotes/notes/add-ipmi-sensor-data-gnocchi-70573728499abe86.yaml @ b'663c523328690dfcc30c1ad986ba57e566bd194c' + +- Ceilometer previously did not create IPMI sensor data from IPMI agent or + Ironic in Gnocchi. This data is now pushed to Gnocchi. + diff -Nru ceilometer-10.0.1/run-tests.sh ceilometer-11.0.0/run-tests.sh --- ceilometer-10.0.1/run-tests.sh 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/run-tests.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,14 +0,0 @@ -#!/bin/bash -set -e -set -x - -echo -echo "OS_TEST_PATH: $OS_TEST_PATH" -echo "CEILOMETER_TEST_DEBUG: $CEILOMETER_TEST_DEBUG" -echo - -if [ "$CEILOMETER_TEST_DEBUG" == "True" ]; then - oslo_debug_helper $* -else - ./tools/pretty_tox.sh $* -fi diff -Nru ceilometer-10.0.1/setup.cfg ceilometer-11.0.0/setup.cfg --- ceilometer-10.0.1/setup.cfg 2018-06-14 13:58:09.000000000 +0000 +++ ceilometer-11.0.0/setup.cfg 2018-07-30 18:10:32.000000000 +0000 @@ -149,13 +149,6 @@ port.transmit.bytes = ceilometer.network.statistics.port_v2:PortPollsterTransmitBytes port.receive.drops = ceilometer.network.statistics.port_v2:PortPollsterReceiveDrops port.receive.errors = ceilometer.network.statistics.port_v2:PortPollsterReceiveErrors - # rgw.* are incorrect and deprecated to be removed in Rocky - rgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster - rgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster - rgw.objects = ceilometer.objectstore.rgw:ObjectsPollster - rgw.objects.size = ceilometer.objectstore.rgw:ObjectsSizePollster - rgw.objects.containers = ceilometer.objectstore.rgw:ObjectsContainersPollster - rgw.usage = ceilometer.objectstore.rgw:UsagePollster radosgw.containers.objects = ceilometer.objectstore.rgw:ContainersObjectsPollster radosgw.containers.objects.size = ceilometer.objectstore.rgw:ContainersSizePollster radosgw.objects = ceilometer.objectstore.rgw:ObjectsPollster @@ -231,6 +224,7 @@ udp = ceilometer.publisher.udp:UDPPublisher file = ceilometer.publisher.file:FilePublisher http = ceilometer.publisher.http:HttpPublisher + prometheus = ceilometer.publisher.prometheus:PrometheusPublisher https = ceilometer.publisher.http:HttpPublisher gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher zaqar = ceilometer.publisher.zaqar:ZaqarPublisher diff -Nru ceilometer-10.0.1/.stestr.conf ceilometer-11.0.0/.stestr.conf --- ceilometer-10.0.1/.stestr.conf 1970-01-01 00:00:00.000000000 +0000 +++ ceilometer-11.0.0/.stestr.conf 2018-07-30 18:08:11.000000000 +0000 @@ -0,0 +1,3 @@ +[DEFAULT] 
+test_path=${OS_TEST_PATH:-ceilometer/tests/unit} +top_dir=./ \ No newline at end of file diff -Nru ceilometer-10.0.1/.testr.conf ceilometer-11.0.0/.testr.conf --- ceilometer-10.0.1/.testr.conf 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/.testr.conf 1970-01-01 00:00:00.000000000 +0000 @@ -1,9 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-600} \ - ${PYTHON:-python} -m subunit.run discover ${OS_TEST_PATH:-./ceilometer/tests} -t . $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list -# NOTE(chdent): Only used/matches on gabbi-related tests. -group_regex=(gabbi\.(suitemaker|driver)\.test_gabbi_(?:prefix_|)[^_]+)_ diff -Nru ceilometer-10.0.1/test-requirements.txt ceilometer-11.0.0/test-requirements.txt --- ceilometer-10.0.1/test-requirements.txt 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/test-requirements.txt 2018-07-30 18:08:11.000000000 +0000 @@ -13,10 +13,9 @@ oslo.vmware>=1.16.0 # Apache-2.0 pyOpenSSL>=0.14 # Apache-2.0 sphinx>=1.6.2 # BSD -testrepository>=0.0.18 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD testtools>=1.4.0 # MIT gabbi>=1.30.0 # Apache-2.0 requests-aws>=0.1.4 # BSD License (3 clause) -os-testr>=0.4.1 # Apache-2.0 kafka-python>=1.3.2 # Apache-2.0 +stestr>=1.0.0 # Apache-2.0 diff -Nru ceilometer-10.0.1/tools/pretty_tox.sh ceilometer-11.0.0/tools/pretty_tox.sh --- ceilometer-10.0.1/tools/pretty_tox.sh 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/tools/pretty_tox.sh 1970-01-01 00:00:00.000000000 +0000 @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail - -TESTRARGS=$1 - -# --until-failure is not compatible with --subunit see: -# -# https://bugs.launchpad.net/testrepository/+bug/1411804 -# -# this work around exists until that is addressed -if [[ "$TESTARGS" =~ "until-failure" ]]; then - python setup.py testr --slowest --testr-args="$TESTRARGS" -else - python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f -fi diff -Nru ceilometer-10.0.1/tox.ini ceilometer-11.0.0/tox.ini --- ceilometer-10.0.1/tox.ini 2018-06-14 13:55:45.000000000 +0000 +++ ceilometer-11.0.0/tox.ini 2018-07-30 18:08:11.000000000 +0000 @@ -10,32 +10,25 @@ install_command = pip install -U {opts} {packages} usedevelop = True setenv = VIRTUAL_ENV={envdir} - OS_TEST_PATH=ceilometer/tests/unit CEILOMETER_TEST_BACKEND={env:CEILOMETER_TEST_BACKEND:none} - CEILOMETER_TEST_DEBUG={env:CEILOMETER_TEST_DEBUG:} - debug: CEILOMETER_TEST_DEBUG=True passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE CEILOMETER_* commands = - bash -x {toxinidir}/run-tests.sh "{posargs}" + stestr run {posargs} oslo-config-generator --config-file=etc/ceilometer/ceilometer-config-generator.conf whitelist_externals = bash -[testenv:integration] -setenv = OS_TEST_PATH=./ceilometer/tests/integration - OS_TEST_TIMEOUT=2400 - GABBI_LIVE_FAIL_IF_NO_TEST=1 -passenv = {[testenv]passenv} HEAT_* CEILOMETER_* GNOCCHI_* AODH_* PANKO_* GLANCE_* NOVA_* ADMIN_* -# NOTE(sileht): run gabbi-run to failfast in case of error because testr -# doesn't support --failfast, but we loose the testr report. 
-commands = - bash -c 'cd ceilometer/tests/integration/gabbi/gabbits-live && gabbi-run -x < autoscaling.yaml' - [testenv:cover] -setenv = OS_TEST_PATH=ceilometer/tests +basepython = python3 +setenv = + PYTHON=coverage run --source ceilometer --parallel-mode commands = - python setup.py testr --slowest --coverage --testr-args="{posargs}" + stestr run '{posargs}' + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml [testenv:pep8] +basepython = python3 deps = hacking<0.13,>=0.12 doc8 commands = @@ -45,16 +38,20 @@ bash -c "find ceilometer -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" [testenv:releasenotes] +basepython = python3 commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:genconfig] +basepython = python3 commands = oslo-config-generator --config-file=etc/ceilometer/ceilometer-config-generator.conf [testenv:docs] +basepython = python3 commands = python setup.py build_sphinx setenv = PYTHONHASHSEED=0 [testenv:venv] +basepython = python3 commands = {posargs} setenv = PYTHONHASHSEED=0 diff -Nru ceilometer-10.0.1/.zuul.yaml ceilometer-11.0.0/.zuul.yaml --- ceilometer-10.0.1/.zuul.yaml 2018-06-14 13:55:35.000000000 +0000 +++ ceilometer-11.0.0/.zuul.yaml 2018-07-30 18:08:11.000000000 +0000 @@ -9,9 +9,25 @@ - openstack-infra/devstack-gate - openstack/ceilometer +- job: + name: telemetry-tox-py37 + parent: openstack-tox + timeout: 2400 + irrelevant-files: + - ^.*\.rst$ + - ^api-ref/.*$ + - ^doc/.*$ + - ^releasenotes/.*$ + vars: + tox_envlist: py37 + bindep_profile: test py37 + nodeset: fedora-latest + - project: check: jobs: + - telemetry-tox-py37 + - openstack-tox-cover - grenade-dsvm-ceilometer: irrelevant-files: - ^(test-|)requirements.txt$ @@ -19,6 +35,7 @@ - telemetry-dsvm-integration gate: jobs: + - telemetry-tox-py37 - grenade-dsvm-ceilometer: irrelevant-files: - ^(test-|)requirements.txt$